From d9e64910affe9298c1d5b60d7085a82dcf1c8454 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 4 Oct 2017 12:28:20 -0400 Subject: [PATCH 01/42] [release-branch.go1.9] runtime: deflake TestPeriodicGC It was only waiting 0.1 seconds for the two GCs it wanted. Let it wait 1 second. Change-Id: Ib3cdc8127cbf95694a9f173643c02529a85063af Reviewed-on: https://go-review.googlesource.com/68118 Run-TryBot: Russ Cox Reviewed-by: Chris Broadfoot Reviewed-by: Austin Clements TryBot-Result: Gobot Gobot --- src/runtime/gc_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 03acc8aaa6..25dc869caa 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -170,7 +170,7 @@ func TestPeriodicGC(t *testing.T) { // slack if things are slow. var numGCs uint32 const want = 2 - for i := 0; i < 20 && numGCs < want; i++ { + for i := 0; i < 200 && numGCs < want; i++ { time.Sleep(5 * time.Millisecond) // Test that periodic GC actually happened. From a39bcecea6660d3c6d9770516df441c3f8fc47f5 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Fri, 22 Sep 2017 12:17:21 -0400 Subject: [PATCH 02/42] [release-branch.go1.9] cmd/go: reject update of VCS inside VCS Cherry-pick of CL 68110. Change-Id: Iae84c6404ab5eeb6950faa2364f97a017c67c506 Reviewed-on: https://go-review.googlesource.com/68022 Run-TryBot: Russ Cox Reviewed-by: Chris Broadfoot TryBot-Result: Gobot Gobot --- src/cmd/go/go_test.go | 19 +++++++++++ src/cmd/go/internal/get/get.go | 5 +++ src/cmd/go/internal/get/vcs.go | 58 +++++++++++++++++++++++++++++++++- 3 files changed, 81 insertions(+), 1 deletion(-) diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 7d80d965ae..c1b3975c7b 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -1317,6 +1317,25 @@ func TestGetGitDefaultBranch(t *testing.T) { tg.grepStdout(`\* another-branch`, "not on correct default branch") } +func TestAccidentalGitCheckout(t *testing.T) { + testenv.MustHaveExternalNetwork(t) + if _, err := exec.LookPath("git"); err != nil { + t.Skip("skipping because git binary not found") + } + + tg := testgo(t) + defer tg.cleanup() + tg.parallel() + tg.tempDir("src") + tg.setenv("GOPATH", tg.path(".")) + + tg.runFail("get", "-u", "vcs-test.golang.org/go/test1-svn-git") + tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason") + + tg.runFail("get", "-u", "vcs-test.golang.org/go/test2-svn-git/test2main") + tg.grepStderr("src[\\\\/]vcs-test.* uses git, but parent .*src[\\\\/]vcs-test.* uses svn", "get did not fail for right reason") +} + func TestErrorMessageForSyntaxErrorInTestGoFileSaysFAIL(t *testing.T) { tg := testgo(t) defer tg.cleanup() diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go index 550321198d..e5dda643e4 100644 --- a/src/cmd/go/internal/get/get.go +++ b/src/cmd/go/internal/get/get.go @@ -439,6 +439,11 @@ func downloadPackage(p *load.Package) error { p.Internal.Build.PkgRoot = filepath.Join(list[0], "pkg") } root := filepath.Join(p.Internal.Build.SrcRoot, filepath.FromSlash(rootPath)) + + if err := checkNestedVCS(vcs, root, p.Internal.Build.SrcRoot); err != nil { + return err + } + // If we've considered this repository already, don't do it again. 
if downloadRootCache[root] { return nil diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index 71d0b51344..f0e253ffb4 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -506,11 +506,28 @@ func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) { return nil, "", fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) } + var vcsRet *vcsCmd + var rootRet string + origDir := dir for len(dir) > len(srcRoot) { for _, vcs := range vcsList { if _, err := os.Stat(filepath.Join(dir, "."+vcs.cmd)); err == nil { - return vcs, filepath.ToSlash(dir[len(srcRoot)+1:]), nil + root := filepath.ToSlash(dir[len(srcRoot)+1:]) + // Record first VCS we find, but keep looking, + // to detect mistakes like one kind of VCS inside another. + if vcsRet == nil { + vcsRet = vcs + rootRet = root + continue + } + // Allow .git inside .git, which can arise due to submodules. + if vcsRet == vcs && vcs.cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return nil, "", fmt.Errorf("directory %q uses %s, but parent %q uses %s", + filepath.Join(srcRoot, rootRet), vcsRet.cmd, filepath.Join(srcRoot, root), vcs.cmd) } } @@ -523,9 +540,48 @@ func vcsFromDir(dir, srcRoot string) (vcs *vcsCmd, root string, err error) { dir = ndir } + if vcsRet != nil { + return vcsRet, rootRet, nil + } + return nil, "", fmt.Errorf("directory %q is not using a known version control system", origDir) } +// checkNestedVCS checks for an incorrectly-nested VCS-inside-VCS +// situation for dir, checking parents up until srcRoot. +func checkNestedVCS(vcs *vcsCmd, dir, srcRoot string) error { + if len(dir) <= len(srcRoot) || dir[len(srcRoot)] != filepath.Separator { + return fmt.Errorf("directory %q is outside source root %q", dir, srcRoot) + } + + otherDir := dir + for len(otherDir) > len(srcRoot) { + for _, otherVCS := range vcsList { + if _, err := os.Stat(filepath.Join(dir, "."+otherVCS.cmd)); err == nil { + // Allow expected vcs in original dir. + if otherDir == dir && otherVCS == vcs { + continue + } + // Allow .git inside .git, which can arise due to submodules. + if otherVCS == vcs && vcs.cmd == "git" { + continue + } + // Otherwise, we have one VCS inside a different VCS. + return fmt.Errorf("directory %q uses %s, but parent %q uses %s", dir, vcs.cmd, otherDir, otherVCS.cmd) + } + } + // Move to parent. + newDir := filepath.Dir(otherDir) + if len(newDir) >= len(otherDir) { + // Shouldn't happen, but just in case, stop. + break + } + otherDir = newDir + } + + return nil +} + // repoRoot represents a version control system, a repo, and a root of // where to put it on disk. type repoRoot struct { From 1900d34a1042834712c04b4492e573421d965df2 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 4 Oct 2017 13:24:49 -0400 Subject: [PATCH 03/42] [release-branch.go1.9] net/smtp: fix PlainAuth to refuse to send passwords to non-TLS servers PlainAuth originally refused to send passwords to non-TLS servers and was documented as such. In 2013, issue #5184 was filed objecting to the TLS requirement, despite the fact that it is spelled out clearly in RFC 4954. The only possibly legitimate use case raised was using PLAIN auth for connections to localhost, and the suggested fix was to let the server decide: if it advertises that PLAIN auth is OK, believe it. That approach was adopted in CL 8279043 and released in Go 1.1. Unfortunately, this is exactly wrong. 
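(Aside: a rough usage sketch, not part of this patch, of the client-side pattern the restored check assumes; the server address and credentials below are placeholders.)

package main

import (
	"log"
	"net/smtp"
)

func main() {
	// Placeholder host and credentials, for illustration only.
	auth := smtp.PlainAuth("", "user@example.com", "app-password", "mail.example.com")

	// SendMail upgrades the connection via STARTTLS when the server offers it,
	// so with this change the PLAIN credentials are only transmitted over an
	// encrypted connection (or to localhost, per the documented exception).
	err := smtp.SendMail("mail.example.com:587", auth,
		"sender@example.com", []string{"rcpt@example.com"},
		[]byte("Subject: hello\r\n\r\nhello\r\n"))
	if err != nil {
		log.Fatal(err)
	}
}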
The whole point of the TLS requirement is to make sure not to send the password to the wrong server or to a man-in-the-middle. Instead of implementing this rule, CL 8279043 blindly trusts the server, so that if a man-in-the-middle says "it's OK, you can send me your password," PlainAuth does. And the documentation was not updated to reflect any of this. This CL restores the original TLS check, as required by RFC 4954 and as promised in the documentation for PlainAuth. It then carves out a documented exception for connections made to localhost (defined as "localhost", "127.0.0.1", or "::1"). Cherry-pick of CL 68170. Change-Id: I1d3729bbd33aa2f11a03f4c000e6bb473164957b Reviewed-on: https://go-review.googlesource.com/68210 Run-TryBot: Russ Cox Reviewed-by: Chris Broadfoot --- src/net/smtp/auth.go | 33 ++++++++++++++++++--------------- src/net/smtp/smtp_test.go | 32 ++++++++++++++++++++++---------- 2 files changed, 40 insertions(+), 25 deletions(-) diff --git a/src/net/smtp/auth.go b/src/net/smtp/auth.go index 3f1339ebc5..fd1a472f93 100644 --- a/src/net/smtp/auth.go +++ b/src/net/smtp/auth.go @@ -44,26 +44,29 @@ type plainAuth struct { } // PlainAuth returns an Auth that implements the PLAIN authentication -// mechanism as defined in RFC 4616. -// The returned Auth uses the given username and password to authenticate -// on TLS connections to host and act as identity. Usually identity will be -// left blank to act as username. +// mechanism as defined in RFC 4616. The returned Auth uses the given +// username and password to authenticate to host and act as identity. +// Usually identity should be the empty string, to act as username. +// +// PlainAuth will only send the credentials if the connection is using TLS +// or is connected to localhost. Otherwise authentication will fail with an +// error, without sending the credentials. func PlainAuth(identity, username, password, host string) Auth { return &plainAuth{identity, username, password, host} } +func isLocalhost(name string) bool { + return name == "localhost" || name == "127.0.0.1" || name == "::1" +} + func (a *plainAuth) Start(server *ServerInfo) (string, []byte, error) { - if !server.TLS { - advertised := false - for _, mechanism := range server.Auth { - if mechanism == "PLAIN" { - advertised = true - break - } - } - if !advertised { - return "", nil, errors.New("unencrypted connection") - } + // Must have TLS, or else localhost server. + // Note: If TLS is not true, then we can't trust ANYTHING in ServerInfo. + // In particular, it doesn't matter if the server advertises PLAIN auth. + // That might just be the attacker saying + // "it's ok, you can trust me with your password." + if !server.TLS && !isLocalhost(server.Name) { + return "", nil, errors.New("unencrypted connection") } if server.Name != a.host { return "", nil, errors.New("wrong host name") diff --git a/src/net/smtp/smtp_test.go b/src/net/smtp/smtp_test.go index 9dbe3eb9ec..ff6585e69b 100644 --- a/src/net/smtp/smtp_test.go +++ b/src/net/smtp/smtp_test.go @@ -62,29 +62,41 @@ testLoop: } func TestAuthPlain(t *testing.T) { - auth := PlainAuth("foo", "bar", "baz", "servername") tests := []struct { - server *ServerInfo - err string + authName string + server *ServerInfo + err string }{ { - server: &ServerInfo{Name: "servername", TLS: true}, + authName: "servername", + server: &ServerInfo{Name: "servername", TLS: true}, }, { - // Okay; explicitly advertised by server. 
- server: &ServerInfo{Name: "servername", Auth: []string{"PLAIN"}}, + // OK to use PlainAuth on localhost without TLS + authName: "localhost", + server: &ServerInfo{Name: "localhost", TLS: false}, }, { - server: &ServerInfo{Name: "servername", Auth: []string{"CRAM-MD5"}}, - err: "unencrypted connection", + // NOT OK on non-localhost, even if server says PLAIN is OK. + // (We don't know that the server is the real server.) + authName: "servername", + server: &ServerInfo{Name: "servername", Auth: []string{"PLAIN"}}, + err: "unencrypted connection", }, { - server: &ServerInfo{Name: "attacker", TLS: true}, - err: "wrong host name", + authName: "servername", + server: &ServerInfo{Name: "servername", Auth: []string{"CRAM-MD5"}}, + err: "unencrypted connection", + }, + { + authName: "servername", + server: &ServerInfo{Name: "attacker", TLS: true}, + err: "wrong host name", }, } for i, tt := range tests { + auth := PlainAuth("foo", "bar", "baz", tt.authName) _, _, err := auth.Start(tt.server) got := "" if err != nil { From 815cad3ed026c904b5b54a45c5e044b9a1f4538c Mon Sep 17 00:00:00 2001 From: Tom Bergan Date: Mon, 28 Aug 2017 11:09:37 -0700 Subject: [PATCH 04/42] [release-branch.go1.9] doc/1.9: add mention of net/http.LocalAddrContextKey Fixes #21603 Reviewed-on: https://go-review.googlesource.com/59530 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/59670 Reviewed-by: Chris Broadfoot Reviewed-by: Tom Bergan Change-Id: Ie9732d57948593dc0306a4a649664eedb3de370c Reviewed-on: https://go-review.googlesource.com/68232 Reviewed-by: Chris Broadfoot --- doc/go1.9.html | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/doc/go1.9.html b/doc/go1.9.html index ce103cdb9a..222e0e9ba0 100644 --- a/doc/go1.9.html +++ b/doc/go1.9.html @@ -740,6 +740,11 @@ version of gccgo. and context.WithValue instead. + +
  •
+LocalAddrContextKey now contains
+the connection's actual network address instead of the interface address used by the listener.
+
  • Client & Transport changes:

    From 598433b17ab4c4edf569b98ebd0a2dcb62a22f49 Mon Sep 17 00:00:00 2001 From: Chris Broadfoot Date: Wed, 4 Oct 2017 11:09:15 -0700 Subject: [PATCH 05/42] [release-branch.go1.9] doc: document go1.9.1 and go1.8.4 Change-Id: Ib42fabc6829b6033373c0378713733f88e73e73d Reviewed-on: https://go-review.googlesource.com/68230 Reviewed-by: Russ Cox Reviewed-on: https://go-review.googlesource.com/68231 Reviewed-by: Chris Broadfoot --- doc/devel/release.html | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/doc/devel/release.html b/doc/devel/release.html index 8304522449..0697198cce 100644 --- a/doc/devel/release.html +++ b/doc/devel/release.html @@ -30,6 +30,12 @@ Go 1.9 is a major release of Go. Read the Go 1.9 Release Notes for more information.

    +

+go1.9.1 (released 2017/10/04) includes two security fixes.
+See the Go
+1.9.1 milestone on our issue tracker for details.
+

    +

    go1.8 (released 2017/02/16)

    @@ -63,6 +69,13 @@ See the Go 1.8.3 milestone on our issue tracker for details.

    +

+go1.8.4 (released 2017/10/04) includes two security fixes.
+It contains the same fixes as Go 1.9.1 and was released at the same time.
+See the Go
+1.8.4 milestone on our issue tracker for details.
+

    +

    go1.7 (released 2016/08/15)

    From 7f40c1214dd67cf171a347a5230da70bd8e10d32 Mon Sep 17 00:00:00 2001 From: Chris Broadfoot Date: Wed, 4 Oct 2017 11:37:20 -0700 Subject: [PATCH 06/42] [release-branch.go1.9] go1.9.1 Change-Id: I711b38738a7f6fade42a2821908234940f3cf280 Reviewed-on: https://go-review.googlesource.com/68233 Reviewed-by: Russ Cox --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index f213576692..66a2beedba 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -go1.9 \ No newline at end of file +go1.9.1 \ No newline at end of file From 93322a5b3d2821ce0af70b186d8b0bd5dbbe647e Mon Sep 17 00:00:00 2001 From: Chris Broadfoot Date: Wed, 4 Oct 2017 13:20:45 -0700 Subject: [PATCH 07/42] [release-branch.go1.9] doc: add missing "Minor revisions" header for 1.9 Change-Id: Ib042e472e62f48a6afaba1762beaf102a9b99cf5 Reviewed-on: https://go-review.googlesource.com/68290 Reviewed-by: Russ Cox Reviewed-on: https://go-review.googlesource.com/68291 Reviewed-by: Chris Broadfoot --- doc/devel/release.html | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/devel/release.html b/doc/devel/release.html index 0697198cce..bb30ff8946 100644 --- a/doc/devel/release.html +++ b/doc/devel/release.html @@ -30,6 +30,8 @@ Go 1.9 is a major release of Go. Read the Go 1.9 Release Notes for more information.

    +

    Minor revisions

    +

    go1.9.1 (released 2017/10/04) includes two security fixes. See the Go From ed3b0d63b72ffb3fc00e98af295a3add20943808 Mon Sep 17 00:00:00 2001 From: Alex Brainman Date: Mon, 7 Aug 2017 12:57:58 +1000 Subject: [PATCH 08/42] [release-branch.go1.9] internal/poll: add tests for Windows file and serial ports I also wanted to test net sockets, but I do not know how to access their file handles. So I did not implement socket tests. Updates #21172 Change-Id: I5062c0e65a817571d755397d60762c175f9791ce Reviewed-on: https://go-review.googlesource.com/53530 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/71131 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Alex Brainman --- src/internal/poll/export_windows_test.go | 17 ++++ src/internal/poll/fd_windows.go | 14 ++- src/internal/poll/fd_windows_test.go | 111 +++++++++++++++++++++++ 3 files changed, 139 insertions(+), 3 deletions(-) create mode 100644 src/internal/poll/export_windows_test.go create mode 100644 src/internal/poll/fd_windows_test.go diff --git a/src/internal/poll/export_windows_test.go b/src/internal/poll/export_windows_test.go new file mode 100644 index 0000000000..88ed71ad84 --- /dev/null +++ b/src/internal/poll/export_windows_test.go @@ -0,0 +1,17 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Export guts for testing on windows. +// Since testing imports os and os imports internal/poll, +// the internal/poll tests can not be in package poll. + +package poll + +var ( + LogInitFD = &logInitFD +) + +func (fd *FD) IsPartOfNetpoll() bool { + return fd.pd.runtimeCtx != 0 +} diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index 655f9348c6..f416158bbc 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -295,6 +295,9 @@ type FD struct { isDir bool } +// logInitFD is set by tests to enable file descriptor initialization logging. +var logInitFD func(net string, fd *FD, err error) + // Init initializes the FD. The Sysfd field should already be set. // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), @@ -319,6 +322,7 @@ func (fd *FD) Init(net string) (string, error) { return "", errors.New("internal error: unknown network type " + net) } + var err error if !fd.isFile && !fd.isConsole && !fd.isDir { // Only call init for a network socket. // This means that we don't add files to the runtime poller. @@ -331,9 +335,13 @@ func (fd *FD) Init(net string) (string, error) { // somehow call ExecIO, then ExecIO, and therefore the // calling method, will return an error, because // fd.pd.runtimeCtx will be 0. - if err := fd.pd.init(fd); err != nil { - return "", err - } + err = fd.pd.init(fd) + } + if logInitFD != nil { + logInitFD(net, fd, err) + } + if err != nil { + return "", err } if hasLoadSetFileCompletionNotificationModes { // We do not use events, so we can skip them always. diff --git a/src/internal/poll/fd_windows_test.go b/src/internal/poll/fd_windows_test.go new file mode 100644 index 0000000000..e3ca0e26ac --- /dev/null +++ b/src/internal/poll/fd_windows_test.go @@ -0,0 +1,111 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
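// Overview of this new test file (summary added for clarity):
// export_windows_test.go above exposes the logInitFD hook as poll.LogInitFD,
// and the init func below installs logFD there, so every FD.Init call made
// while the tests run is recorded in loggedFDs. checkFileIsNotPartOfNetpoll
// then looks up a file's handle in that log and uses FD.IsPartOfNetpoll to
// verify that regular files and serial ports were never added to the
// runtime netpoller.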
+ +package poll_test + +import ( + "fmt" + "internal/poll" + "os" + "sync" + "syscall" + "testing" +) + +type loggedFD struct { + Net string + FD *poll.FD + Err error +} + +var ( + logMu sync.Mutex + loggedFDs map[syscall.Handle]*loggedFD +) + +func logFD(net string, fd *poll.FD, err error) { + logMu.Lock() + defer logMu.Unlock() + + loggedFDs[fd.Sysfd] = &loggedFD{ + Net: net, + FD: fd, + Err: err, + } +} + +func init() { + loggedFDs = make(map[syscall.Handle]*loggedFD) + *poll.LogInitFD = logFD +} + +func findLoggedFD(h syscall.Handle) (lfd *loggedFD, found bool) { + logMu.Lock() + defer logMu.Unlock() + + lfd, found = loggedFDs[h] + return lfd, found +} + +// checkFileIsNotPartOfNetpoll verifies that f is not managed by netpoll. +// It returns error, if check fails. +func checkFileIsNotPartOfNetpoll(f *os.File) error { + lfd, found := findLoggedFD(syscall.Handle(f.Fd())) + if !found { + return fmt.Errorf("%v fd=%v: is not found in the log", f.Name(), f.Fd()) + } + if lfd.FD.IsPartOfNetpoll() { + return fmt.Errorf("%v fd=%v: is part of netpoll, but should not be (logged: net=%v err=%v)", f.Name(), f.Fd(), lfd.Net, lfd.Err) + } + return nil +} + +func TestFileFdsAreInitialised(t *testing.T) { + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + f, err := os.Open(exe) + if err != nil { + t.Fatal(err) + } + defer f.Close() + + err = checkFileIsNotPartOfNetpoll(f) + if err != nil { + t.Fatal(err) + } +} + +func TestSerialFdsAreInitialised(t *testing.T) { + for _, name := range []string{"COM1", "COM2", "COM3", "COM4"} { + t.Run(name, func(t *testing.T) { + h, err := syscall.CreateFile(syscall.StringToUTF16Ptr(name), + syscall.GENERIC_READ|syscall.GENERIC_WRITE, + 0, + nil, + syscall.OPEN_EXISTING, + syscall.FILE_ATTRIBUTE_NORMAL|syscall.FILE_FLAG_OVERLAPPED, + 0) + if err != nil { + if errno, ok := err.(syscall.Errno); ok { + switch errno { + case syscall.ERROR_FILE_NOT_FOUND, + syscall.ERROR_ACCESS_DENIED: + t.Log("Skipping: ", err) + return + } + } + t.Fatal(err) + } + f := os.NewFile(uintptr(h), name) + defer f.Close() + + err = checkFileIsNotPartOfNetpoll(f) + if err != nil { + t.Fatal(err) + } + }) + } +} From 724638c9d8cb0ba2dda71eb1fb18f96174f4866f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Martin=20M=C3=B6hrmann?= Date: Tue, 22 Aug 2017 20:00:02 +0200 Subject: [PATCH 09/42] [release-branch.go1.9] crypto/x509: skip TestSystemRoots MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit golang.org/cl/36941 enabled loading of all trusted certs on darwin for the non-cgo execSecurityRoots. The corresponding cgo version golang.org/cl/36942 for systemRootsPool has not been merged yet. 
This tests fails reliably on some darwin systems: --- FAIL: TestSystemRoots (1.28s) root_darwin_test.go:31: cgo sys roots: 353.552363ms root_darwin_test.go:32: non-cgo sys roots: 921.85297ms root_darwin_test.go:44: got 169 roots root_darwin_test.go:44: got 455 roots root_darwin_test.go:73: insufficient overlap between cgo and non-cgo roots; want at least 227, have 168 FAIL FAIL crypto/x509 2.445s Updates #16532 Updates #21416 Change-Id: I52c2c847651fb3621fdb6ab858ebe8e28894c201 Reviewed-on: https://go-review.googlesource.com/57830 Run-TryBot: Martin Möhrmann TryBot-Result: Gobot Gobot Reviewed-by: Joe Tsai Reviewed-on: https://go-review.googlesource.com/70847 Run-TryBot: Russ Cox Reviewed-by: Martin Möhrmann --- src/crypto/x509/root_darwin_test.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/crypto/x509/root_darwin_test.go b/src/crypto/x509/root_darwin_test.go index 2784ce2f0f..d935cc4e9a 100644 --- a/src/crypto/x509/root_darwin_test.go +++ b/src/crypto/x509/root_darwin_test.go @@ -16,6 +16,11 @@ func TestSystemRoots(t *testing.T) { t.Skipf("skipping on %s/%s, no system root", runtime.GOOS, runtime.GOARCH) } + switch runtime.GOOS { + case "darwin": + t.Skipf("skipping on %s/%s until cgo part of golang.org/issue/16532 has been implemented.", runtime.GOOS, runtime.GOARCH) + } + t0 := time.Now() sysRoots := systemRootsPool() // actual system roots sysRootsDuration := time.Since(t0) From c6388d381ee4b62c56b499bc8a8d3127af86faa3 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Thu, 24 Aug 2017 15:06:26 -0400 Subject: [PATCH 10/42] [release-branch.go1.9] runtime: capture runtimeInitTime after nanotime is initialized CL 36428 changed the way nanotime works so on Darwin and Windows it now depends on runtime.startNano, which is computed at runtime.init time. Unfortunately, the `runtimeInitTime = nanotime()` initialization happened *before* runtime.init, so on these platforms runtimeInitTime is set incorrectly. The one (and only) consequence of this is that the start time printed in gctrace lines is bogus: gc 1 18446653480.186s 0%: 0.092+0.47+0.038 ms clock, 0.37+0.15/0.81/1.8+0.15 ms cpu, 4->4->1 MB, 5 MB goal, 8 P To fix this, this commit moves the runtimeInitTime initialization to shortly after runtime.init, at which point nanotime is safe to use. This also requires changing the condition in newproc1 that currently uses runtimeInitTime != 0 simply to detect whether or not the main M has started. Since runtimeInitTime could genuinely be 0 now, this introduces a separate flag to newproc1. Fixes #21554. Change-Id: Id874a4b912d3fa3d22f58d01b31ffb3548266d3b Reviewed-on: https://go-review.googlesource.com/58690 Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot Reviewed-by: Rick Hudson Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70848 Run-TryBot: Russ Cox Reviewed-by: Austin Clements --- src/runtime/proc.go | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index ed333bb92e..a631a016a3 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -96,6 +96,9 @@ var main_init_done chan bool //go:linkname main_main main.main func main_main() +// mainStarted indicates that the main M has started. +var mainStarted bool + // runtimeInitTime is the nanotime() at which the runtime started. var runtimeInitTime int64 @@ -119,8 +122,8 @@ func main() { maxstacksize = 250000000 } - // Record when the world started. - runtimeInitTime = nanotime() + // Allow newproc to start new Ms. 
+ mainStarted = true systemstack(func() { newm(sysmon, nil) @@ -148,6 +151,10 @@ func main() { } }() + // Record when the world started. Must be after runtime_init + // because nanotime on some platforms depends on startNano. + runtimeInitTime = nanotime() + gcenable() main_init_done = make(chan bool) @@ -3024,7 +3031,7 @@ func newproc1(fn *funcval, argp *uint8, narg int32, nret int32, callerpc uintptr } runqput(_p_, newg, true) - if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && runtimeInitTime != 0 { + if atomic.Load(&sched.npidle) != 0 && atomic.Load(&sched.nmspinning) == 0 && mainStarted { wakep() } _g_.m.locks-- From 2e4358c960dbdea925ff879cca7f5de57f2e8629 Mon Sep 17 00:00:00 2001 From: Dmitri Shuralyov Date: Thu, 17 Aug 2017 19:30:34 -0400 Subject: [PATCH 11/42] [release-branch.go1.9] time: fix documentation of Round, Truncate behavior for d <= 0 Saying that they return t unchanged is misleading, because they return a modified t, stripped of any monotonic clock reading, as of Go 1.9. Fixes #21485. Change-Id: Icddf8813aed3d687fcefcd2fe542829438be6a0a Reviewed-on: https://go-review.googlesource.com/56690 Reviewed-by: Avelino Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70846 Run-TryBot: Russ Cox TryBot-Result: Russ Cox --- src/time/time.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/time/time.go b/src/time/time.go index 8a29eef263..0f29b0ff93 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -1383,7 +1383,7 @@ func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) T } // Truncate returns the result of rounding t down to a multiple of d (since the zero time). -// If d <= 0, Truncate returns t unchanged. +// If d <= 0, Truncate returns t stripped of any monotonic clock reading but otherwise unchanged. // // Truncate operates on the time as an absolute duration since the // zero time; it does not operate on the presentation form of the @@ -1400,7 +1400,7 @@ func (t Time) Truncate(d Duration) Time { // Round returns the result of rounding t to the nearest multiple of d (since the zero time). // The rounding behavior for halfway values is to round up. -// If d <= 0, Round returns t unchanged. +// If d <= 0, Round returns t stripped of any monotonic clock reading but otherwise unchanged. // // Round operates on the time as an absolute duration since the // zero time; it does not operate on the presentation form of the From ccd5abc10555ed39b6b8b1e5c12e5ecce734b72c Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Sun, 3 Sep 2017 12:31:14 -0700 Subject: [PATCH 12/42] [release-branch.go1.9] cmd/compile: simplify "missing function body" error message Fixes #21747. 
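For reference, a minimal (hypothetical) source file that triggers the shortened diagnostic when the package is built as pure Go, with no accompanying assembly:

package p

// Declared without a body; with no matching assembly implementation in the
// package, the compiler now reports simply "missing function body".
func missing() int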
Change-Id: I6a68370be3b7510ce364ddd1e41a1d767ce3a67f Reviewed-on: https://go-review.googlesource.com/61311 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer Reviewed-on: https://go-review.googlesource.com/70972 Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/noder.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 3977be1d73..5f19948222 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -401,7 +401,7 @@ func (p *noder) funcDecl(fun *syntax.FuncDecl) *Node { f.Func.Endlineno = lineno } else { if pure_go || strings.HasPrefix(f.funcname(), "init.") { - yyerrorl(f.Pos, "missing function body for %q", f.funcname()) + yyerrorl(f.Pos, "missing function body") } } From 8c7fa95ad3420779ecffc9c72afc82bf074c6a88 Mon Sep 17 00:00:00 2001 From: "Bryan C. Mills" Date: Fri, 8 Sep 2017 14:36:43 -0400 Subject: [PATCH 13/42] [release-branch.go1.9] expvar: make (*Map).Init clear existing keys fixes #21619 Change-Id: I5bb513dfc8cac875b06a262eec40b5863ae23a4c Reviewed-on: https://go-review.googlesource.com/62370 Reviewed-by: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-on: https://go-review.googlesource.com/70973 Run-TryBot: Russ Cox --- src/expvar/expvar.go | 12 +++++++++++- src/expvar/expvar_test.go | 22 ++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) diff --git a/src/expvar/expvar.go b/src/expvar/expvar.go index 64dae70c62..8290e0bd72 100644 --- a/src/expvar/expvar.go +++ b/src/expvar/expvar.go @@ -125,7 +125,17 @@ func (v *Map) String() string { return b.String() } -func (v *Map) Init() *Map { return v } +// Init removes all keys from the map. +func (v *Map) Init() *Map { + v.keysMu.Lock() + defer v.keysMu.Unlock() + v.keys = v.keys[:0] + v.m.Range(func(k, _ interface{}) bool { + v.m.Delete(k) + return true + }) + return v +} // updateKeys updates the sorted list of keys in v.keys. func (v *Map) addKey(key string) { diff --git a/src/expvar/expvar_test.go b/src/expvar/expvar_test.go index 7014063d4f..728e763896 100644 --- a/src/expvar/expvar_test.go +++ b/src/expvar/expvar_test.go @@ -161,6 +161,28 @@ func BenchmarkStringSet(b *testing.B) { }) } +func TestMapInit(t *testing.T) { + RemoveAll() + colors := NewMap("bike-shed-colors") + colors.Add("red", 1) + colors.Add("blue", 1) + colors.Add("chartreuse", 1) + + n := 0 + colors.Do(func(KeyValue) { n++ }) + if n != 3 { + t.Errorf("after three Add calls with distinct keys, Do should invoke f 3 times; got %v", n) + } + + colors.Init() + + n = 0 + colors.Do(func(KeyValue) { n++ }) + if n != 0 { + t.Errorf("after Init, Do should invoke f 0 times; got %v", n) + } +} + func TestMapCounter(t *testing.T) { RemoveAll() colors := NewMap("bike-shed-colors") From 5c48811aec6fab7979edcde4de09a0d2380fb175 Mon Sep 17 00:00:00 2001 From: "zhongtao.chen" Date: Tue, 22 Aug 2017 10:33:10 +0800 Subject: [PATCH 14/42] [release-branch.go1.9] cmd/compile: limit the number of simultaneously opened files to avoid EMFILE/ENFILE errors If the Go packages with enough source files,it will cause EMFILE/ENFILE error, Fix this by limiting the number of simultaneously opened files. 
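The fix uses a buffered channel as a counting semaphore. A standalone sketch of the same pattern follows; the function name and per-file work are illustrative, and only the GOMAXPROCS+10 limit mirrors the patch:

package main

import (
	"os"
	"runtime"
	"sync"
)

// parseAll processes many files concurrently while keeping at most
// GOMAXPROCS+10 of them open at the same time, so large packages no longer
// run into EMFILE/ENFILE limits.
func parseAll(filenames []string) {
	sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
	var wg sync.WaitGroup
	for _, name := range filenames {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			sem <- struct{}{}        // acquire a slot before opening the file
			defer func() { <-sem }() // release the slot when done
			f, err := os.Open(name)
			if err != nil {
				return
			}
			defer f.Close()
			// ... read and parse f here ...
		}(name)
	}
	wg.Wait()
}

func main() { parseAll(os.Args[1:]) }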
Fixes #21621 Change-Id: I8555d79242d2f90771e37e073b7540fc7194a64a Reviewed-on: https://go-review.googlesource.com/57751 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/63752 Run-TryBot: Russ Cox --- src/cmd/compile/internal/gc/noder.go | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 5f19948222..7a1ea2707c 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -7,6 +7,7 @@ package gc import ( "fmt" "os" + "runtime" "strconv" "strings" "unicode/utf8" @@ -20,12 +21,16 @@ import ( func parseFiles(filenames []string) uint { var lines uint var noders []*noder + // Limit the number of simultaneously open files. + sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10) for _, filename := range filenames { p := &noder{err: make(chan syntax.Error)} noders = append(noders, p) go func(filename string) { + sem <- struct{}{} + defer func() { <-sem }() defer close(p.err) base := src.NewFileBase(filename, absFilename(filename)) From 0b55d8dbfc6892433b0329b3834d50f72d90db58 Mon Sep 17 00:00:00 2001 From: David Crawshaw Date: Wed, 13 Sep 2017 19:04:25 -0400 Subject: [PATCH 15/42] [release-branch.go1.9] cmd/compile: replace GOROOT in //line directives The compiler replaces any path of the form /path/to/goroot/src/net/port.go with GOROOT/src/net/port.go so that the same object file is produced if the GOROOT is moved. It was skipping this transformation for any absolute path into the GOROOT that came from //line directives, such as those generated by cmd/cgo. Fixes #21373 Fixes #21720 Fixes #21825 Change-Id: I2784c701b4391cfb92e23efbcb091a84957d61dd Reviewed-on: https://go-review.googlesource.com/63693 Run-TryBot: David Crawshaw Reviewed-by: Matthew Dempsky Reviewed-on: https://go-review.googlesource.com/70975 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot --- src/cmd/compile/internal/gc/noder.go | 6 +++++- src/cmd/compile/internal/syntax/nodes_test.go | 2 +- src/cmd/compile/internal/syntax/parser.go | 16 +++++++++++----- .../compile/internal/syntax/parser_test.go | 14 +++++++++++--- .../compile/internal/syntax/printer_test.go | 2 +- src/cmd/compile/internal/syntax/syntax.go | 19 +++++++++++++------ src/cmd/internal/objabi/line.go | 2 +- 7 files changed, 43 insertions(+), 18 deletions(-) diff --git a/src/cmd/compile/internal/gc/noder.go b/src/cmd/compile/internal/gc/noder.go index 7a1ea2707c..54c48434cc 100644 --- a/src/cmd/compile/internal/gc/noder.go +++ b/src/cmd/compile/internal/gc/noder.go @@ -41,7 +41,7 @@ func parseFiles(filenames []string) uint { } defer f.Close() - p.file, _ = syntax.Parse(base, f, p.error, p.pragma, syntax.CheckBranches) // errors are tracked via p.error + p.file, _ = syntax.Parse(base, f, p.error, p.pragma, fileh, syntax.CheckBranches) // errors are tracked via p.error }(filename) } @@ -70,6 +70,10 @@ func yyerrorpos(pos src.Pos, format string, args ...interface{}) { var pathPrefix string +func fileh(name string) string { + return objabi.AbsFile("", name, pathPrefix) +} + func absFilename(name string) string { return objabi.AbsFile(Ctxt.Pathname, name, pathPrefix) } diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go index be9d5d897c..1bba9eeacf 100644 --- a/src/cmd/compile/internal/syntax/nodes_test.go +++ b/src/cmd/compile/internal/syntax/nodes_test.go @@ -291,7 +291,7 @@ func testPos(t *testing.T, list []test, 
prefix, suffix string, extract func(*Fil } // build syntaxt tree - file, err := ParseBytes(nil, []byte(src), nil, nil, 0) + file, err := ParseBytes(nil, []byte(src), nil, nil, nil, 0) if err != nil { t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp) continue diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index bcf56d5faa..b9129b0d9c 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -16,9 +16,10 @@ const debug = false const trace = false type parser struct { - base *src.PosBase - errh ErrorHandler - mode Mode + base *src.PosBase + errh ErrorHandler + fileh FilenameHandler + mode Mode scanner first error // first error encountered @@ -29,9 +30,10 @@ type parser struct { indent []byte // tracing support } -func (p *parser) init(base *src.PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) { +func (p *parser) init(base *src.PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) { p.base = base p.errh = errh + p.fileh = fileh p.mode = mode p.scanner.init( r, @@ -76,7 +78,11 @@ func (p *parser) updateBase(line, col uint, text string) { p.error_at(p.pos_at(line, col+uint(i+1)), "invalid line number: "+nstr) return } - p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), text[:i], uint(n)) + absFile := text[:i] + if p.fileh != nil { + absFile = p.fileh(absFile) + } + p.base = src.NewLinePragmaBase(src.MakePos(p.base.Pos().Base(), line, col), absFile, uint(n)) } func (p *parser) got(tok token) bool { diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go index 4c317dab60..0478088ec8 100644 --- a/src/cmd/compile/internal/syntax/parser_test.go +++ b/src/cmd/compile/internal/syntax/parser_test.go @@ -131,7 +131,7 @@ func verifyPrint(filename string, ast1 *File) { panic(err) } - ast2, err := ParseBytes(src.NewFileBase(filename, filename), buf1.Bytes(), nil, nil, 0) + ast2, err := ParseBytes(src.NewFileBase(filename, filename), buf1.Bytes(), nil, nil, nil, 0) if err != nil { panic(err) } @@ -155,7 +155,7 @@ func verifyPrint(filename string, ast1 *File) { } func TestIssue17697(t *testing.T) { - _, err := ParseBytes(nil, nil, nil, nil, 0) // return with parser error, don't panic + _, err := ParseBytes(nil, nil, nil, nil, nil, 0) // return with parser error, don't panic if err == nil { t.Errorf("no error reported") } @@ -199,8 +199,16 @@ func TestLineDirectives(t *testing.T) { // test effect of //line directive on (relative) position information {"//line foo:123\n foo", "syntax error: package statement must be first", "foo", 123 - linebase, 3}, {"//line foo:123\n//line bar:345\nfoo", "syntax error: package statement must be first", "bar", 345 - linebase, 0}, + + {"//line " + runtime.GOROOT() + "/src/a/a.go:123\n foo", "syntax error: package statement must be first", "$GOROOT/src/a/a.go", 123 - linebase, 3}, } { - _, err := ParseBytes(nil, []byte(test.src), nil, nil, 0) + fileh := func(name string) string { + if strings.HasPrefix(name, runtime.GOROOT()) { + return "$GOROOT" + name[len(runtime.GOROOT()):] + } + return name + } + _, err := ParseBytes(nil, []byte(test.src), nil, nil, fileh, 0) if err == nil { t.Errorf("%s: no error reported", test.src) continue diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go index 14652f4ac6..bbf75a957d 100644 --- 
a/src/cmd/compile/internal/syntax/printer_test.go +++ b/src/cmd/compile/internal/syntax/printer_test.go @@ -29,7 +29,7 @@ func TestPrintString(t *testing.T) { "package p; type _ = int; type T1 = struct{}; type ( _ = *struct{}; T2 = float32 )", // TODO(gri) expand } { - ast, err := ParseBytes(nil, []byte(want), nil, nil, 0) + ast, err := ParseBytes(nil, []byte(want), nil, nil, nil, 0) if err != nil { t.Error(err) continue diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go index ed5e254724..f58d5efd29 100644 --- a/src/cmd/compile/internal/syntax/syntax.go +++ b/src/cmd/compile/internal/syntax/syntax.go @@ -39,11 +39,15 @@ type ErrorHandler func(err error) // appropriate. type Pragma uint16 -// A PragmaHandler is used to process //line and //go: directives as +// A PragmaHandler is used to process //go: directives as // they're scanned. The returned Pragma value will be unioned into the // next FuncDecl node. type PragmaHandler func(pos src.Pos, text string) Pragma +// A FilenameHandler is used to process each filename encountered +// in //line directives. The returned value is used as the absolute filename. +type FilenameHandler func(name string) string + // Parse parses a single Go source file from src and returns the corresponding // syntax tree. If there are errors, Parse will return the first error found, // and a possibly partially constructed syntax tree, or nil if no correct package @@ -55,8 +59,11 @@ type PragmaHandler func(pos src.Pos, text string) Pragma // // If a PragmaHandler is provided, it is called with each pragma encountered. // +// If a FilenameHandler is provided, it is called to process each filename +// encountered in //line directives. +// // The Mode argument is currently ignored. -func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) { +func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) (_ *File, first error) { defer func() { if p := recover(); p != nil { if err, ok := p.(Error); ok { @@ -68,14 +75,14 @@ func Parse(base *src.PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHand }() var p parser - p.init(base, src, errh, pragh, mode) + p.init(base, src, errh, pragh, fileh, mode) p.next() return p.fileOrNil(), p.first } // ParseBytes behaves like Parse but it reads the source from the []byte slice provided. -func ParseBytes(base *src.PosBase, src []byte, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) { - return Parse(base, &bytesReader{src}, errh, pragh, mode) +func ParseBytes(base *src.PosBase, src []byte, errh ErrorHandler, pragh PragmaHandler, fileh FilenameHandler, mode Mode) (*File, error) { + return Parse(base, &bytesReader{src}, errh, pragh, fileh, mode) } type bytesReader struct { @@ -101,5 +108,5 @@ func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mod return nil, err } defer f.Close() - return Parse(src.NewFileBase(filename, filename), f, errh, pragh, mode) + return Parse(src.NewFileBase(filename, filename), f, errh, pragh, nil, mode) } diff --git a/src/cmd/internal/objabi/line.go b/src/cmd/internal/objabi/line.go index ed509b7001..1c671b211f 100644 --- a/src/cmd/internal/objabi/line.go +++ b/src/cmd/internal/objabi/line.go @@ -44,7 +44,7 @@ func AbsFile(dir, file, pathPrefix string) string { abs = "??" } - return filepath.Clean(abs) + return abs } // Does s have t as a path prefix? 
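A hedged sketch of the rewrite this change applies to absolute //line paths under GOROOT; it mirrors the fileh helper in the parser test above rather than the exact objabi.AbsFile logic:

package main

import (
	"fmt"
	"runtime"
	"strings"
)

// rewriteGoroot maps an absolute path under the local GOROOT to a
// $GOROOT-relative form, so the emitted object file no longer depends on
// where the toolchain happens to be installed.
func rewriteGoroot(name string) string {
	if strings.HasPrefix(name, runtime.GOROOT()) {
		return "$GOROOT" + name[len(runtime.GOROOT()):]
	}
	return name
}

func main() {
	fmt.Println(rewriteGoroot(runtime.GOROOT() + "/src/net/port.go"))
	// e.g. prints: $GOROOT/src/net/port.go
}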
From bd34e74134645b7a7109dbf0361eb1ceb1c3d1ba Mon Sep 17 00:00:00 2001 From: hagen1778 Date: Tue, 19 Sep 2017 20:28:11 +0300 Subject: [PATCH 16/42] [release-branch.go1.9] log: fix data race on log.Output There was unprotected access to Logger.flag in log.Output which could lead to data race in cases when log.SetFlags called simultaneously. For example, "hot" switching on/off debug-mode for Logger by log.SetFlags while application still writing logs. Fixes #21935 Change-Id: I36be25f23cad44cde62ed1af28a30d276400e1b8 Reviewed-on: https://go-review.googlesource.com/64710 Reviewed-by: Joe Tsai Run-TryBot: Joe Tsai TryBot-Result: Gobot Gobot Reviewed-on: https://go-review.googlesource.com/70976 Run-TryBot: Russ Cox --- src/log/log.go | 6 +----- src/log/log_test.go | 11 +++++++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/src/log/log.go b/src/log/log.go index 587904b11c..e8e0c96636 100644 --- a/src/log/log.go +++ b/src/log/log.go @@ -147,11 +147,7 @@ func (l *Logger) formatHeader(buf *[]byte, t time.Time, file string, line int) { // provided for generality, although at the moment on all pre-defined // paths it will be 2. func (l *Logger) Output(calldepth int, s string) error { - // Get time early if we need it. - var now time.Time - if l.flag&(Ldate|Ltime|Lmicroseconds) != 0 { - now = time.Now() - } + now := time.Now() // get this early. var file string var line int l.mu.Lock() diff --git a/src/log/log_test.go b/src/log/log_test.go index 966fdf306b..adc15e7e8e 100644 --- a/src/log/log_test.go +++ b/src/log/log_test.go @@ -88,6 +88,17 @@ func TestOutput(t *testing.T) { } } +func TestOutputRace(t *testing.T) { + var b bytes.Buffer + l := New(&b, "", 0) + for i := 0; i < 100; i++ { + go func() { + l.SetFlags(0) + }() + l.Output(0, "") + } +} + func TestFlagAndPrefixSetting(t *testing.T) { var b bytes.Buffer l := New(&b, "Test:", LstdFlags) From ff8289f87936bd840daf6e704885f62399d01126 Mon Sep 17 00:00:00 2001 From: Cherry Zhang Date: Thu, 31 Aug 2017 22:02:37 -0400 Subject: [PATCH 17/42] [release-branch.go1.9] reflect: fix pointer past-the-end in Call with zero-sized return value If a function with nonzero frame but zero-sized return value is Call'd, we may write a past-the-end pointer in preparing the return Values. Fix by return the zero value for zero-sized return value. Fixes #21717. Change-Id: I5351cd86d898467170a888b4c3fc9392f0e7aa3b Reviewed-on: https://go-review.googlesource.com/60811 Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-by: Austin Clements Reviewed-on: https://go-review.googlesource.com/70971 Run-TryBot: Russ Cox Reviewed-by: Cherry Zhang --- src/reflect/all_test.go | 25 +++++++++++++++++++++++++ src/reflect/value.go | 10 ++++++++-- 2 files changed, 33 insertions(+), 2 deletions(-) diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 5a5c91b751..33694fd10e 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -19,6 +19,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "testing" "time" "unicode" @@ -1546,6 +1547,30 @@ func TestCallWithStruct(t *testing.T) { } } +func TestCallReturnsEmpty(t *testing.T) { + // Issue 21717: past-the-end pointer write in Call with + // nonzero-sized frame and zero-sized return value. + runtime.GC() + var finalized uint32 + f := func() (emptyStruct, *int) { + i := new(int) + runtime.SetFinalizer(i, func(*int) { atomic.StoreUint32(&finalized, 1) }) + return emptyStruct{}, i + } + v := ValueOf(f).Call(nil)[0] // out[0] should not alias out[1]'s memory, so the finalizer should run. 
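	// Before this fix, the reflect.Value for a zero-sized result pointed at
	// args+off, which for a size-0 type is the same address as the next
	// return value; ret[0] then aliased the allocation holding the *int and
	// kept it reachable, so the finalizer never ran. Returning Zero(tv)
	// instead avoids that aliasing.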
+ timeout := time.After(5 * time.Second) + for atomic.LoadUint32(&finalized) == 0 { + select { + case <-timeout: + t.Fatal("finalizer did not run") + default: + } + runtime.Gosched() + runtime.GC() + } + runtime.KeepAlive(v) +} + func BenchmarkCall(b *testing.B) { fv := ValueOf(func(a, b string) {}) b.ReportAllocs() diff --git a/src/reflect/value.go b/src/reflect/value.go index 8488e8dec1..2b0ca05c70 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -456,8 +456,14 @@ func (v Value) call(op string, in []Value) []Value { tv := t.Out(i) a := uintptr(tv.Align()) off = (off + a - 1) &^ (a - 1) - fl := flagIndir | flag(tv.Kind()) - ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl} + if tv.Size() != 0 { + fl := flagIndir | flag(tv.Kind()) + ret[i] = Value{tv.common(), unsafe.Pointer(uintptr(args) + off), fl} + } else { + // For zero-sized return value, args+off may point to the next object. + // In this case, return the zero value instead. + ret[i] = Zero(tv) + } off += tv.Size() } } From 1ded8334f7e99f6fd9284dd71f9fcf72da10c3c8 Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Tue, 19 Sep 2017 14:28:03 -0700 Subject: [PATCH 18/42] [release-branch.go1.9] cmd/compile/internal/syntax: fix source buffer refilling The previous code seems to have an off-by-1 in it somewhere, the consequence being that we didn't properly preserve all of the old buffer contents that we intended to. After spending a while looking at the existing window-shifting logic, I wasn't able to understand exactly how it was supposed to work or where the issue was, so I rewrote it to be (at least IMO) more obviously correct. Fixes #21938. Change-Id: I1ed7bbc1e1751a52ab5f7cf0411ae289586dc345 Reviewed-on: https://go-review.googlesource.com/64830 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer Reviewed-on: https://go-review.googlesource.com/70977 Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/syntax/scanner_test.go | 13 +++++++++++++ src/cmd/compile/internal/syntax/source.go | 11 ++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go index e434db9a91..53995e0c79 100644 --- a/src/cmd/compile/internal/syntax/scanner_test.go +++ b/src/cmd/compile/internal/syntax/scanner_test.go @@ -7,6 +7,7 @@ package syntax import ( "fmt" "os" + "strings" "testing" ) @@ -367,3 +368,15 @@ func TestScanErrors(t *testing.T) { } } } + +func TestIssue21938(t *testing.T) { + s := "/*" + strings.Repeat(" ", 4089) + "*/ .5" + + var got scanner + got.init(strings.NewReader(s), nil, nil) + got.next() + + if got.tok != _Literal || got.lit != ".5" { + t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5") + } +} diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go index 93547213c0..4e3551225a 100644 --- a/src/cmd/compile/internal/syntax/source.go +++ b/src/cmd/compile/internal/syntax/source.go @@ -164,11 +164,12 @@ func (s *source) fill() { s.lit = append(s.lit, s.buf[s.suf:s.r0]...) 
s.suf = 1 // == s.r0 after slide below } - s.offs += s.r0 - 1 - r := s.r - s.r0 + 1 // last read char plus one byte - s.w = r + copy(s.buf[r:], s.buf[s.r:s.w]) - s.r = r - s.r0 = 1 + n := s.r0 - 1 + copy(s.buf[:], s.buf[n:s.w]) + s.offs += n + s.r0 = 1 // eqv: s.r0 -= n + s.r -= n + s.w -= n } // read more data: try a limited number of times From 8d4279c11157a7f6b645f8085d23436f322ffb58 Mon Sep 17 00:00:00 2001 From: Alex Brainman Date: Mon, 25 Sep 2017 18:54:14 +1000 Subject: [PATCH 19/42] [release-branch.go1.9] internal/poll: be explicit when using runtime netpoller internal/poll package assumes that only net sockets use runtime netpoller on windows. We get memory corruption if other file handles are passed into runtime poller. Make FD.Init receive and use useNetpoller argument, so FD.Init caller is explicit about using runtime netpoller. Fixes #21172 Change-Id: I60e2bfedf9dda9b341eb7a3e5221035db29f5739 Reviewed-on: https://go-review.googlesource.com/65810 Reviewed-by: Ian Lance Taylor Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-on: https://go-review.googlesource.com/71132 Run-TryBot: Russ Cox Reviewed-by: Alex Brainman --- src/internal/poll/fd_unix.go | 1 + src/internal/poll/fd_windows.go | 5 +++-- src/net/fd_windows.go | 2 +- src/os/file_windows.go | 2 +- 4 files changed, 6 insertions(+), 4 deletions(-) diff --git a/src/internal/poll/fd_unix.go b/src/internal/poll/fd_unix.go index c40c701f59..d9538e364b 100644 --- a/src/internal/poll/fd_unix.go +++ b/src/internal/poll/fd_unix.go @@ -42,6 +42,7 @@ type FD struct { // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), // or "file". +// Set pollable to true if fd should be managed by runtime netpoll. func (fd *FD) Init(net string, pollable bool) error { // We don't actually care about the various network types. if net == "file" { diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index f416158bbc..b0991a29f2 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -302,7 +302,8 @@ var logInitFD func(net string, fd *FD, err error) // This can be called multiple times on a single FD. // The net argument is a network name from the net package (e.g., "tcp"), // or "file" or "console" or "dir". -func (fd *FD) Init(net string) (string, error) { +// Set pollable to true if fd should be managed by runtime netpoll. +func (fd *FD) Init(net string, pollable bool) (string, error) { if initErr != nil { return "", initErr } @@ -323,7 +324,7 @@ func (fd *FD) Init(net string) (string, error) { } var err error - if !fd.isFile && !fd.isConsole && !fd.isDir { + if pollable { // Only call init for a network socket. // This means that we don't add files to the runtime poller. 
// Adding files to the runtime poller can confuse matters diff --git a/src/net/fd_windows.go b/src/net/fd_windows.go index c2156b255e..563558dc52 100644 --- a/src/net/fd_windows.go +++ b/src/net/fd_windows.go @@ -52,7 +52,7 @@ func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) } func (fd *netFD) init() error { - errcall, err := fd.pfd.Init(fd.net) + errcall, err := fd.pfd.Init(fd.net, true) if errcall != "" { err = wrapSyscallError(errcall, err) } diff --git a/src/os/file_windows.go b/src/os/file_windows.go index 93b6c135c7..e2be192bcb 100644 --- a/src/os/file_windows.go +++ b/src/os/file_windows.go @@ -54,7 +54,7 @@ func newFile(h syscall.Handle, name string, kind string) *File { // Ignore initialization errors. // Assume any problems will show up in later I/O. - f.pfd.Init(kind) + f.pfd.Init(kind, false) return f } From 0ab99b396df9fc45c5cf1ac412da4f1848c3462c Mon Sep 17 00:00:00 2001 From: Lynn Boger Date: Tue, 19 Sep 2017 17:36:57 -0400 Subject: [PATCH 20/42] [release-branch.go1.9] cmd/compile: fix regression in PPC64.rules move zero When a MOVDstorezero (8 bytes) is used the offset field in the instruction must be a multiple of 4. This situation had been corrected in the rules for other types of stores but not for the zero case. This also removes some of the special MOVDstorezero cases since they can be handled by the general LowerZero case. Updates made to the ssa test for lowering zero moves to include cases where the target is not aligned to at least 4. Fixes #21947 Change-Id: I7cceceb1be4898c77cd3b5e78b58dce0a7e28edd Reviewed-on: https://go-review.googlesource.com/64970 Run-TryBot: Lynn Boger TryBot-Result: Gobot Gobot Reviewed-by: Carlos Eduardo Seo Reviewed-by: Cherry Zhang Reviewed-on: https://go-review.googlesource.com/70978 Run-TryBot: Russ Cox --- .../internal/gc/testdata/gen/zeroGen.go | 63 +++++ src/cmd/compile/internal/gc/testdata/zero.go | 216 ++++++++++++++++++ src/cmd/compile/internal/ssa/gen/PPC64.rules | 58 ++--- src/cmd/compile/internal/ssa/rewritePPC64.go | 200 ++++++---------- 4 files changed, 364 insertions(+), 173 deletions(-) diff --git a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go index fa70b16495..c764c369e6 100644 --- a/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go +++ b/src/cmd/compile/internal/gc/testdata/gen/zeroGen.go @@ -19,6 +19,7 @@ import ( // will be written into the parent directory containing the tests. 
var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025} +var usizes = [...]int{8, 16, 24, 32, 64, 256} func main() { w := new(bytes.Buffer) @@ -61,12 +62,74 @@ func main() { fmt.Fprintf(w, "}\n") } + for _, s := range usizes { + // type for test + fmt.Fprintf(w, "type T%du1 struct {\n", s) + fmt.Fprintf(w, " b bool\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + fmt.Fprintf(w, "type T%du2 struct {\n", s) + fmt.Fprintf(w, " i uint16\n") + fmt.Fprintf(w, " val [%d]byte\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du1_ssa(t *T%du1) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // function being tested + fmt.Fprintf(w, "//go:noinline\n") + fmt.Fprintf(w, "func zero%du2_ssa(t *T%du2) {\n", s, s) + fmt.Fprintf(w, " t.val = [%d]byte{}\n", s) + fmt.Fprintf(w, "}\n") + + // testing harness + fmt.Fprintf(w, "func testZero%du() {\n", s) + fmt.Fprintf(w, " a := T%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s) + fmt.Fprintf(w, " want := T%du1{false, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if a != want {\n") + fmt.Fprintf(w, " fmt.Printf(\"zero%du2 got=%%v, want %%v\\n\", a, want)\n", s) + fmt.Fprintf(w, " failed=true\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, " b := T%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "255,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s) + fmt.Fprintf(w, " wantb := T%du2{15, [%d]byte{", s, s) + for i := 0; i < s; i++ { + fmt.Fprintf(w, "0,") + } + fmt.Fprintf(w, "}}\n") + fmt.Fprintf(w, " if b != wantb {\n") + fmt.Fprintf(w, " fmt.Printf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s) + fmt.Fprintf(w, " failed=true\n") + fmt.Fprintf(w, " }\n") + fmt.Fprintf(w, "}\n") + } + // boilerplate at end fmt.Fprintf(w, "var failed bool\n") fmt.Fprintf(w, "func main() {\n") for _, s := range sizes { fmt.Fprintf(w, " testZero%d()\n", s) } + for _, s := range usizes { + fmt.Fprintf(w, " testZero%du()\n", s) + } fmt.Fprintf(w, " if failed {\n") fmt.Fprintf(w, " panic(\"failed\")\n") fmt.Fprintf(w, " }\n") diff --git a/src/cmd/compile/internal/gc/testdata/zero.go b/src/cmd/compile/internal/gc/testdata/zero.go index f6354868cb..9d261aa401 100644 --- a/src/cmd/compile/internal/gc/testdata/zero.go +++ b/src/cmd/compile/internal/gc/testdata/zero.go @@ -505,6 +505,216 @@ func testZero1025() { } } +type T8u1 struct { + b bool + val [8]byte +} +type T8u2 struct { + i uint16 + val [8]byte +} + +//go:noinline +func zero8u1_ssa(t *T8u1) { + t.val = [8]byte{} +} + +//go:noinline +func zero8u2_ssa(t *T8u2) { + t.val = [8]byte{} +} +func testZero8u() { + a := T8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8u1_ssa(&a) + want := T8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero8u2 got=%v, want %v\n", a, want) + failed = true + } + b := T8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}} + zero8u2_ssa(&b) + wantb := T8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero8u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T16u1 struct { + b bool + val [16]byte +} +type T16u2 struct { + i uint16 + val [16]byte +} + +//go:noinline +func zero16u1_ssa(t *T16u1) { + t.val = [16]byte{} 
+} + +//go:noinline +func zero16u2_ssa(t *T16u2) { + t.val = [16]byte{} +} +func testZero16u() { + a := T16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u1_ssa(&a) + want := T16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero16u2 got=%v, want %v\n", a, want) + failed = true + } + b := T16u2{15, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero16u2_ssa(&b) + wantb := T16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero16u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T24u1 struct { + b bool + val [24]byte +} +type T24u2 struct { + i uint16 + val [24]byte +} + +//go:noinline +func zero24u1_ssa(t *T24u1) { + t.val = [24]byte{} +} + +//go:noinline +func zero24u2_ssa(t *T24u2) { + t.val = [24]byte{} +} +func testZero24u() { + a := T24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u1_ssa(&a) + want := T24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero24u2 got=%v, want %v\n", a, want) + failed = true + } + b := T24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero24u2_ssa(&b) + wantb := T24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero24u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T32u1 struct { + b bool + val [32]byte +} +type T32u2 struct { + i uint16 + val [32]byte +} + +//go:noinline +func zero32u1_ssa(t *T32u1) { + t.val = [32]byte{} +} + +//go:noinline +func zero32u2_ssa(t *T32u2) { + t.val = [32]byte{} +} +func testZero32u() { + a := T32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u1_ssa(&a) + want := T32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero32u2 got=%v, want %v\n", a, want) + failed = true + } + b := T32u2{15, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero32u2_ssa(&b) + wantb := T32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero32u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T64u1 struct { + b bool + val [64]byte +} +type T64u2 struct { + i uint16 + val [64]byte +} + +//go:noinline +func zero64u1_ssa(t *T64u1) { + t.val = [64]byte{} +} + +//go:noinline +func zero64u2_ssa(t *T64u2) { + t.val = [64]byte{} +} +func testZero64u() { + a := T64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u1_ssa(&a) + want := T64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero64u2 got=%v, want %v\n", a, want) + failed = true + } + b := T64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero64u2_ssa(&b) + wantb := T64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero64u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + +type T256u1 struct { + b bool + val [256]byte +} +type T256u2 struct { + i uint16 + val [256]byte +} + +//go:noinline +func zero256u1_ssa(t *T256u1) { + t.val = [256]byte{} +} + +//go:noinline +func zero256u2_ssa(t *T256u2) { + t.val = [256]byte{} +} +func testZero256u() { + a := T256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u1_ssa(&a) + want := T256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if a != want { + fmt.Printf("zero256u2 got=%v, want %v\n", a, want) + failed = true + } + b := T256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}} + zero256u2_ssa(&b) + wantb := T256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + if b != wantb { + fmt.Printf("zero256u2 got=%v, want %v\n", b, wantb) + failed = true + } +} + var failed bool func main() { @@ -533,6 +743,12 @@ func main() { testZero1023() testZero1024() testZero1025() + testZero8u() + testZero16u() + testZero24u() + testZero32u() + testZero64u() + testZero256u() if failed { panic("failed") } diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules index 81ac3c26af..45853b4b48 100644 --- a/src/cmd/compile/internal/ssa/gen/PPC64.rules +++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules @@ -526,49 +526,29 @@ (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))) -(Zero [8] destptr mem) -> - (MOVDstorezero destptr mem) -// Zero small numbers of words directly. 
-(Zero [12] destptr mem) -> +// MOVD for store with DS must have offsets that are multiple of 4 +(Zero [8] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero destptr mem) +(Zero [8] destptr mem) -> + (MOVWstorezero [4] destptr + (MOVWstorezero [0] destptr mem)) +// Handle these cases only if aligned properly, otherwise use general case below +(Zero [12] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [16] destptr mem) -> - (MOVDstorezero [8] destptr +(Zero [16] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) -(Zero [24] destptr mem) -> - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))) -(Zero [32] destptr mem) -> - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem)))) - -(Zero [40] destptr mem) -> - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))))) - -(Zero [48] destptr mem) -> - (MOVDstorezero [40] destptr - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem)))))) - -(Zero [56] destptr mem) -> - (MOVDstorezero [48] destptr - (MOVDstorezero [40] destptr - (MOVDstorezero [32] destptr - (MOVDstorezero [24] destptr - (MOVDstorezero [16] destptr - (MOVDstorezero [8] destptr - (MOVDstorezero [0] destptr mem))))))) +(Zero [24] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem))) +(Zero [32] {t} destptr mem) && t.(*types.Type).Alignment()%4 == 0 -> + (MOVDstorezero [24] destptr + (MOVDstorezero [16] destptr + (MOVDstorezero [8] destptr + (MOVDstorezero [0] destptr mem)))) // Handle cases not handled above (Zero [s] ptr mem) -> (LoweredZero [s] ptr mem) diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go index 20e354cb4a..75bf763d12 100644 --- a/src/cmd/compile/internal/ssa/rewritePPC64.go +++ b/src/cmd/compile/internal/ssa/rewritePPC64.go @@ -10727,9 +10727,28 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { v.AddArg(v0) return true } + // match: (Zero [8] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero destptr mem) + for { + if v.AuxInt != 8 { + break + } + t := v.Aux + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } + v.reset(OpPPC64MOVDstorezero) + v.AddArg(destptr) + v.AddArg(mem) + return true + } // match: (Zero [8] destptr mem) // cond: - // result: (MOVDstorezero destptr mem) + // result: (MOVWstorezero [4] destptr (MOVWstorezero [0] destptr mem)) for { if v.AuxInt != 8 { break @@ -10737,25 +10756,10 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AddArg(destptr) - v.AddArg(mem) - return true - } - // match: (Zero [12] destptr mem) - // cond: - // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) - for { - if v.AuxInt != 12 { - break - } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] v.reset(OpPPC64MOVWstorezero) - v.AuxInt = 8 + v.AuxInt = 4 v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) + v0 
:= b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem) v0.AuxInt = 0 v0.AddArg(destptr) v0.AddArg(mem) @@ -10767,16 +10771,44 @@ func rewriteValuePPC64_OpZero_0(v *Value) bool { func rewriteValuePPC64_OpZero_10(v *Value) bool { b := v.Block _ = b - // match: (Zero [16] destptr mem) - // cond: + // match: (Zero [12] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem)) + for { + if v.AuxInt != 12 { + break + } + t := v.Aux + _ = v.Args[1] + destptr := v.Args[0] + mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } + v.reset(OpPPC64MOVWstorezero) + v.AuxInt = 8 + v.AddArg(destptr) + v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) + v0.AuxInt = 0 + v0.AddArg(destptr) + v0.AddArg(mem) + v.AddArg(v0) + return true + } + // match: (Zero [16] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)) for { if v.AuxInt != 16 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 8 v.AddArg(destptr) @@ -10787,16 +10819,20 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { v.AddArg(v0) return true } - // match: (Zero [24] destptr mem) - // cond: - // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) + // match: (Zero [24] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))) for { if v.AuxInt != 24 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 16 v.AddArg(destptr) @@ -10811,16 +10847,20 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { v.AddArg(v0) return true } - // match: (Zero [32] destptr mem) - // cond: - // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))) + // match: (Zero [32] {t} destptr mem) + // cond: t.(*types.Type).Alignment()%4 == 0 + // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))) for { if v.AuxInt != 32 { break } + t := v.Aux _ = v.Args[1] destptr := v.Args[0] mem := v.Args[1] + if !(t.(*types.Type).Alignment()%4 == 0) { + break + } v.reset(OpPPC64MOVDstorezero) v.AuxInt = 24 v.AddArg(destptr) @@ -10839,114 +10879,6 @@ func rewriteValuePPC64_OpZero_10(v *Value) bool { v.AddArg(v0) return true } - // match: (Zero [40] destptr mem) - // cond: - // result: (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))) - for { - if v.AuxInt != 40 { - break - } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 32 - v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 24 - v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 16 - v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 8 - v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 0 - v3.AddArg(destptr) - v3.AddArg(mem) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - 
v.AddArg(v0) - return true - } - // match: (Zero [48] destptr mem) - // cond: - // result: (MOVDstorezero [40] destptr (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))))) - for { - if v.AuxInt != 48 { - break - } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 40 - v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 32 - v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 24 - v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 16 - v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 8 - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v4.AuxInt = 0 - v4.AddArg(destptr) - v4.AddArg(mem) - v3.AddArg(v4) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) - return true - } - // match: (Zero [56] destptr mem) - // cond: - // result: (MOVDstorezero [48] destptr (MOVDstorezero [40] destptr (MOVDstorezero [32] destptr (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))))) - for { - if v.AuxInt != 56 { - break - } - _ = v.Args[1] - destptr := v.Args[0] - mem := v.Args[1] - v.reset(OpPPC64MOVDstorezero) - v.AuxInt = 48 - v.AddArg(destptr) - v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v0.AuxInt = 40 - v0.AddArg(destptr) - v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v1.AuxInt = 32 - v1.AddArg(destptr) - v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v2.AuxInt = 24 - v2.AddArg(destptr) - v3 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v3.AuxInt = 16 - v3.AddArg(destptr) - v4 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v4.AuxInt = 8 - v4.AddArg(destptr) - v5 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem) - v5.AuxInt = 0 - v5.AddArg(destptr) - v5.AddArg(mem) - v4.AddArg(v5) - v3.AddArg(v4) - v2.AddArg(v3) - v1.AddArg(v2) - v0.AddArg(v1) - v.AddArg(v0) - return true - } // match: (Zero [s] ptr mem) // cond: // result: (LoweredZero [s] ptr mem) From ebfcdef90174ea86b2952abec511c0be96b05099 Mon Sep 17 00:00:00 2001 From: Austin Clements Date: Mon, 25 Sep 2017 14:58:13 -0400 Subject: [PATCH 21/42] [release-branch.go1.9] runtime: make runtime.GC() trigger GC even if GOGC=off Currently, the priority of checks in (gcTrigger).test() puts the gcpercent<0 test above gcTriggerCycle, which is used for runtime.GC(). This is an unintentional change from 1.8 and before, where runtime.GC() triggered a GC even if GOGC=off. Fix this by rearranging the priority so the gcTriggerCycle test executes even if gcpercent < 0. Fixes #22023. 
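The behavior being restored is easy to see from user code. A minimal standalone program (not part of this change, shown only as an illustration): with the fix, an explicit runtime.GC() completes a collection and is counted as forced even though automatic collection is disabled.

package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

func main() {
	debug.SetGCPercent(-1) // same effect as GOGC=off

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)
	runtime.GC() // must still force a full collection
	runtime.ReadMemStats(&after)

	// With the fix, both counters advance by one.
	fmt.Println("GC cycles:", after.NumGC-before.NumGC)
	fmt.Println("forced GC cycles:", after.NumForcedGC-before.NumForcedGC)
}

The new TestUserForcedGC below checks the same two counters.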
Change-Id: I109328d7b643b6824eb9d79061a9e775f0149575 Reviewed-on: https://go-review.googlesource.com/65994 Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot Reviewed-by: Rick Hudson Reviewed-on: https://go-review.googlesource.com/70979 Run-TryBot: Russ Cox Reviewed-by: Austin Clements --- src/runtime/gc_test.go | 16 ++++++++++++++++ src/runtime/mgc.go | 5 ++++- 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index 25dc869caa..0620f2d61e 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -499,3 +499,19 @@ func BenchmarkReadMemStats(b *testing.B) { hugeSink = nil } + +func TestUserForcedGC(t *testing.T) { + // Test that runtime.GC() triggers a GC even if GOGC=off. + defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + var ms1, ms2 runtime.MemStats + runtime.ReadMemStats(&ms1) + runtime.GC() + runtime.ReadMemStats(&ms2) + if ms1.NumGC == ms2.NumGC { + t.Fatalf("runtime.GC() did not trigger GC") + } + if ms1.NumForcedGC == ms2.NumForcedGC { + t.Fatalf("runtime.GC() was not accounted in NumForcedGC") + } +} diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 111fa781e1..b708720322 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -1158,7 +1158,7 @@ func (t gcTrigger) test() bool { if t.kind == gcTriggerAlways { return true } - if gcphase != _GCoff || gcpercent < 0 { + if gcphase != _GCoff { return false } switch t.kind { @@ -1169,6 +1169,9 @@ func (t gcTrigger) test() bool { // own write. return memstats.heap_live >= memstats.gc_trigger case gcTriggerTime: + if gcpercent < 0 { + return false + } lastgc := int64(atomic.Load64(&memstats.last_gc_nanotime)) return lastgc != 0 && t.now-lastgc > forcegcperiod case gcTriggerCycle: From 87b3a2783973b75357f8844a12aa36abcc479374 Mon Sep 17 00:00:00 2001 From: Elias Naur Date: Wed, 27 Sep 2017 13:36:54 +0200 Subject: [PATCH 22/42] [release-branch.go1.9] net: bump TestDialerDualStackFDLeak timeout on iOS On an iPhone 6 running iOS 11, the TestDialerDualStackFDLeak test started failing with dial durations just above the limit: FAIL: TestDialerDualStackFDLeak (0.21s) dial_test.go:90: got 101.154ms; want <= 95ms Bump the timeout on iOS. For the iOS builder. Change-Id: Id42b471e7cf7d0c84f6e83ed04b395fa1a2d449d Reviewed-on: https://go-review.googlesource.com/66491 Run-TryBot: Elias Naur TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70987 Run-TryBot: Russ Cox --- src/net/dial_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/net/dial_test.go b/src/net/dial_test.go index a892bf1e14..eba817e706 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -161,6 +161,8 @@ func dialClosedPort() (actual, expected time.Duration) { // but other platforms should be instantaneous. if runtime.GOOS == "windows" { expected = 1500 * time.Millisecond + } else if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + expected = 150 * time.Millisecond } else { expected = 95 * time.Millisecond } From d80889341c7ce6f2cfdd0a9d2e0feb20c4805bbc Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 2 Oct 2017 15:47:41 -0700 Subject: [PATCH 23/42] [release-branch.go1.9] cmd/compile: fix merge rules for panic calls Use entire inlining call stack to decide whether two panic calls can be merged. We used to merge panic calls when only the leaf line numbers matched, but that leads to places higher up the call stack being merged incorrectly. 
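As a rough standalone illustration (not the regression test added below), consider an inlinable function whose generated panic check is reached from two different call sites; each inlined copy needs its own panic block so the traceback names the call site that actually faulted.

package main

import "os"

// div is small enough to be inlined at both call sites below; the compiler
// inserts a divide-by-zero panic check in each inlined body.
func div(a, b int) int { return a / b }

func main() {
	n := len(os.Args)
	x := div(10, n)  // inlined here
	y := div(x, n-1) // inlined here; panics when run with no extra arguments
	println(y)
}

With the change, panic blocks are keyed by the position base (which carries the inlining context) plus the line, rather than by the leaf file and line alone, so panics reached through different inlining paths no longer share a block.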
Fixes #22083 Change-Id: Ia41400a80de4b6ecf3e5089abce0c42b65e9b38a Reviewed-on: https://go-review.googlesource.com/67632 Run-TryBot: Emmanuel Odeke TryBot-Result: Gobot Gobot Reviewed-by: Robert Griesemer Reviewed-on: https://go-review.googlesource.com/70980 Run-TryBot: Russ Cox Reviewed-by: Keith Randall --- src/cmd/compile/internal/gc/ssa.go | 4 +-- test/fixedbugs/issue22083.go | 41 ++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 2 deletions(-) create mode 100644 test/fixedbugs/issue22083.go diff --git a/src/cmd/compile/internal/gc/ssa.go b/src/cmd/compile/internal/gc/ssa.go index 9c1b3ca69f..63e9622983 100644 --- a/src/cmd/compile/internal/gc/ssa.go +++ b/src/cmd/compile/internal/gc/ssa.go @@ -282,7 +282,7 @@ type state struct { type funcLine struct { f *obj.LSym - file string + base *src.PosBase line uint } @@ -3456,7 +3456,7 @@ func (s *state) check(cmp *ssa.Value, fn *obj.LSym) { bNext := s.f.NewBlock(ssa.BlockPlain) line := s.peekPos() pos := Ctxt.PosTable.Pos(line) - fl := funcLine{f: fn, file: pos.Filename(), line: pos.Line()} + fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()} bPanic := s.panics[fl] if bPanic == nil { bPanic = s.f.NewBlock(ssa.BlockPlain) diff --git a/test/fixedbugs/issue22083.go b/test/fixedbugs/issue22083.go new file mode 100644 index 0000000000..a385102d08 --- /dev/null +++ b/test/fixedbugs/issue22083.go @@ -0,0 +1,41 @@ +// run + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The compiler was panicking on the wrong line number, where +// the panic was occurring in an inlined call. + +package main + +import ( + "runtime/debug" + "strings" +) + +type Wrapper struct { + a []int +} + +func (w Wrapper) Get(i int) int { + return w.a[i] +} + +func main() { + defer func() { + e := recover() + if e == nil { + panic("bounds check didn't fail") + } + stk := string(debug.Stack()) + if !strings.Contains(stk, "issue22083.go:40") { + panic("wrong stack trace: " + stk) + } + }() + foo := Wrapper{a: []int{0, 1, 2}} + _ = foo.Get(0) + _ = foo.Get(1) + _ = foo.Get(2) + _ = foo.Get(3) // stack trace should mention this line +} From 7dadd8d5172d44a7a9eccf4770ea299d7ef37b14 Mon Sep 17 00:00:00 2001 From: Author Name Date: Sat, 30 Sep 2017 13:47:48 -0700 Subject: [PATCH 24/42] [release-branch.go1.9] net: increase expected time to dial a closed port on all Darwin ports All current darwin architectures seem to take at least 100ms to dial a closed port, and that was making the all.bash script fail. Fixes #22062 Change-Id: Ib79c4b7a5db2373c95ce5d993cdcbee55cc0667f Reviewed-on: https://go-review.googlesource.com/67350 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70988 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot --- src/net/dial_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/net/dial_test.go b/src/net/dial_test.go index eba817e706..13fa9faacb 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -161,7 +161,7 @@ func dialClosedPort() (actual, expected time.Duration) { // but other platforms should be instantaneous. 
if runtime.GOOS == "windows" { expected = 1500 * time.Millisecond - } else if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") { + } else if runtime.GOOS == "darwin" { expected = 150 * time.Millisecond } else { expected = 95 * time.Millisecond From a1e34abfb388237c46eaa133e2737a72f5693e24 Mon Sep 17 00:00:00 2001 From: Jeff Date: Thu, 5 Oct 2017 10:11:17 -0700 Subject: [PATCH 25/42] [release-branch.go1.9] net/smtp: NewClient: set tls field to true when already using a TLS connection Change-Id: I34008f56c191df0edcaafc20d569bbc6184f89fc Reviewed-on: https://go-review.googlesource.com/68470 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70982 Run-TryBot: Russ Cox --- src/net/smtp/smtp.go | 1 + src/net/smtp/smtp_test.go | 47 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/src/net/smtp/smtp.go b/src/net/smtp/smtp.go index 28472e447b..767b077fe0 100644 --- a/src/net/smtp/smtp.go +++ b/src/net/smtp/smtp.go @@ -67,6 +67,7 @@ func NewClient(conn net.Conn, host string) (*Client, error) { return nil, err } c := &Client{Text: text, conn: conn, serverName: host, localName: "localhost"} + _, c.tls = conn.(*tls.Conn) return c, nil } diff --git a/src/net/smtp/smtp_test.go b/src/net/smtp/smtp_test.go index ff6585e69b..606a715ce4 100644 --- a/src/net/smtp/smtp_test.go +++ b/src/net/smtp/smtp_test.go @@ -364,6 +364,53 @@ HELO localhost QUIT ` +func TestNewClientWithTLS(t *testing.T) { + cert, err := tls.X509KeyPair(localhostCert, localhostKey) + if err != nil { + t.Fatalf("loadcert: %v", err) + } + + config := tls.Config{Certificates: []tls.Certificate{cert}} + + ln, err := tls.Listen("tcp", "127.0.0.1:0", &config) + if err != nil { + ln, err = tls.Listen("tcp", "[::1]:0", &config) + if err != nil { + t.Fatalf("server: listen: %s", err) + } + } + + go func() { + conn, err := ln.Accept() + if err != nil { + t.Fatalf("server: accept: %s", err) + return + } + defer conn.Close() + + _, err = conn.Write([]byte("220 SIGNS\r\n")) + if err != nil { + t.Fatalf("server: write: %s", err) + return + } + }() + + config.InsecureSkipVerify = true + conn, err := tls.Dial("tcp", ln.Addr().String(), &config) + if err != nil { + t.Fatalf("client: dial: %s", err) + } + defer conn.Close() + + client, err := NewClient(conn, ln.Addr().String()) + if err != nil { + t.Fatalf("smtp: newclient: %s", err) + } + if !client.tls { + t.Errorf("client.tls Got: %t Expected: %t", client.tls, true) + } +} + func TestHello(t *testing.T) { if len(helloServer) != len(helloClient) { From bfc22319aa349f014a34d73cff074bf3cce0df9c Mon Sep 17 00:00:00 2001 From: Adam Langley Date: Fri, 6 Oct 2017 12:46:22 -0700 Subject: [PATCH 26/42] [release-branch.go1.9] crypto/x509: reject intermediates with unknown critical extensions. In https://golang.org/cl/9390 I messed up and put the critical extension test in the wrong function. Thus it only triggered for leaf certificates and not for intermediates or roots. In practice, this is not expected to have a security impact in the web PKI. 
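A short sketch of how the stricter behavior surfaces to callers (not part of this change; the certificates and the DNS name are placeholders): after the fix, a chain whose intermediate or root carries an unrecognized critical extension fails verification with the same UnhandledCriticalExtension error that leaf certificates already produced.

package certcheck // hypothetical package name for the sketch

import (
	"crypto/x509"
	"fmt"
)

func verifyChain(leaf *x509.Certificate, roots, intermediates *x509.CertPool) error {
	_, err := leaf.Verify(x509.VerifyOptions{
		Roots:         roots,
		Intermediates: intermediates,
		DNSName:       "example.com", // placeholder
	})
	if _, ok := err.(x509.UnhandledCriticalExtension); ok {
		// Some certificate in the candidate chain carries a critical
		// extension that this package does not know how to process.
		return fmt.Errorf("unhandled critical extension in chain: %v", err)
	}
	return err
}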
Change-Id: I4f2464ef2fb71b5865389901f293062ba1327702 Reviewed-on: https://go-review.googlesource.com/69294 Run-TryBot: Adam Langley TryBot-Result: Gobot Gobot Reviewed-by: Russ Cox Reviewed-on: https://go-review.googlesource.com/70983 Run-TryBot: Russ Cox Reviewed-by: Ian Lance Taylor --- src/crypto/x509/verify.go | 8 +-- src/crypto/x509/verify_test.go | 96 ++++++++++++++++++++++++++++++++++ src/crypto/x509/x509_test.go | 68 ------------------------ 3 files changed, 100 insertions(+), 72 deletions(-) diff --git a/src/crypto/x509/verify.go b/src/crypto/x509/verify.go index 2b4f39d62e..1193a266a9 100644 --- a/src/crypto/x509/verify.go +++ b/src/crypto/x509/verify.go @@ -191,6 +191,10 @@ func matchNameConstraint(domain, constraint string) bool { // isValid performs validity checks on the c. func (c *Certificate) isValid(certType int, currentChain []*Certificate, opts *VerifyOptions) error { + if len(c.UnhandledCriticalExtensions) > 0 { + return UnhandledCriticalExtension{} + } + if len(currentChain) > 0 { child := currentChain[len(currentChain)-1] if !bytes.Equal(child.RawIssuer, c.RawSubject) { @@ -285,10 +289,6 @@ func (c *Certificate) Verify(opts VerifyOptions) (chains [][]*Certificate, err e return c.systemVerify(&opts) } - if len(c.UnhandledCriticalExtensions) > 0 { - return nil, UnhandledCriticalExtension{} - } - if opts.Roots == nil { opts.Roots = systemRootsPool() if opts.Roots == nil { diff --git a/src/crypto/x509/verify_test.go b/src/crypto/x509/verify_test.go index 335c477d0d..41e295d3e5 100644 --- a/src/crypto/x509/verify_test.go +++ b/src/crypto/x509/verify_test.go @@ -296,6 +296,30 @@ var verifyTests = []verifyTest{ errorCallback: expectNameConstraintsError, }, + { + // Test that unknown critical extensions in a leaf cause a + // verify error. + leaf: criticalExtLeafWithExt, + dnsName: "example.com", + intermediates: []string{criticalExtIntermediate}, + roots: []string{criticalExtRoot}, + currentTime: 1486684488, + systemSkip: true, + + errorCallback: expectUnhandledCriticalExtension, + }, + { + // Test that unknown critical extensions in an intermediate + // cause a verify error. 
+ leaf: criticalExtLeaf, + dnsName: "example.com", + intermediates: []string{criticalExtIntermediateWithExt}, + roots: []string{criticalExtRoot}, + currentTime: 1486684488, + systemSkip: true, + + errorCallback: expectUnhandledCriticalExtension, + }, } func expectHostnameError(t *testing.T, i int, err error) (ok bool) { @@ -379,6 +403,14 @@ func expectNotAuthorizedError(t *testing.T, i int, err error) (ok bool) { return true } +func expectUnhandledCriticalExtension(t *testing.T, i int, err error) (ok bool) { + if _, ok := err.(UnhandledCriticalExtension); !ok { + t.Errorf("#%d: error was not an UnhandledCriticalExtension: %s", i, err) + return false + } + return true +} + func certificateFromPEM(pemBytes string) (*Certificate, error) { block, _ := pem.Decode([]byte(pemBytes)) if block == nil { @@ -1596,3 +1628,67 @@ w67CoNRb81dy+4Q1lGpA8ORoLWh5fIq2t2eNGc4qB8vlTIKiESzAwu7u3sRfuWQi 4R+gnfLd37FWflMHwztFbVTuNtPOljCX0LN7KcuoXYlr05RhQrmoN7fQHsrZMNLs 8FVjHdKKu+uPstwd04Uy4BR/H2y1yerN9j/L6ZkMl98iiA== -----END CERTIFICATE-----` + +const criticalExtRoot = `-----BEGIN CERTIFICATE----- +MIIBqzCCAVGgAwIBAgIJAJ+mI/85cXApMAoGCCqGSM49BAMCMB0xDDAKBgNVBAoT +A09yZzENMAsGA1UEAxMEUm9vdDAeFw0xNTAxMDEwMDAwMDBaFw0yNTAxMDEwMDAw +MDBaMB0xDDAKBgNVBAoTA09yZzENMAsGA1UEAxMEUm9vdDBZMBMGByqGSM49AgEG +CCqGSM49AwEHA0IABJGp9joiG2QSQA+1FczEDAsWo84rFiP3GTL+n+ugcS6TyNib +gzMsdbJgVi+a33y0SzLZxB+YvU3/4KTk8yKLC+2jejB4MA4GA1UdDwEB/wQEAwIC +BDAdBgNVHSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB +/zAZBgNVHQ4EEgQQQDfXAftAL7gcflQEJ4xZATAbBgNVHSMEFDASgBBAN9cB+0Av +uBx+VAQnjFkBMAoGCCqGSM49BAMCA0gAMEUCIFeSV00fABFceWR52K+CfIgOHotY +FizzGiLB47hGwjMuAiEA8e0um2Kr8FPQ4wmFKaTRKHMaZizCGl3m+RG5QsE1KWo= +-----END CERTIFICATE-----` + +const criticalExtIntermediate = `-----BEGIN CERTIFICATE----- +MIIBszCCAVmgAwIBAgIJAL2kcGZKpzVqMAoGCCqGSM49BAMCMB0xDDAKBgNVBAoT +A09yZzENMAsGA1UEAxMEUm9vdDAeFw0xNTAxMDEwMDAwMDBaFw0yNTAxMDEwMDAw +MDBaMCUxDDAKBgNVBAoTA09yZzEVMBMGA1UEAxMMSW50ZXJtZWRpYXRlMFkwEwYH +KoZIzj0CAQYIKoZIzj0DAQcDQgAESqVq92iPEq01cL4o99WiXDc5GZjpjNlzMS1n +rk8oHcVDp4tQRRQG3F4A6dF1rn/L923ha3b0fhDLlAvXZB+7EKN6MHgwDgYDVR0P +AQH/BAQDAgIEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAPBgNVHRMB +Af8EBTADAQH/MBkGA1UdDgQSBBCMGmiotXbbXVd7H40UsgajMBsGA1UdIwQUMBKA +EEA31wH7QC+4HH5UBCeMWQEwCgYIKoZIzj0EAwIDSAAwRQIhAOhhNRb6KV7h3wbE +cdap8bojzvUcPD78fbsQPCNw1jPxAiBOeAJhlTwpKn9KHpeJphYSzydj9NqcS26Y +xXbdbm27KQ== +-----END CERTIFICATE-----` + +const criticalExtLeafWithExt = `-----BEGIN CERTIFICATE----- +MIIBxTCCAWugAwIBAgIJAJZAUtw5ccb1MAoGCCqGSM49BAMCMCUxDDAKBgNVBAoT +A09yZzEVMBMGA1UEAxMMSW50ZXJtZWRpYXRlMB4XDTE1MDEwMTAwMDAwMFoXDTI1 +MDEwMTAwMDAwMFowJDEMMAoGA1UEChMDT3JnMRQwEgYDVQQDEwtleGFtcGxlLmNv +bTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABF3ABa2+B6gUyg6ayCaRQWYY/+No +6PceLqEavZNUeVNuz7bS74Toy8I7R3bGMkMgbKpLSPlPTroAATvebTXoBaijgYQw +gYEwDgYDVR0PAQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcD +AjAMBgNVHRMBAf8EAjAAMBkGA1UdDgQSBBBRNtBL2vq8nCV3qVp7ycxMMBsGA1Ud +IwQUMBKAEIwaaKi1dttdV3sfjRSyBqMwCgYDUQMEAQH/BAAwCgYIKoZIzj0EAwID +SAAwRQIgVjy8GBgZFiagexEuDLqtGjIRJQtBcf7lYgf6XFPH1h4CIQCT6nHhGo6E +I+crEm4P5q72AnA/Iy0m24l7OvLuXObAmg== +-----END CERTIFICATE-----` + +const criticalExtIntermediateWithExt = `-----BEGIN CERTIFICATE----- +MIIB2TCCAX6gAwIBAgIIQD3NrSZtcUUwCgYIKoZIzj0EAwIwHTEMMAoGA1UEChMD +T3JnMQ0wCwYDVQQDEwRSb290MB4XDTE1MDEwMTAwMDAwMFoXDTI1MDEwMTAwMDAw +MFowPTEMMAoGA1UEChMDT3JnMS0wKwYDVQQDEyRJbnRlcm1lZGlhdGUgd2l0aCBD +cml0aWNhbCBFeHRlbnNpb24wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQtnmzH +mcRm10bdDBnJE7xQEJ25cLCL5okuEphRR0Zneo6+nQZikoh+UBbtt5GV3Dms7LeP 
+oF5HOplYDCd8wi/wo4GHMIGEMA4GA1UdDwEB/wQEAwICBDAdBgNVHSUEFjAUBggr +BgEFBQcDAQYIKwYBBQUHAwIwDwYDVR0TAQH/BAUwAwEB/zAZBgNVHQ4EEgQQKxdv +UuQZ6sO3XvBsxgNZ3zAbBgNVHSMEFDASgBBAN9cB+0AvuBx+VAQnjFkBMAoGA1ED +BAEB/wQAMAoGCCqGSM49BAMCA0kAMEYCIQCQzTPd6XKex+OAPsKT/1DsoMsg8vcG +c2qZ4Q0apT/kvgIhAKu2TnNQMIUdcO0BYQIl+Uhxc78dc9h4lO+YJB47pHGx +-----END CERTIFICATE-----` + +const criticalExtLeaf = `-----BEGIN CERTIFICATE----- +MIIBzzCCAXWgAwIBAgIJANoWFIlhCI9MMAoGCCqGSM49BAMCMD0xDDAKBgNVBAoT +A09yZzEtMCsGA1UEAxMkSW50ZXJtZWRpYXRlIHdpdGggQ3JpdGljYWwgRXh0ZW5z +aW9uMB4XDTE1MDEwMTAwMDAwMFoXDTI1MDEwMTAwMDAwMFowJDEMMAoGA1UEChMD +T3JnMRQwEgYDVQQDEwtleGFtcGxlLmNvbTBZMBMGByqGSM49AgEGCCqGSM49AwEH +A0IABG1Lfh8A0Ho2UvZN5H0+ONil9c8jwtC0y0xIZftyQE+Fwr9XwqG3rV2g4M1h +GnJa9lV9MPHg8+b85Hixm0ZSw7SjdzB1MA4GA1UdDwEB/wQEAwIFoDAdBgNVHSUE +FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAZBgNVHQ4EEgQQ +UNhY4JhezH9gQYqvDMWrWDAbBgNVHSMEFDASgBArF29S5Bnqw7de8GzGA1nfMAoG +CCqGSM49BAMCA0gAMEUCIQClA3d4tdrDu9Eb5ZBpgyC+fU1xTZB0dKQHz6M5fPZA +2AIgN96lM+CPGicwhN24uQI6flOsO3H0TJ5lNzBYLtnQtlc= +-----END CERTIFICATE-----` diff --git a/src/crypto/x509/x509_test.go b/src/crypto/x509/x509_test.go index 2d1acf93bf..b800191db3 100644 --- a/src/crypto/x509/x509_test.go +++ b/src/crypto/x509/x509_test.go @@ -523,74 +523,6 @@ func TestCreateSelfSignedCertificate(t *testing.T) { } } -func TestUnknownCriticalExtension(t *testing.T) { - priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - t.Fatalf("Failed to generate ECDSA key: %s", err) - } - - oids := []asn1.ObjectIdentifier{ - // This OID is in the PKIX arc, but unknown. - {2, 5, 29, 999999}, - // This is a nonsense, unassigned OID. - {1, 2, 3, 4}, - } - - for _, oid := range oids { - template := Certificate{ - SerialNumber: big.NewInt(1), - Subject: pkix.Name{ - CommonName: "foo", - }, - NotBefore: time.Unix(1000, 0), - NotAfter: time.Now().AddDate(1, 0, 0), - - BasicConstraintsValid: true, - IsCA: true, - - KeyUsage: KeyUsageCertSign, - ExtKeyUsage: []ExtKeyUsage{ExtKeyUsageServerAuth}, - - ExtraExtensions: []pkix.Extension{ - { - Id: oid, - Critical: true, - Value: nil, - }, - }, - } - - derBytes, err := CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv) - if err != nil { - t.Fatalf("failed to create certificate: %s", err) - } - - cert, err := ParseCertificate(derBytes) - if err != nil { - t.Fatalf("Certificate with unknown critical extension was not parsed: %s", err) - } - - roots := NewCertPool() - roots.AddCert(cert) - - // Setting Roots ensures that Verify won't delegate to the OS - // library and thus the correct error should always be - // returned. 
- _, err = cert.Verify(VerifyOptions{Roots: roots}) - if err == nil { - t.Fatal("Certificate with unknown critical extension was verified without error") - } - if _, ok := err.(UnhandledCriticalExtension); !ok { - t.Fatalf("Error was %#v, but wanted one of type UnhandledCriticalExtension", err) - } - - cert.UnhandledCriticalExtensions = nil - if _, err = cert.Verify(VerifyOptions{Roots: roots}); err != nil { - t.Errorf("Certificate failed to verify after unhandled critical extensions were cleared: %s", err) - } - } -} - // Self-signed certificate using ECDSA with SHA1 & secp256r1 var ecdsaSHA1CertPem = ` -----BEGIN CERTIFICATE----- From 39d4bb9c0f3ac3f87de64b069ea176595e39d46c Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Tue, 10 Oct 2017 14:10:28 -0700 Subject: [PATCH 27/42] [release-branch.go1.9] cmd/go: correct directory used in checkNestedVCS test This error was not used when using git because nested git is permitted. Add test using Mercurial, so that at least we have a test, even though the test is not run by default. Fixes #22157 Fixes #22201 Change-Id: If521f3c09b0754e00e56fa3cd0364764a57a43ad Reviewed-on: https://go-review.googlesource.com/69670 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Russ Cox Reviewed-on: https://go-review.googlesource.com/70984 Run-TryBot: Russ Cox Reviewed-by: Ian Lance Taylor --- src/cmd/go/go_test.go | 9 ++++----- src/cmd/go/internal/get/vcs.go | 2 +- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index c1b3975c7b..234e560e6b 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -1164,7 +1164,7 @@ func testMove(t *testing.T, vcs, url, base, config string) { tg.runFail("get", "-d", "-u", url) tg.grepStderr("is a custom import path for", "go get -d -u "+url+" failed for wrong reason") tg.runFail("get", "-d", "-f", "-u", url) - tg.grepStderr("validating server certificate|not found", "go get -d -f -u "+url+" failed for wrong reason") + tg.grepStderr("validating server certificate|[nN]ot [fF]ound", "go get -d -f -u "+url+" failed for wrong reason") } func TestInternalPackageErrorsAreHandled(t *testing.T) { @@ -1185,10 +1185,9 @@ func TestMoveGit(t *testing.T) { testMove(t, "git", "rsc.io/pdf", "pdf", "rsc.io/pdf/.git/config") } -// TODO(rsc): Set up a test case on bitbucket for hg. -// func TestMoveHG(t *testing.T) { -// testMove(t, "hg", "rsc.io/x86/x86asm", "x86", "rsc.io/x86/.hg/hgrc") -// } +func TestMoveHG(t *testing.T) { + testMove(t, "hg", "vcs-test.golang.org/go/custom-hg-hello", "custom-hg-hello", "vcs-test.golang.org/go/custom-hg-hello/.hg/hgrc") +} // TODO(rsc): Set up a test case on SourceForge (?) for svn. // func testMoveSVN(t *testing.T) { diff --git a/src/cmd/go/internal/get/vcs.go b/src/cmd/go/internal/get/vcs.go index f0e253ffb4..86d2e32efb 100644 --- a/src/cmd/go/internal/get/vcs.go +++ b/src/cmd/go/internal/get/vcs.go @@ -557,7 +557,7 @@ func checkNestedVCS(vcs *vcsCmd, dir, srcRoot string) error { otherDir := dir for len(otherDir) > len(srcRoot) { for _, otherVCS := range vcsList { - if _, err := os.Stat(filepath.Join(dir, "."+otherVCS.cmd)); err == nil { + if _, err := os.Stat(filepath.Join(otherDir, "."+otherVCS.cmd)); err == nil { // Allow expected vcs in original dir. 
if otherDir == dir && otherVCS == vcs { continue From f259aed0822dd42182b297740e3c5dcd09e40a84 Mon Sep 17 00:00:00 2001 From: Alex Brainman Date: Wed, 11 Oct 2017 18:15:25 +1100 Subject: [PATCH 28/42] [release-branch.go1.9] internal/poll: do not call SetFileCompletionNotificationModes if it is broken Current code assumes that SetFileCompletionNotificationModes is safe to call even if we know that it is not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS flag. It appears (see issue #22149), SetFileCompletionNotificationModes crashes when we call it without FILE_SKIP_COMPLETION_PORT_ON_SUCCESS flag. Do not call SetFileCompletionNotificationModes in that situation. We are allowed to do that, because SetFileCompletionNotificationModes is just an optimisation. Fixes #22149 Change-Id: I0ad3aff4eabd8c27739417a62c286b1819ae166a Reviewed-on: https://go-review.googlesource.com/69870 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70989 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Alex Brainman --- src/internal/poll/fd_windows.go | 64 +++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index b0991a29f2..2927463eee 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -31,11 +31,40 @@ var ( // package uses CancelIoEx API, if present, otherwise it fallback // to CancelIo. -var ( - canCancelIO bool // determines if CancelIoEx API is present - skipSyncNotif bool - hasLoadSetFileCompletionNotificationModes bool -) +var canCancelIO bool // determines if CancelIoEx API is present + +// This package uses SetFileCompletionNotificationModes Windows API +// to skip calling GetQueuedCompletionStatus if an IO operation completes +// synchronously. Unfortuently SetFileCompletionNotificationModes is not +// available on Windows XP. Also there is a known bug where +// SetFileCompletionNotificationModes crashes on some systems +// (see http://support.microsoft.com/kb/2568167 for details). + +var useSetFileCompletionNotificationModes bool // determines is SetFileCompletionNotificationModes is present and safe to use + +// checkSetFileCompletionNotificationModes verifies that +// SetFileCompletionNotificationModes Windows API is present +// on the system and is safe to use. +// See http://support.microsoft.com/kb/2568167 for details. 
+func checkSetFileCompletionNotificationModes() { + err := syscall.LoadSetFileCompletionNotificationModes() + if err != nil { + return + } + protos := [2]int32{syscall.IPPROTO_TCP, 0} + var buf [32]syscall.WSAProtocolInfo + len := uint32(unsafe.Sizeof(buf)) + n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len) + if err != nil { + return + } + for i := int32(0); i < n; i++ { + if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 { + return + } + } + useSetFileCompletionNotificationModes = true +} func init() { var d syscall.WSAData @@ -44,26 +73,7 @@ func init() { initErr = e } canCancelIO = syscall.LoadCancelIoEx() == nil - hasLoadSetFileCompletionNotificationModes = syscall.LoadSetFileCompletionNotificationModes() == nil - if hasLoadSetFileCompletionNotificationModes { - // It's not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS if non IFS providers are installed: - // http://support.microsoft.com/kb/2568167 - skipSyncNotif = true - protos := [2]int32{syscall.IPPROTO_TCP, 0} - var buf [32]syscall.WSAProtocolInfo - len := uint32(unsafe.Sizeof(buf)) - n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len) - if err != nil { - skipSyncNotif = false - } else { - for i := int32(0); i < n; i++ { - if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 { - skipSyncNotif = false - break - } - } - } - } + checkSetFileCompletionNotificationModes() } // operation contains superset of data necessary to perform all async IO. @@ -344,12 +354,12 @@ func (fd *FD) Init(net string, pollable bool) (string, error) { if err != nil { return "", err } - if hasLoadSetFileCompletionNotificationModes { + if useSetFileCompletionNotificationModes { // We do not use events, so we can skip them always. flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE) // It's not safe to skip completion notifications for UDP: // http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx - if skipSyncNotif && (net == "tcp" || net == "file") { + if net == "tcp" || net == "file" { flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS } err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, flags) From 7e7cb30475d69bfbaaa8c7519c551f9bd243a756 Mon Sep 17 00:00:00 2001 From: Alex Brainman Date: Wed, 11 Oct 2017 18:23:30 +1100 Subject: [PATCH 29/42] [release-branch.go1.9] internal/poll: only call SetFileCompletionNotificationModes for sockets CL 36799 made SetFileCompletionNotificationModes to be called for file handles. I don't think it is correct. Revert that change. Fixes #22024 Fixes #22207 Change-Id: I26260e8a727131cffbf60958d79eca2457495554 Reviewed-on: https://go-review.googlesource.com/69871 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/70990 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Alex Brainman --- src/internal/poll/fd_windows.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index 2927463eee..27fef04be0 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -354,12 +354,12 @@ func (fd *FD) Init(net string, pollable bool) (string, error) { if err != nil { return "", err } - if useSetFileCompletionNotificationModes { + if pollable && useSetFileCompletionNotificationModes { // We do not use events, so we can skip them always. 
flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE) // It's not safe to skip completion notifications for UDP: // http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx - if net == "tcp" || net == "file" { + if net == "tcp" { flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS } err := syscall.SetFileCompletionNotificationModes(fd.Sysfd, flags) From fd17253587862248ee9a30a89e59db2fa9b77d1d Mon Sep 17 00:00:00 2001 From: Daniel Theophanes Date: Sat, 23 Sep 2017 15:30:46 -0700 Subject: [PATCH 30/42] [release-branch.go1.9] database/sql: prevent race in driver by locking dc in Next Database drivers should be called from a single goroutine to ease driver's design. If a driver chooses to handle context cancels internally it may do so. The sql package violated this agreement when calling Next or NextResultSet. It was possible for a concurrent rollback triggered from a context cancel to call a Tx.Rollback (which takes a driver connection lock) while a Rows.Next is in progress (which does not tack the driver connection lock). The current internal design of the sql package is each call takes roughly two locks: a closemu lock which prevents an disposing of internal resources (assigning nil or removing from lists) and a driver connection lock that prevents calling driver code from multiple goroutines. Fixes #21117 Change-Id: Ie340dc752a503089c27f57ffd43e191534829360 Reviewed-on: https://go-review.googlesource.com/65731 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/71510 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Daniel Theophanes --- src/database/sql/fakedb_test.go | 1 + src/database/sql/sql.go | 12 ++++++++++++ src/database/sql/sql_test.go | 8 +++++++- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/src/database/sql/fakedb_test.go b/src/database/sql/fakedb_test.go index 4dcd096ca4..8e77df4ace 100644 --- a/src/database/sql/fakedb_test.go +++ b/src/database/sql/fakedb_test.go @@ -943,6 +943,7 @@ type rowsCursor struct { } func (rc *rowsCursor) touchMem() { + rc.parentMem.touchMem() rc.line++ } diff --git a/src/database/sql/sql.go b/src/database/sql/sql.go index c609fe4cc4..89976c7fd0 100644 --- a/src/database/sql/sql.go +++ b/src/database/sql/sql.go @@ -2454,6 +2454,12 @@ func (rs *Rows) nextLocked() (doClose, ok bool) { if rs.lastcols == nil { rs.lastcols = make([]driver.Value, len(rs.rowsi.Columns())) } + + // Lock the driver connection before calling the driver interface + // rowsi to prevent a Tx from rolling back the connection at the same time. + rs.dc.Lock() + defer rs.dc.Unlock() + rs.lasterr = rs.rowsi.Next(rs.lastcols) if rs.lasterr != nil { // Close the connection if there is a driver error. @@ -2503,6 +2509,12 @@ func (rs *Rows) NextResultSet() bool { doClose = true return false } + + // Lock the driver connection before calling the driver interface + // rowsi to prevent a Tx from rolling back the connection at the same time. + rs.dc.Lock() + defer rs.dc.Unlock() + rs.lasterr = nextResultSet.NextResultSet() if rs.lasterr != nil { doClose = true diff --git a/src/database/sql/sql_test.go b/src/database/sql/sql_test.go index c935eb4348..dd59ab9853 100644 --- a/src/database/sql/sql_test.go +++ b/src/database/sql/sql_test.go @@ -3106,6 +3106,9 @@ func TestIssue6081(t *testing.T) { // In the test, a context is canceled while the query is in process so // the internal rollback will run concurrently with the explicitly called // Tx.Rollback. 
+// +// The addition of calling rows.Next also tests +// Issue 21117. func TestIssue18429(t *testing.T) { db := newTestDB(t, "people") defer closeDB(t, db) @@ -3116,7 +3119,7 @@ func TestIssue18429(t *testing.T) { const milliWait = 30 - for i := 0; i < 100; i++ { + for i := 0; i < 1000; i++ { sem <- true wg.Add(1) go func() { @@ -3138,6 +3141,9 @@ func TestIssue18429(t *testing.T) { // reported. rows, _ := tx.QueryContext(ctx, "WAIT|"+qwait+"|SELECT|people|name|") if rows != nil { + // Call Next to test Issue 21117 and check for races. + for rows.Next() { + } rows.Close() } // This call will race with the context cancel rollback to complete From d487b15a61eee87c304277842a4624dca0c6bddd Mon Sep 17 00:00:00 2001 From: Matthew Dempsky Date: Fri, 13 Oct 2017 14:47:45 -0700 Subject: [PATCH 31/42] [release-branch.go1.9] cmd/compile: omit ICE diagnostics after normal error messages After we detect errors, the AST is in a precarious state and more likely to trip useless ICE failures. Instead let the user fix any existing errors and see if the ICE persists. This makes Fatalf more consistent with how panics are handled by hidePanic. While here, also fix detection for release versions: release version strings begin with "go" ("go1.8", "go1.9.1", etc), not "release". Fixes #22252. Change-Id: I1c400af62fb49dd979b96e1bf0fb295a81c8b336 Reviewed-on: https://go-review.googlesource.com/70850 Run-TryBot: Matthew Dempsky TryBot-Result: Gobot Gobot Reviewed-by: Russ Cox Reviewed-on: https://go-review.googlesource.com/70985 Run-TryBot: Russ Cox Reviewed-by: Matthew Dempsky --- src/cmd/compile/internal/gc/subr.go | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/src/cmd/compile/internal/gc/subr.go b/src/cmd/compile/internal/gc/subr.go index d79789c4fe..047acee05f 100644 --- a/src/cmd/compile/internal/gc/subr.go +++ b/src/cmd/compile/internal/gc/subr.go @@ -166,20 +166,22 @@ func Warnl(line src.XPos, fmt_ string, args ...interface{}) { func Fatalf(fmt_ string, args ...interface{}) { flusherrors() - fmt.Printf("%v: internal compiler error: ", linestr(lineno)) - fmt.Printf(fmt_, args...) - fmt.Printf("\n") - - // If this is a released compiler version, ask for a bug report. - if strings.HasPrefix(objabi.Version, "release") { + if Debug_panic != 0 || nsavederrors+nerrors == 0 { + fmt.Printf("%v: internal compiler error: ", linestr(lineno)) + fmt.Printf(fmt_, args...) fmt.Printf("\n") - fmt.Printf("Please file a bug report including a short program that triggers the error.\n") - fmt.Printf("https://golang.org/issue/new\n") - } else { - // Not a release; dump a stack trace, too. - fmt.Println() - os.Stdout.Write(debug.Stack()) - fmt.Println() + + // If this is a released compiler version, ask for a bug report. + if strings.HasPrefix(objabi.Version, "go") { + fmt.Printf("\n") + fmt.Printf("Please file a bug report including a short program that triggers the error.\n") + fmt.Printf("https://golang.org/issue/new\n") + } else { + // Not a release; dump a stack trace, too. 
+ fmt.Println() + os.Stdout.Write(debug.Stack()) + fmt.Println() + } } hcrash() From 0758d2b9da81f45c758d028570782194db92f6be Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Tue, 17 Oct 2017 15:10:59 -0400 Subject: [PATCH 32/42] [release-branch.go1.9] cmd/go: clean up x.exe properly in TestImportMain More generally I'm concerned about these tests using $GOROOT/src/cmd/go as scratch space, especially combined wtih tg.parallel() - it's easy to believe some other test might inadvertently also try to write x.exe about the same time. This CL only solves the "didn't clean up x.exe" problem and leaves for another day the "probably shouldn't write to cmd/go at all" problem. Fixes #22266. Change-Id: I651534d70e2d360138e0373fb4a316081872550b Reviewed-on: https://go-review.googlesource.com/71410 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/71530 --- src/cmd/go/go_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index 234e560e6b..04df14fca9 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -2847,7 +2847,7 @@ func TestImportMain(t *testing.T) { func TestFoo(t *testing.T) {} `) tg.setenv("GOPATH", tg.path(".")) - tg.creatingTemp("x") + tg.creatingTemp("x" + exeSuffix) tg.run("build", "x") tg.run("test", "x") From 8bb333a9c0945666e7b2e162f85a5fff74ccf1b4 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 25 Oct 2017 10:57:00 -0400 Subject: [PATCH 33/42] [release-branch.go1.9] doc: document Go 1.9.2 Change-Id: I7d63e747e798d588bdcf2b79b6ecd21fce7bbc9c Reviewed-on: https://go-review.googlesource.com/73391 Run-TryBot: Russ Cox Reviewed-by: Chris Broadfoot Reviewed-on: https://go-review.googlesource.com/73490 TryBot-Result: Gobot Gobot --- doc/devel/release.html | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/devel/release.html b/doc/devel/release.html index bb30ff8946..eac2ddd3ef 100644 --- a/doc/devel/release.html +++ b/doc/devel/release.html @@ -38,6 +38,17 @@ See the Go 1.9.1 milestone on our issue tracker for details.

    +

+go1.9.2 (released 2017/10/25) includes fixes to the compiler, linker, runtime,
+documentation, go command,
+and the crypto/x509, database/sql, log,
+and net/smtp packages.
+It includes a fix to a bug introduced in Go 1.9.1 that broke go get
+of non-Git repositories under certain conditions.
+See the Go
+1.9.2 milestone on our issue tracker for details.
+

    +

    go1.8 (released 2017/02/16)

    From 9be38a15e486f09663cf324539e2cb5045d54d80 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 25 Oct 2017 11:13:23 -0400 Subject: [PATCH 34/42] [release-branch.go1.9] runtime: avoid monotonic time zero on systems with low-res timers Otherwise low-res timers cause problems at call sites that expect to be able to use 0 as meaning "no time set" and therefore expect that nanotime never returns 0 itself. For example, sched.lastpoll == 0 means no last poll. Fixes #22394. Change-Id: Iea28acfddfff6f46bc90f041ec173e0fea591285 Reviewed-on: https://go-review.googlesource.com/73410 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-by: Austin Clements Reviewed-on: https://go-review.googlesource.com/73491 TryBot-Result: Russ Cox --- src/runtime/proc.go | 3 +++ src/runtime/time.go | 8 +++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/src/runtime/proc.go b/src/runtime/proc.go index a631a016a3..5787991f07 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -142,6 +142,9 @@ func main() { } runtime_init() // must be before defer + if nanotime() == 0 { + throw("nanotime returning zero") + } // Defer unlock so that runtime.Goexit during init does the unlock too. needUnlock := true diff --git a/src/runtime/time.go b/src/runtime/time.go index abf200d7d3..23f61d62d0 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -309,4 +309,10 @@ func time_runtimeNano() int64 { return nanotime() } -var startNano int64 = nanotime() +// Monotonic times are reported as offsets from startNano. +// We initialize startNano to nanotime() - 1 so that on systems where +// monotonic time resolution is fairly low (e.g. Windows 2008 +// which appears to have a default resolution of 15ms), +// we avoid ever reporting a nanotime of 0. +// (Callers may want to use 0 as "time not set".) +var startNano int64 = nanotime() - 1 From f69668e1d0831ab3d3c8a6590e0d5b477ae74c1a Mon Sep 17 00:00:00 2001 From: Ian Lance Taylor Date: Fri, 13 Oct 2017 12:22:20 -0700 Subject: [PATCH 35/42] [release-branch.go1.9] os: skip TestPipeThreads as flaky for 1.9 Updates #21559 Change-Id: I90fa8b4ef97c4251440270491ac4c833d76ee872 Reviewed-on: https://go-review.googlesource.com/70771 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor --- src/os/os_test.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/os/os_test.go b/src/os/os_test.go index dbe4ff8830..d04ba917b3 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -2176,6 +2176,8 @@ func TestPipeThreads(t *testing.T) { t.Skip("skipping on Plan 9; does not support runtime poller") } + testenv.SkipFlaky(t, 21559) + threads := 100 // OpenBSD has a low default for max number of files. From 33ce1682c7bd2d85d0f31c98d923b12c339d8a74 Mon Sep 17 00:00:00 2001 From: Hiroshi Ioka Date: Tue, 29 Aug 2017 07:39:57 +0900 Subject: [PATCH 36/42] [release-branch.go1.9] cmd/cgo: avoid using common names for sniffing Current code uses names like "x" and "s" which can conflict with user's code easily. Use cryptographic names. 
Fixes #21668 Change-Id: Ib6d3d6327aa5b92d95c71503d42e3a79d96c8e16 Reviewed-on: https://go-review.googlesource.com/59710 Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/59730 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Hiroshi Ioka Reviewed-by: Chris Broadfoot Reviewed-on: https://go-review.googlesource.com/70849 Run-TryBot: Russ Cox --- misc/cgo/test/issue21668.go | 13 +++++++++++++ src/cmd/cgo/gcc.go | 20 ++++++++++---------- 2 files changed, 23 insertions(+), 10 deletions(-) create mode 100644 misc/cgo/test/issue21668.go diff --git a/misc/cgo/test/issue21668.go b/misc/cgo/test/issue21668.go new file mode 100644 index 0000000000..f15b9202ac --- /dev/null +++ b/misc/cgo/test/issue21668.go @@ -0,0 +1,13 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Fail to guess the kind of the constant "x". +// No runtime test; just make sure it compiles. + +package cgotest + +// const int x = 42; +import "C" + +var issue21668_X = C.x diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index ff8b81354b..c47d1fba8d 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -296,15 +296,15 @@ func (p *Package) guessKinds(f *File) []*Name { // For each name, we generate these lines, where xxx is the index in toSniff plus one. // // #line xxx "not-declared" - // void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__; } + // void __cgo_f_xxx_1(void) { __typeof__(name) *__cgo_undefined__1; } // #line xxx "not-type" - // void __cgo_f_xxx_2(void) { name *__cgo_undefined__; } + // void __cgo_f_xxx_2(void) { name *__cgo_undefined__2; } // #line xxx "not-int-const" - // void __cgo_f_xxx_3(void) { enum { __cgo_undefined__ = (name)*1 }; } + // void __cgo_f_xxx_3(void) { enum { __cgo_undefined__3 = (name)*1 }; } // #line xxx "not-num-const" - // void __cgo_f_xxx_4(void) { static const double x = (name); } + // void __cgo_f_xxx_4(void) { static const double __cgo_undefined__4 = (name); } // #line xxx "not-str-lit" - // void __cgo_f_xxx_5(void) { static const char x[] = (name); } + // void __cgo_f_xxx_5(void) { static const char __cgo_undefined__5[] = (name); } // #line xxx "not-signed-int-const" // #if 0 < -(name) // #line xxx "not-signed-int-const" @@ -327,15 +327,15 @@ func (p *Package) guessKinds(f *File) []*Name { for i, n := range names { fmt.Fprintf(&b, "#line %d \"not-declared\"\n"+ - "void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__; }\n"+ + "void __cgo_f_%d_1(void) { __typeof__(%s) *__cgo_undefined__1; }\n"+ "#line %d \"not-type\"\n"+ - "void __cgo_f_%d_2(void) { %s *__cgo_undefined__; }\n"+ + "void __cgo_f_%d_2(void) { %s *__cgo_undefined__2; }\n"+ "#line %d \"not-int-const\"\n"+ - "void __cgo_f_%d_3(void) { enum { __cgo_undefined__ = (%s)*1 }; }\n"+ + "void __cgo_f_%d_3(void) { enum { __cgo_undefined__3 = (%s)*1 }; }\n"+ "#line %d \"not-num-const\"\n"+ - "void __cgo_f_%d_4(void) { static const double x = (%s); }\n"+ + "void __cgo_f_%d_4(void) { static const double __cgo_undefined__4 = (%s); }\n"+ "#line %d \"not-str-lit\"\n"+ - "void __cgo_f_%d_5(void) { static const char s[] = (%s); }\n"+ + "void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n"+ "#line %d \"not-signed-int-const\"\n"+ "#if 0 < (%s)\n"+ "#line %d \"not-signed-int-const\"\n"+ From dffc9319f187b8a993968833b3692fc31abbdf1e Mon Sep 17 00:00:00 2001 From: Hiroshi Ioka Date: Thu, 31 Aug 2017 13:49:43 +0900 Subject: [PATCH 37/42] 
[release-branch.go1.9] cmd/cgo: support large unsigned macro again The approach of https://golang.org/cl/43476 turned out incorrect. The problem is that the sniff introduced by the CL only work for simple expression. And when it fails it fallback to uint64, not int64, which breaks backward compatibility. In this CL, we use DWARF for guessing kind instead. That should be more reliable than previous approach. And importanly, it fallbacks to int64 even if it fails to guess kind. Fixes #21708 Change-Id: I39a18cb2efbe4faa9becdcf53d5ac68dba180d47 Reviewed-on: https://go-review.googlesource.com/60510 Run-TryBot: Ian Lance Taylor TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-on: https://go-review.googlesource.com/60810 Reviewed-by: Hiroshi Ioka Reviewed-by: Chris Broadfoot Reviewed-on: https://go-review.googlesource.com/70970 Run-TryBot: Russ Cox --- misc/cgo/test/cgo_test.go | 1 + misc/cgo/test/issue21708.go | 16 +++++++++++++++ src/cmd/cgo/gcc.go | 39 ++++++++++--------------------------- src/cmd/cgo/main.go | 4 ++-- 4 files changed, 29 insertions(+), 31 deletions(-) create mode 100644 misc/cgo/test/issue21708.go diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go index f7cf6f613c..9485e25bf4 100644 --- a/misc/cgo/test/cgo_test.go +++ b/misc/cgo/test/cgo_test.go @@ -80,5 +80,6 @@ func Test20369(t *testing.T) { test20369(t) } func Test18720(t *testing.T) { test18720(t) } func Test20266(t *testing.T) { test20266(t) } func Test20129(t *testing.T) { test20129(t) } +func Test21708(t *testing.T) { test21708(t) } func BenchmarkCgoCall(b *testing.B) { benchCgoCall(b) } diff --git a/misc/cgo/test/issue21708.go b/misc/cgo/test/issue21708.go new file mode 100644 index 0000000000..d413e3c57a --- /dev/null +++ b/misc/cgo/test/issue21708.go @@ -0,0 +1,16 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +// #include +// #define CAST_TO_INT64 (int64_t)(-1) +import "C" +import "testing" + +func test21708(t *testing.T) { + if got, want := C.CAST_TO_INT64, -1; got != want { + t.Errorf("C.CAST_TO_INT64 == %v, expected %v", got, want) + } +} diff --git a/src/cmd/cgo/gcc.go b/src/cmd/cgo/gcc.go index c47d1fba8d..c104067a93 100644 --- a/src/cmd/cgo/gcc.go +++ b/src/cmd/cgo/gcc.go @@ -305,18 +305,12 @@ func (p *Package) guessKinds(f *File) []*Name { // void __cgo_f_xxx_4(void) { static const double __cgo_undefined__4 = (name); } // #line xxx "not-str-lit" // void __cgo_f_xxx_5(void) { static const char __cgo_undefined__5[] = (name); } - // #line xxx "not-signed-int-const" - // #if 0 < -(name) - // #line xxx "not-signed-int-const" - // #error found unsigned int - // #endif // // If we see an error at not-declared:xxx, the corresponding name is not declared. // If we see an error at not-type:xxx, the corresponding name is a type. // If we see an error at not-int-const:xxx, the corresponding name is not an integer constant. // If we see an error at not-num-const:xxx, the corresponding name is not a number constant. // If we see an error at not-str-lit:xxx, the corresponding name is not a string literal. - // If we see an error at not-signed-int-const:xxx, the corresponding name is not a signed integer literal. // // The specific input forms are chosen so that they are valid C syntax regardless of // whether name denotes a type or an expression. 
@@ -335,18 +329,12 @@ func (p *Package) guessKinds(f *File) []*Name { "#line %d \"not-num-const\"\n"+ "void __cgo_f_%d_4(void) { static const double __cgo_undefined__4 = (%s); }\n"+ "#line %d \"not-str-lit\"\n"+ - "void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n"+ - "#line %d \"not-signed-int-const\"\n"+ - "#if 0 < (%s)\n"+ - "#line %d \"not-signed-int-const\"\n"+ - "#error found unsigned int\n"+ - "#endif\n", + "void __cgo_f_%d_5(void) { static const char __cgo_undefined__5[] = (%s); }\n", i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, i+1, i+1, n.C, - i+1, n.C, i+1, ) } fmt.Fprintf(&b, "#line 1 \"completed\"\n"+ @@ -365,7 +353,6 @@ func (p *Package) guessKinds(f *File) []*Name { notNumConst notStrLiteral notDeclared - notSignedIntConst ) sawUnmatchedErrors := false for _, line := range strings.Split(stderr, "\n") { @@ -419,8 +406,6 @@ func (p *Package) guessKinds(f *File) []*Name { sniff[i] |= notNumConst case "not-str-lit": sniff[i] |= notStrLiteral - case "not-signed-int-const": - sniff[i] |= notSignedIntConst default: if isError { sawUnmatchedErrors = true @@ -436,7 +421,7 @@ func (p *Package) guessKinds(f *File) []*Name { } for i, n := range names { - switch sniff[i] &^ notSignedIntConst { + switch sniff[i] { default: var tpos token.Pos for _, ref := range f.Ref { @@ -447,11 +432,7 @@ func (p *Package) guessKinds(f *File) []*Name { } error_(tpos, "could not determine kind of name for C.%s", fixGo(n.Go)) case notStrLiteral | notType: - if sniff[i]¬SignedIntConst != 0 { - n.Kind = "uconst" - } else { - n.Kind = "iconst" - } + n.Kind = "iconst" case notIntConst | notStrLiteral | notType: n.Kind = "fconst" case notIntConst | notNumConst | notType: @@ -496,7 +477,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) { b.WriteString("#line 1 \"cgo-dwarf-inference\"\n") for i, n := range names { fmt.Fprintf(&b, "__typeof__(%s) *__cgo__%d;\n", n.C, i) - if n.Kind == "iconst" || n.Kind == "uconst" { + if n.Kind == "iconst" { fmt.Fprintf(&b, "enum { __cgo_enum__%d = %s };\n", i, n.C) } } @@ -505,7 +486,7 @@ func (p *Package) loadDWARF(f *File, names []*Name) { // so we can read them out of the object file. 
fmt.Fprintf(&b, "long long __cgodebug_ints[] = {\n") for _, n := range names { - if n.Kind == "iconst" || n.Kind == "uconst" { + if n.Kind == "iconst" { fmt.Fprintf(&b, "\t%s,\n", n.C) } else { fmt.Fprintf(&b, "\t0,\n") @@ -614,11 +595,11 @@ func (p *Package) loadDWARF(f *File, names []*Name) { switch n.Kind { case "iconst": if i < len(ints) { - n.Const = fmt.Sprintf("%#x", ints[i]) - } - case "uconst": - if i < len(ints) { - n.Const = fmt.Sprintf("%#x", uint64(ints[i])) + if _, ok := types[i].(*dwarf.UintType); ok { + n.Const = fmt.Sprintf("%#x", uint64(ints[i])) + } else { + n.Const = fmt.Sprintf("%#x", ints[i]) + } } case "fconst": if i < len(floats) { diff --git a/src/cmd/cgo/main.go b/src/cmd/cgo/main.go index 3dc3d141b7..3ad13ef9c7 100644 --- a/src/cmd/cgo/main.go +++ b/src/cmd/cgo/main.go @@ -88,7 +88,7 @@ type Name struct { Mangle string // name used in generated Go C string // name used in C Define string // #define expansion - Kind string // "iconst", "uconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type" + Kind string // "iconst", "fconst", "sconst", "type", "var", "fpvar", "func", "not-type" Type *Type // the type of xxx FuncType *FuncType AddError bool @@ -100,7 +100,7 @@ func (n *Name) IsVar() bool { return n.Kind == "var" || n.Kind == "fpvar" } -// IsConst reports whether Kind is either "iconst", "uconst", "fconst" or "sconst" +// IsConst reports whether Kind is either "iconst", "fconst" or "sconst" func (n *Name) IsConst() bool { return strings.HasSuffix(n.Kind, "const") } From f36b12657c71753029aeefa5e0af3c1607ffe9bb Mon Sep 17 00:00:00 2001 From: Michael Matloob Date: Tue, 12 Sep 2017 12:22:22 -0400 Subject: [PATCH 38/42] [release-branch.go1.9] runtime: in cpuProfile.addExtra, set p.lostExtra to 0 after flush After the number of lost extra events are written to the the cpuprof log, the number of lost extra events should be set to zero, or else, the next time time addExtra is logged, lostExtra will be overcounted. This change resets lostExtra after its value is written to the log. Fixes #21836 Change-Id: I8a6ac9c61e579e7a5ca7bdb0f3463f8ae8b9f864 Reviewed-on: https://go-review.googlesource.com/63270 Reviewed-by: Austin Clements Run-TryBot: Austin Clements TryBot-Result: Gobot Gobot Reviewed-on: https://go-review.googlesource.com/70974 Run-TryBot: Russ Cox --- src/runtime/cpuprof.go | 1 + 1 file changed, 1 insertion(+) diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go index fb841a9f3d..e00dcb1bbd 100644 --- a/src/runtime/cpuprof.go +++ b/src/runtime/cpuprof.go @@ -160,6 +160,7 @@ func (p *cpuProfile) addExtra() { funcPC(_ExternalCode) + sys.PCQuantum, } cpuprof.log.write(nil, 0, hdr[:], lostStk[:]) + p.lostExtra = 0 } } From 79996e4a1d33b7404ee076d7455ff8dcc7270250 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Mon, 28 Aug 2017 12:57:52 -0700 Subject: [PATCH 39/42] [release-branch.go1.9] cmd/compile: avoid generating large offsets The assembler barfs on large offsets. Make sure that all the instructions that need to have their offsets in an int32 1) check on any rule that computes offsets for such instructions 2) change their aux fields so the check builder checks it. The assembler also silently misassembled offsets between 1<<31 and 1<<32. Add a check in the assembler to barf on those as well. 
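For reference, the new "&& is32Bit(...)" conditions added to the rewrite rules below (and the "ValAndOff(x).canAdd(...)" conditions on the store-constant forms) all come down to one question: does the folded constant still fit in the instruction's signed 32-bit offset field? A minimal, self-contained sketch of that guard follows; the package main wrapper and the printed examples are purely illustrative and not part of the compiler, only the shape of the is32Bit check mirrors what the rule conditions rely on.

    package main

    import "fmt"

    // is32Bit reports whether n fits in a signed 32-bit integer,
    // i.e. whether it survives a round trip through int32. The rule
    // conditions below apply a check of this shape before folding an
    // ADDQconst constant into a load/store offset.
    func is32Bit(n int64) bool {
        return n == int64(int32(n))
    }

    func main() {
        fmt.Println(is32Bit(1<<20 + 8))        // true: small offset, safe to fold
        fmt.Println(is32Bit(int64(1)<<31 - 1)) // true: largest value that still fits
        fmt.Println(is32Bit(int64(1) << 31))   // false: folding would overflow the int32 field
    }

The ValAndOff(x).canAdd(c) conditions apply the same fits-in-int32 test to the offset half of a packed value+offset auxint before the corresponding ValAndOff(x).add(c) is performed.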
Fixes #21655 Change-Id: Iebf24bf10f9f37b3ea819ceb7d588251c0f46d7d Reviewed-on: https://go-review.googlesource.com/59630 Run-TryBot: Keith Randall TryBot-Result: Gobot Gobot Reviewed-by: David Chase Reviewed-on: https://go-review.googlesource.com/70981 Run-TryBot: Russ Cox Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 132 +++---- src/cmd/compile/internal/ssa/gen/AMD64Ops.go | 17 +- src/cmd/compile/internal/ssa/opGen.go | 16 +- src/cmd/compile/internal/ssa/rewriteAMD64.go | 362 ++++++++++++++----- src/cmd/internal/obj/x86/asm6.go | 7 + test/fixedbugs/issue21655.go | 40 ++ 6 files changed, 410 insertions(+), 164 deletions(-) create mode 100644 test/fixedbugs/issue21655.go diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index 1900f5e794..f1d53bc51f 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -1177,82 +1177,82 @@ (MOVQstoreconstidx1 [c] {sym} ptr (SHLQconst [3] idx) mem) -> (MOVQstoreconstidx8 [c] {sym} ptr idx mem) // combine ADDQ into indexed loads and stores -(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) -(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) -(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) -(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) -(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) -(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) -(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem) -(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) -(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem) +(MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVWloadidx2 [c+d] {sym} ptr idx mem) +(MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) +(MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVLloadidx4 [c+d] {sym} ptr idx mem) +(MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) +(MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVQloadidx8 [c+d] {sym} ptr idx mem) +(MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) +(MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSSloadidx4 [c+d] {sym} ptr idx mem) +(MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) +(MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) && is32Bit(c+d) -> (MOVSDloadidx8 [c+d] {sym} ptr idx mem) -(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx1 
[c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) +(MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) && is32Bit(c+d) -> (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) -(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVBloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) -(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) -(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) -(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) -(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) -(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) -(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) -(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) -(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) -(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) +(MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> 
(MOVBloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVWloadidx1 [c+d] {sym} ptr idx mem) +(MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+2*d) -> (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) +(MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVLloadidx1 [c+d] {sym} ptr idx mem) +(MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) +(MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVQloadidx1 [c+d] {sym} ptr idx mem) +(MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) +(MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSSloadidx1 [c+d] {sym} ptr idx mem) +(MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+4*d) -> (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) +(MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+d) -> (MOVSDloadidx1 [c+d] {sym} ptr idx mem) +(MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) && is32Bit(c+8*d) -> (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) -(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) -(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) -(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) -(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) -(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) -(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) +(MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+2*d) -> (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) +(MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) +(MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) +(MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+d) -> (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+4*d) -> (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) +(MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) && 
is32Bit(c+d) -> (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) +(MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) && is32Bit(c+8*d) -> (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) -(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVLstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) -> +(MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(2*c) -> (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) -(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(4*c) -> (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) -(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(c) -> (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) -(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) -> +(MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) && ValAndOff(x).canAdd(8*c) -> (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) // fold LEAQs together @@ -2301,22 +2301,22 @@ (ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) (LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(c+d) -> (LEAL [c+d] {s} x) -(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && 
is32Bit(off1+off2) -> (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) -> +(MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) -(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) -(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) -> +(MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && canMergeSym(sym1, sym2) && is32Bit(off1+off2) -> (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) (MOVQstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd(off) -> diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go index c51cbd2238..c984cbfb12 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go +++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go @@ -20,6 +20,7 @@ import "strings" // - Unused portions of AuxInt (or the Val portion of ValAndOff) are // filled by sign-extending the used portion. Users of AuxInt which interpret // AuxInt as unsigned (e.g. shifts) must be careful. +// - All SymOff opcodes require their offset to fit in an int32. // Suffixes encode the bit width of various instructions. 
// Q (quad word) = 64 bit @@ -189,17 +190,17 @@ func init() { // binary ops {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1 {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1 - {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int64", typ: "UInt64", clobberFlags: true}, // arg0 + auxint + {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1 - {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 - auxint + {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1 - {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint + {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMULQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMULL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint {name: "HMULQ", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width @@ -221,24 +222,24 @@ func init() { {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1 - {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint + {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1 - {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint + {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 {name: "XORL", argLength: 2, reg: gp21, 
asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1 - {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint + {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1 {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1 - {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int64"}, // arg0 compare to auxint + {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint @@ -255,7 +256,7 @@ func init() { {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0 {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0 - {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int64"}, // (arg0 & auxint) compare to 0 + {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0 {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0 {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0 diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index ae2dd5f550..763a1cbd4d 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -4824,7 +4824,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ADDQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, clobberFlags: true, asm: x86.AADDQ, @@ -4886,7 +4886,7 @@ var opcodeTable = [...]opInfo{ }, { name: "SUBQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -4952,7 +4952,7 @@ var opcodeTable = [...]opInfo{ }, { name: "MULQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5232,7 +5232,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ANDQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5298,7 +5298,7 @@ var opcodeTable = [...]opInfo{ }, { name: "ORQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ 
-5364,7 +5364,7 @@ var opcodeTable = [...]opInfo{ }, { name: "XORQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, resultInArg0: true, clobberFlags: true, @@ -5440,7 +5440,7 @@ var opcodeTable = [...]opInfo{ }, { name: "CMPQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: x86.ACMPQ, reg: regInfo{ @@ -5598,7 +5598,7 @@ var opcodeTable = [...]opInfo{ }, { name: "TESTQconst", - auxType: auxInt64, + auxType: auxInt32, argLen: 1, asm: x86.ATESTQ, reg: regInfo{ diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f9a94cac36..f2f4896410 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4680,7 +4680,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { return true } // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -4694,7 +4694,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVBload) @@ -4732,7 +4732,7 @@ func rewriteValueAMD64_OpAMD64MOVBload_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4746,6 +4746,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4755,7 +4758,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4769,6 +4772,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4778,7 +4784,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4792,6 +4798,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -4801,7 +4810,7 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { return true } // match: (MOVBloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -4815,6 +4824,9 @@ func rewriteValueAMD64_OpAMD64MOVBloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBloadidx1) v.AuxInt = c + d v.Aux = sym @@ -5440,7 +5452,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { return true } // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: 
canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -5455,7 +5467,7 @@ func rewriteValueAMD64_OpAMD64MOVBstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVBstore) @@ -5679,7 +5691,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconst_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { // match: (MOVBstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -5693,6 +5705,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5702,7 +5717,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { return true } // match: (MOVBstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVBstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -5716,6 +5731,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVBstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -5766,7 +5784,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { b := v.Block _ = b // match: (MOVBstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -5781,6 +5799,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -5791,7 +5812,7 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { return true } // match: (MOVBstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVBstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -5806,6 +5827,9 @@ func rewriteValueAMD64_OpAMD64MOVBstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVBstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -6837,7 +6861,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { return true } // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -6851,7 +6875,7 @@ func rewriteValueAMD64_OpAMD64MOVLload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVLload) @@ -6939,7 +6963,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6953,6 +6977,9 @@ func 
rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -6962,7 +6989,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6976,6 +7003,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -6985,7 +7015,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -6999,6 +7029,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -7008,7 +7041,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { return true } // match: (MOVLloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7022,6 +7055,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx1) v.AuxInt = c + d v.Aux = sym @@ -7034,7 +7070,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { // match: (MOVLloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7048,6 +7084,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + d v.Aux = sym @@ -7057,7 +7096,7 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { return true } // match: (MOVLloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVLloadidx4 [c+4*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -7071,6 +7110,9 @@ func rewriteValueAMD64_OpAMD64MOVLloadidx4_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVLloadidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -7390,7 +7432,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -7405,7 +7447,7 @@ func rewriteValueAMD64_OpAMD64MOVLstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVLstore) @@ -7693,7 +7735,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { return true } // match: (MOVLstoreconstidx1 [x] {sym} 
(ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7707,6 +7749,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7716,7 +7761,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { return true } // match: (MOVLstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7730,6 +7775,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7785,7 +7833,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { typ := &b.Func.Config.Types _ = typ // match: (MOVLstoreconstidx4 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVLstoreconstidx4 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7799,6 +7847,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -7808,7 +7859,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { return true } // match: (MOVLstoreconstidx4 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(4*c) // result: (MOVLstoreconstidx4 [ValAndOff(x).add(4*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -7822,6 +7873,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreconstidx4_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(4 * c)) { + break + } v.reset(OpAMD64MOVLstoreconstidx4) v.AuxInt = ValAndOff(x).add(4 * c) v.Aux = sym @@ -7903,7 +7957,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { return true } // match: (MOVLstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -7918,6 +7972,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -7928,7 +7985,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { return true } // match: (MOVLstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -7943,6 +8000,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -8063,7 +8123,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { b := v.Block _ = b // match: (MOVLstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVLstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8078,6 +8138,9 @@ func 
rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + d v.Aux = sym @@ -8088,7 +8151,7 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { return true } // match: (MOVLstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVLstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -8103,6 +8166,9 @@ func rewriteValueAMD64_OpAMD64MOVLstoreidx4_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVLstoreidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -8543,7 +8609,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { return true } // match: (MOVQload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -8557,7 +8623,7 @@ func rewriteValueAMD64_OpAMD64MOVQload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVQload) @@ -8645,7 +8711,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8659,6 +8725,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8668,7 +8737,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8682,6 +8751,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8691,7 +8763,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8705,6 +8777,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8714,7 +8789,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { return true } // match: (MOVQloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8728,6 +8803,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx1) v.AuxInt = c + d v.Aux = sym @@ -8740,7 +8818,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { // match: (MOVQloadidx8 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) 
// result: (MOVQloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8754,6 +8832,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + d v.Aux = sym @@ -8763,7 +8844,7 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { return true } // match: (MOVQloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVQloadidx8 [c+8*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -8777,6 +8858,9 @@ func rewriteValueAMD64_OpAMD64MOVQloadidx8_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVQloadidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -8954,7 +9038,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { return true } // match: (MOVQstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -8969,7 +9053,7 @@ func rewriteValueAMD64_OpAMD64MOVQstore_0(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVQstore) @@ -9215,7 +9299,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { return true } // match: (MOVQstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9229,6 +9313,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9238,7 +9325,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { return true } // match: (MOVQstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9252,6 +9339,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9264,7 +9354,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { // match: (MOVQstoreconstidx8 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVQstoreconstidx8 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9278,6 +9368,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -9287,7 +9380,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { return true } // match: (MOVQstoreconstidx8 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(8*c) // result: (MOVQstoreconstidx8 [ValAndOff(x).add(8*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -9301,6 +9394,9 @@ func 
rewriteValueAMD64_OpAMD64MOVQstoreconstidx8_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(8 * c)) { + break + } v.reset(OpAMD64MOVQstoreconstidx8) v.AuxInt = ValAndOff(x).add(8 * c) v.Aux = sym @@ -9340,7 +9436,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { return true } // match: (MOVQstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9355,6 +9451,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9365,7 +9464,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { return true } // match: (MOVQstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9380,6 +9479,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9393,7 +9495,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { // match: (MOVQstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVQstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9408,6 +9510,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + d v.Aux = sym @@ -9418,7 +9523,7 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { return true } // match: (MOVQstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVQstoreidx8 [c+8*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9433,6 +9538,9 @@ func rewriteValueAMD64_OpAMD64MOVQstoreidx8_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVQstoreidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -9605,7 +9713,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { return true } // match: (MOVSDloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9619,6 +9727,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym @@ -9628,7 +9739,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { return true } // match: (MOVSDloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9642,6 +9753,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx1) v.AuxInt = c + d v.Aux = sym @@ -9654,7 +9768,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { // match: (MOVSDloadidx8 [c] {sym} (ADDQconst [d] ptr) 
idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDloadidx8 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9668,6 +9782,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + d v.Aux = sym @@ -9677,7 +9794,7 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { return true } // match: (MOVSDloadidx8 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVSDloadidx8 [c+8*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -9691,6 +9808,9 @@ func rewriteValueAMD64_OpAMD64MOVSDloadidx8_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVSDloadidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -9874,7 +9994,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { return true } // match: (MOVSDstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9889,6 +10009,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9899,7 +10022,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { return true } // match: (MOVSDstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9914,6 +10037,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -9927,7 +10053,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { // match: (MOVSDstoreidx8 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSDstoreidx8 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9942,6 +10068,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + d v.Aux = sym @@ -9952,7 +10081,7 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { return true } // match: (MOVSDstoreidx8 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+8*d) // result: (MOVSDstoreidx8 [c+8*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -9967,6 +10096,9 @@ func rewriteValueAMD64_OpAMD64MOVSDstoreidx8_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 8*d)) { + break + } v.reset(OpAMD64MOVSDstoreidx8) v.AuxInt = c + 8*d v.Aux = sym @@ -10139,7 +10271,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { return true } // match: (MOVSSloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10153,6 +10285,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym @@ -10162,7 +10297,7 @@ func 
rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { return true } // match: (MOVSSloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10176,6 +10311,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx1) v.AuxInt = c + d v.Aux = sym @@ -10188,7 +10326,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { // match: (MOVSSloadidx4 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSloadidx4 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10202,6 +10340,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + d v.Aux = sym @@ -10211,7 +10352,7 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { return true } // match: (MOVSSloadidx4 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVSSloadidx4 [c+4*d] {sym} ptr idx mem) for { c := v.AuxInt @@ -10225,6 +10366,9 @@ func rewriteValueAMD64_OpAMD64MOVSSloadidx4_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVSSloadidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -10408,7 +10552,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { return true } // match: (MOVSSstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10423,6 +10567,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -10433,7 +10580,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { return true } // match: (MOVSSstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10448,6 +10595,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -10461,7 +10611,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { // match: (MOVSSstoreidx4 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVSSstoreidx4 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10476,6 +10626,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + d v.Aux = sym @@ -10486,7 +10639,7 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { return true } // match: (MOVSSstoreidx4 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+4*d) // result: (MOVSSstoreidx4 [c+4*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -10501,6 +10654,9 @@ func rewriteValueAMD64_OpAMD64MOVSSstoreidx4_0(v *Value) bool { idx := v_1.Args[0] val 
:= v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 4*d)) { + break + } v.reset(OpAMD64MOVSSstoreidx4) v.AuxInt = c + 4*d v.Aux = sym @@ -11028,7 +11184,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { return true } // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem) for { off1 := v.AuxInt @@ -11042,7 +11198,7 @@ func rewriteValueAMD64_OpAMD64MOVWload_0(v *Value) bool { sym2 := v_0.Aux base := v_0.Args[0] mem := v.Args[1] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVWload) @@ -11130,7 +11286,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11144,6 +11300,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11153,7 +11312,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} idx (ADDQconst [d] ptr) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11167,6 +11326,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { d := v_1.AuxInt ptr := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11176,7 +11338,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11190,6 +11352,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11199,7 +11364,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { return true } // match: (MOVWloadidx1 [c] {sym} (ADDQconst [d] idx) ptr mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx1 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11213,6 +11378,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { idx := v_0.Args[0] ptr := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx1) v.AuxInt = c + d v.Aux = sym @@ -11225,7 +11393,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx1_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { // match: (MOVWloadidx2 [c] {sym} (ADDQconst [d] ptr) idx mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWloadidx2 [c+d] {sym} ptr idx mem) for { c := v.AuxInt @@ -11239,6 +11407,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + d v.Aux = sym @@ -11248,7 +11419,7 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { return true } // match: (MOVWloadidx2 [c] {sym} ptr (ADDQconst [d] idx) mem) - // cond: + // cond: is32Bit(c+2*d) // result: (MOVWloadidx2 [c+2*d] {sym} ptr idx mem) for { c := 
v.AuxInt @@ -11262,6 +11433,9 @@ func rewriteValueAMD64_OpAMD64MOVWloadidx2_0(v *Value) bool { d := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(is32Bit(c + 2*d)) { + break + } v.reset(OpAMD64MOVWloadidx2) v.AuxInt = c + 2*d v.Aux = sym @@ -11581,7 +11755,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_0(v *Value) bool { } func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem) - // cond: canMergeSym(sym1, sym2) + // cond: canMergeSym(sym1, sym2) && is32Bit(off1+off2) // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem) for { off1 := v.AuxInt @@ -11596,7 +11770,7 @@ func rewriteValueAMD64_OpAMD64MOVWstore_10(v *Value) bool { base := v_0.Args[0] val := v.Args[1] mem := v.Args[2] - if !(canMergeSym(sym1, sym2)) { + if !(canMergeSym(sym1, sym2) && is32Bit(off1+off2)) { break } v.reset(OpAMD64MOVWstore) @@ -11873,7 +12047,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { return true } // match: (MOVWstoreconstidx1 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11887,6 +12061,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11896,7 +12073,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { return true } // match: (MOVWstoreconstidx1 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx1 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11910,6 +12087,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx1_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx1) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11960,7 +12140,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { b := v.Block _ = b // match: (MOVWstoreconstidx2 [x] {sym} (ADDQconst [c] ptr) idx mem) - // cond: + // cond: ValAndOff(x).canAdd(c) // result: (MOVWstoreconstidx2 [ValAndOff(x).add(c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11974,6 +12154,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { ptr := v_0.Args[0] idx := v.Args[1] mem := v.Args[2] + if !(ValAndOff(x).canAdd(c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(c) v.Aux = sym @@ -11983,7 +12166,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { return true } // match: (MOVWstoreconstidx2 [x] {sym} ptr (ADDQconst [c] idx) mem) - // cond: + // cond: ValAndOff(x).canAdd(2*c) // result: (MOVWstoreconstidx2 [ValAndOff(x).add(2*c)] {sym} ptr idx mem) for { x := v.AuxInt @@ -11997,6 +12180,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreconstidx2_0(v *Value) bool { c := v_1.AuxInt idx := v_1.Args[0] mem := v.Args[2] + if !(ValAndOff(x).canAdd(2 * c)) { + break + } v.reset(OpAMD64MOVWstoreconstidx2) v.AuxInt = ValAndOff(x).add(2 * c) v.Aux = sym @@ -12075,7 +12261,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { return true } // match: (MOVWstoreidx1 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12090,6 +12276,9 @@ func 
rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -12100,7 +12289,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { return true } // match: (MOVWstoreidx1 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx1 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12115,6 +12304,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx1_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx1) v.AuxInt = c + d v.Aux = sym @@ -12235,7 +12427,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { b := v.Block _ = b // match: (MOVWstoreidx2 [c] {sym} (ADDQconst [d] ptr) idx val mem) - // cond: + // cond: is32Bit(c+d) // result: (MOVWstoreidx2 [c+d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12250,6 +12442,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { idx := v.Args[1] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + d)) { + break + } v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + d v.Aux = sym @@ -12260,7 +12455,7 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { return true } // match: (MOVWstoreidx2 [c] {sym} ptr (ADDQconst [d] idx) val mem) - // cond: + // cond: is32Bit(c+2*d) // result: (MOVWstoreidx2 [c+2*d] {sym} ptr idx val mem) for { c := v.AuxInt @@ -12275,6 +12470,9 @@ func rewriteValueAMD64_OpAMD64MOVWstoreidx2_0(v *Value) bool { idx := v_1.Args[0] val := v.Args[2] mem := v.Args[3] + if !(is32Bit(c + 2*d)) { + break + } v.reset(OpAMD64MOVWstoreidx2) v.AuxInt = c + 2*d v.Aux = sym diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go index bcf9318e2e..5f3a8c45d5 100644 --- a/src/cmd/internal/obj/x86/asm6.go +++ b/src/cmd/internal/obj/x86/asm6.go @@ -2269,6 +2269,13 @@ func oclass(ctxt *obj.Link, p *obj.Prog, a *obj.Addr) int { return Yxxx } if ctxt.Arch.Family == sys.AMD64 { + // Offset must fit in a 32-bit signed field (or fit in a 32-bit unsigned field + // where the sign extension doesn't matter). + // Note: The latter happens only in assembly, for example crypto/sha1/sha1block_amd64.s. + if !(a.Offset == int64(int32(a.Offset)) || + a.Offset == int64(uint32(a.Offset)) && p.As == ALEAL) { + return Yxxx + } switch a.Name { case obj.NAME_EXTERN, obj.NAME_STATIC, obj.NAME_GOTREF: // Global variables can't use index registers and their diff --git a/test/fixedbugs/issue21655.go b/test/fixedbugs/issue21655.go new file mode 100644 index 0000000000..4060c8ddbb --- /dev/null +++ b/test/fixedbugs/issue21655.go @@ -0,0 +1,40 @@ +// compile + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Make sure assembly offsets don't get too large. + +// To trigger issue21655, the index offset needs to be small +// enough to fit into an int32 (to get rewritten to an ADDQconst) +// but large enough to overflow an int32 after multiplying by the stride. 
+ +package main + +func f1(a []int64, i int64) int64 { + return a[i+1<<30] +} +func f2(a []int32, i int64) int32 { + return a[i+1<<30] +} +func f3(a []int16, i int64) int16 { + return a[i+1<<30] +} +func f4(a []int8, i int64) int8 { + return a[i+1<<31] +} +func f5(a []float64, i int64) float64 { + return a[i+1<<30] +} +func f6(a []float32, i int64) float32 { + return a[i+1<<30] +} + +// Note: Before the fix for issue 21655, f{1,2,5,6} made +// the compiler crash. f3 silently generated the wrong +// code, using an offset of -1<<31 instead of 1<<31. +// (This is due to the assembler accepting offsets +// like 0x80000000 and silently using them as +// signed 32 bit offsets.) +// f4 was ok, but testing it can't hurt. From 78952c06c53c8455d03430b17b5a7fe2693b5d35 Mon Sep 17 00:00:00 2001 From: Keith Randall Date: Thu, 21 Sep 2017 12:52:38 -0700 Subject: [PATCH 40/42] [release-branch.go1.9] cmd/compile: fix sign-extension merging rules If we have y = (MOVBQSX x) z = (MOVWQSX y) We used to use this rewrite rule: (MOVWQSX x:(MOVBQSX _)) -> x But that resulted in replacing z with a value whose type is only int16. Then if z is spilled and restored, it gets zero extended instead of sign extended. Instead use the rule (MOVWQSX (MOVBQSX x)) -> (MOVBQSX x) The result is has the correct type, so it can be spilled and restored correctly. It might mean that a few more extension ops might not be eliminated, but that's the price for correctness. Fixes #21963 Change-Id: I6ec82c3d2dbe43cc1fee6fb2bd6b3a72fca3af00 Reviewed-on: https://go-review.googlesource.com/65290 Reviewed-by: Cherry Zhang Run-TryBot: Cherry Zhang TryBot-Result: Gobot Gobot Reviewed-on: https://go-review.googlesource.com/70986 Run-TryBot: Russ Cox Reviewed-by: Keith Randall --- src/cmd/compile/internal/ssa/gen/AMD64.rules | 26 ++-- src/cmd/compile/internal/ssa/rewriteAMD64.go | 144 +++++++++---------- test/fixedbugs/issue21963.go | 27 ++++ 3 files changed, 113 insertions(+), 84 deletions(-) create mode 100644 test/fixedbugs/issue21963.go diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules index f1d53bc51f..ff38be550e 100644 --- a/src/cmd/compile/internal/ssa/gen/AMD64.rules +++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules @@ -2405,15 +2405,17 @@ (BSFQ (ORQconst [1<<16] (MOVWQZX x))) -> (BSFQ (ORQconst [1<<16] x)) // Redundant sign/zero extensions -(MOVLQSX x:(MOVLQSX _)) -> x -(MOVLQSX x:(MOVWQSX _)) -> x -(MOVLQSX x:(MOVBQSX _)) -> x -(MOVWQSX x:(MOVWQSX _)) -> x -(MOVWQSX x:(MOVBQSX _)) -> x -(MOVBQSX x:(MOVBQSX _)) -> x -(MOVLQZX x:(MOVLQZX _)) -> x -(MOVLQZX x:(MOVWQZX _)) -> x -(MOVLQZX x:(MOVBQZX _)) -> x -(MOVWQZX x:(MOVWQZX _)) -> x -(MOVWQZX x:(MOVBQZX _)) -> x -(MOVBQZX x:(MOVBQZX _)) -> x +// Note: see issue 21963. We have to make sure we use the right type on +// the resulting extension (the outer type, not the inner type). 
+(MOVLQSX (MOVLQSX x)) -> (MOVLQSX x) +(MOVLQSX (MOVWQSX x)) -> (MOVWQSX x) +(MOVLQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVWQSX (MOVWQSX x)) -> (MOVWQSX x) +(MOVWQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVBQSX (MOVBQSX x)) -> (MOVBQSX x) +(MOVLQZX (MOVLQZX x)) -> (MOVLQZX x) +(MOVLQZX (MOVWQZX x)) -> (MOVWQZX x) +(MOVLQZX (MOVBQZX x)) -> (MOVBQZX x) +(MOVWQZX (MOVWQZX x)) -> (MOVWQZX x) +(MOVWQZX (MOVBQZX x)) -> (MOVBQZX x) +(MOVBQZX (MOVBQZX x)) -> (MOVBQZX x) diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go index f2f4896410..9213616f83 100644 --- a/src/cmd/compile/internal/ssa/rewriteAMD64.go +++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go @@ -4319,16 +4319,16 @@ func rewriteValueAMD64_OpAMD64MOVBQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBQSX x:(MOVBQSX _)) + // match: (MOVBQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -4536,16 +4536,16 @@ func rewriteValueAMD64_OpAMD64MOVBQZX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVBQZX x:(MOVBQZX _)) + // match: (MOVBQZX (MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } @@ -6392,42 +6392,42 @@ func rewriteValueAMD64_OpAMD64MOVLQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVLQSX _)) + // match: (MOVLQSX (MOVLQSX x)) // cond: - // result: x + // result: (MOVLQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVLQSX) v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVWQSX _)) + // match: (MOVLQSX (MOVWQSX x)) // cond: - // result: x + // result: (MOVWQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) v.AddArg(x) return true } - // match: (MOVLQSX x:(MOVBQSX _)) + // match: (MOVLQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -6611,42 +6611,42 @@ func rewriteValueAMD64_OpAMD64MOVLQZX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVLQZX _)) + // match: (MOVLQZX (MOVLQZX x)) // cond: - // result: x + // result: (MOVLQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVLQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVLQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVLQZX) v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVWQZX _)) + // match: (MOVLQZX (MOVWQZX x)) // cond: - // result: x + // result: (MOVWQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQZX) v.AddArg(x) return true } - // match: (MOVLQZX x:(MOVBQZX _)) + // match: (MOVLQZX 
(MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } @@ -10767,29 +10767,29 @@ func rewriteValueAMD64_OpAMD64MOVWQSX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWQSX x:(MOVWQSX _)) + // match: (MOVWQSX (MOVWQSX x)) // cond: - // result: x + // result: (MOVWQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQSX) v.AddArg(x) return true } - // match: (MOVWQSX x:(MOVBQSX _)) + // match: (MOVWQSX (MOVBQSX x)) // cond: - // result: x + // result: (MOVBQSX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQSX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQSX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQSX) v.AddArg(x) return true } @@ -10999,29 +10999,29 @@ func rewriteValueAMD64_OpAMD64MOVWQZX_0(v *Value) bool { v.AddArg(x) return true } - // match: (MOVWQZX x:(MOVWQZX _)) + // match: (MOVWQZX (MOVWQZX x)) // cond: - // result: x + // result: (MOVWQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVWQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVWQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVWQZX) v.AddArg(x) return true } - // match: (MOVWQZX x:(MOVBQZX _)) + // match: (MOVWQZX (MOVBQZX x)) // cond: - // result: x + // result: (MOVBQZX x) for { - x := v.Args[0] - if x.Op != OpAMD64MOVBQZX { + v_0 := v.Args[0] + if v_0.Op != OpAMD64MOVBQZX { break } - v.reset(OpCopy) - v.Type = x.Type + x := v_0.Args[0] + v.reset(OpAMD64MOVBQZX) v.AddArg(x) return true } diff --git a/test/fixedbugs/issue21963.go b/test/fixedbugs/issue21963.go new file mode 100644 index 0000000000..996bd63d09 --- /dev/null +++ b/test/fixedbugs/issue21963.go @@ -0,0 +1,27 @@ +// run + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "fmt" + "runtime" +) + +//go:noinline +func f(x []int32, y *int8) int32 { + c := int32(int16(*y)) + runtime.GC() + return x[0] * c +} + +func main() { + var x = [1]int32{5} + var y int8 = -1 + if got, want := f(x[:], &y), int32(-5); got != want { + panic(fmt.Sprintf("wanted %d, got %d", want, got)) + } +} From d93cb46280ae4710c5c6113159c7973a08a72249 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Sat, 14 Oct 2017 22:47:24 -0400 Subject: [PATCH 41/42] [release-branch.go1.9] runtime: use simple, more robust fastrandn CL 36932 (speed up fastrandn) made it faster but introduced bad interference with some properties of fastrand itself, making fastrandn not very random in certain ways. In particular, certain selects are demonstrably unfair. For Go 1.10 the new faster fastrandn has induced a new fastrand, which in turn has caused other follow-on bugs that are still being discovered and fixed. For Go 1.9.2, just go back to the barely slower % implementation that we used in Go 1.8 and earlier. This should restore fairness in select and any other problems caused by the clever fastrandn. The test in this CL is copied from CL 62530. Fixes #22253. 
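
For reference, the two reduction strategies at issue can be sketched as
pure functions of the generator output (the wrapper names reduceMul and
reduceMod are illustrative only; the real expressions are the ones
removed and added in runtime.fastrandn in the diff below, and n > 0 as
in the runtime):

    // Multiply-shift reduction used in Go 1.9.0/1.9.1: the result is
    // essentially determined by the high-order bits of r, so any
    // regularity in the high bits of fastrand's output feeds straight
    // into fastrandn.
    func reduceMul(r, n uint32) uint32 { return uint32(uint64(r) * uint64(n) >> 32) }

    // Modulo reduction restored by this CL: marginally slower, but the
    // result mixes in the low-order bits of r as well.
    func reduceMod(r, n uint32) uint32 { return r % n }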
Change-Id: Ibcf948a7bce981452e05c90dbdac122043f6f813 Reviewed-on: https://go-review.googlesource.com/70991 Run-TryBot: Russ Cox TryBot-Result: Gobot Gobot Reviewed-by: Ian Lance Taylor Reviewed-by: Keith Randall --- src/runtime/chan_test.go | 57 ++++++++++++++++++++++++++++++++++++++++ src/runtime/stubs.go | 8 +++--- 2 files changed, 62 insertions(+), 3 deletions(-) diff --git a/src/runtime/chan_test.go b/src/runtime/chan_test.go index a75fa1b992..0c94cf1a63 100644 --- a/src/runtime/chan_test.go +++ b/src/runtime/chan_test.go @@ -5,6 +5,7 @@ package runtime_test import ( + "math" "runtime" "sync" "sync/atomic" @@ -430,6 +431,62 @@ func TestSelectStress(t *testing.T) { wg.Wait() } +func TestSelectFairness(t *testing.T) { + const trials = 10000 + c1 := make(chan byte, trials+1) + c2 := make(chan byte, trials+1) + for i := 0; i < trials+1; i++ { + c1 <- 1 + c2 <- 2 + } + c3 := make(chan byte) + c4 := make(chan byte) + out := make(chan byte) + done := make(chan byte) + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + for { + var b byte + select { + case b = <-c3: + case b = <-c4: + case b = <-c1: + case b = <-c2: + } + select { + case out <- b: + case <-done: + return + } + } + }() + cnt1, cnt2 := 0, 0 + for i := 0; i < trials; i++ { + switch b := <-out; b { + case 1: + cnt1++ + case 2: + cnt2++ + default: + t.Fatalf("unexpected value %d on channel", b) + } + } + // If the select in the goroutine is fair, + // cnt1 and cnt2 should be about the same value. + // With 10,000 trials, the expected margin of error at + // a confidence level of five nines is 4.4172 / (2 * Sqrt(10000)). + r := float64(cnt1) / trials + e := math.Abs(r - 0.5) + t.Log(cnt1, cnt2, r, e) + if e > 4.4172/(2*math.Sqrt(trials)) { + t.Errorf("unfair select: in %d trials, results were %d, %d", trials, cnt1, cnt2) + } + close(done) + wg.Wait() +} + func TestChanSendInterface(t *testing.T) { type mt struct{} m := &mt{} diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index c4f32a8482..72d21187ec 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -105,9 +105,11 @@ func fastrand() uint32 { //go:nosplit func fastrandn(n uint32) uint32 { - // This is similar to fastrand() % n, but faster. - // See http://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - return uint32(uint64(fastrand()) * uint64(n) >> 32) + // Don't be clever. + // fastrand is not good enough for cleverness. + // Just use mod. + // See golang.org/issue/21806. + return fastrand() % n } //go:linkname sync_fastrand sync.fastrand From 2ea7d3461bb41d0ae12b56ee52d43314bcdb97f9 Mon Sep 17 00:00:00 2001 From: Russ Cox Date: Wed, 25 Oct 2017 14:29:22 -0400 Subject: [PATCH 42/42] [release-branch.go1.9] go1.9.2 Change-Id: Idb72e9f562887680e0b287649a4ae1325d7e3eb5 Reviewed-on: https://go-review.googlesource.com/71271 Run-TryBot: Russ Cox Reviewed-by: Chris Broadfoot TryBot-Result: Gobot Gobot --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 66a2beedba..988b0518c0 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -go1.9.1 \ No newline at end of file +go1.9.2 \ No newline at end of file
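
A quick check of the statistical bound used in TestSelectFairness above:
under a fair select, cnt1 is binomial with p = 0.5 over 10,000 trials, so
the ratio r = cnt1/trials has standard deviation 0.5/Sqrt(10000) = 0.005.
A two-sided confidence level of five nines corresponds to a normal
quantile of about 4.4172, giving the threshold 4.4172/(2*Sqrt(10000)),
roughly 0.022; the test therefore tolerates approximately a 4780/5220
split before reporting an unfair select.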