Merge remote-tracking branch 'upstream/master' into heapsampling

Raul Silvera 2019-01-18 08:41:43 -08:00
commit 47b1d87e13
10 changed files with 275 additions and 85 deletions


@@ -239,9 +239,9 @@ for {
except for calls that simultaneously cross between Go and
assembly <em>and</em> cross a package boundary. If linking results
in an error like "relocation target not defined for ABIInternal (but
is defined for ABI0)", please refer to help section of the ABI
design document.
<!-- TODO(austin): Link to the design doc. -->
is defined for ABI0)", please refer to the
<a href="https://github.com/golang/proposal/blob/master/design/27539-internal-abi.md#compatibility">compatibility section</a>
of the ABI design document.
</p>
<p><!-- CL 145179 -->


@@ -0,0 +1,17 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Error with newline inserted into constant expression.
// Compilation test only, nothing to run.
package cgotest
// static void issue29781F(char **p, int n) {}
// #define ISSUE29781C 0
import "C"
func issue29781G() {
var p *C.char
C.issue29781F(&p, C.ISSUE29781C+1)
}


@@ -131,12 +131,27 @@ func gofmt(n interface{}) string {
// AST expression onto a single line. The lexer normally inserts a
// semicolon at each newline, so we can replace newline with semicolon.
// However, we can't do that in cases where the lexer would not insert
// a semicolon. Fortunately we only have to worry about cases that
// can occur in an expression passed through gofmt, which just means
// composite literals.
// a semicolon. We only have to worry about cases that can occur in an
// expression passed through gofmt, which means composite literals and
// (due to the printer possibly inserting newlines because of position
// information) operators.
var gofmtLineReplacer = strings.NewReplacer(
"{\n", "{",
",\n", ",",
"++\n", "++;",
"--\n", "--;",
"+\n", "+",
"-\n", "-",
"*\n", "*",
"/\n", "/",
"%\n", "%",
"&\n", "&",
"|\n", "|",
"^\n", "^",
"<\n", "<",
">\n", ">",
"=\n", "=",
",\n", ",",
"\n", ";",
)
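Aside (not part of the upstream diff): the replacer above flattens multi-line gofmt output onto a single line by substituting each newline with whatever keeps the expression lexically equivalent, mirroring the lexer's semicolon-insertion rules. A minimal, self-contained sketch of the same idea, with hypothetical names:

package main

import (
	"fmt"
	"strings"
)

// lineFlattener is a trimmed-down stand-in for gofmtLineReplacer: after "{",
// ",", or a binary operator the lexer would not insert a semicolon, so the
// newline is simply dropped; any other newline becomes an explicit semicolon.
var lineFlattener = strings.NewReplacer(
	"{\n", "{",
	",\n", ",",
	"+\n", "+",
	"\n", ";",
)

func main() {
	fmt.Println(lineFlattener.Replace("[]int{\n1,\n2,\n3,\n}")) // []int{1,2,3,}
	fmt.Println(lineFlattener.Replace("a +\nb"))                // a +b
}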


@@ -866,12 +866,54 @@ func (tg *testgoData) failSSH() {
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
if testing.Short() {
t.Skip("don't rebuild the standard library in short mode")
t.Skip("skipping lengthy test in short mode")
}
tg := testgo(t)
defer tg.cleanup()
// Copy the runtime packages into a temporary GOROOT
// so that we can change files.
for _, copydir := range []string{
"src/runtime",
"src/internal/bytealg",
"src/internal/cpu",
"src/unsafe",
filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH),
"pkg/include",
} {
srcdir := filepath.Join(testGOROOT, copydir)
tg.tempDir(filepath.Join("goroot", copydir))
err := filepath.Walk(srcdir,
func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
srcrel, err := filepath.Rel(srcdir, path)
if err != nil {
return err
}
dest := filepath.Join("goroot", copydir, srcrel)
data, err := ioutil.ReadFile(path)
if err != nil {
return err
}
tg.tempFile(dest, string(data))
if err := os.Chmod(tg.path(dest), info.Mode()); err != nil {
return err
}
return nil
})
if err != nil {
t.Fatal(err)
}
}
tg.setenv("GOROOT", tg.path("goroot"))
addVar := func(name string, idx int) (restore func()) {
data, err := ioutil.ReadFile(name)
if err != nil {
@@ -900,7 +942,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
// Changing mtime of runtime/internal/sys/sys.go
// should have no effect: only the content matters.
// In fact this should be true even outside a release branch.
sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go"
sys := tg.path("goroot/src/runtime/internal/sys/sys.go")
tg.sleep()
restore := addVar(sys, 0)
restore()
@@ -915,7 +957,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addVar(sys, 2)
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
@@ -924,9 +966,6 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
// Everything is out of date. Rebuild to leave things in a better state.
tg.run("install", "std")
}
func testLocalRun(tg *testgoData, exepath, local, match string) {
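Aside (not part of the upstream diff): the walk added in the test above copies selected GOROOT subtrees into a temporary directory while preserving file modes. A hedged, standalone sketch of that copy pattern; copyTree and the paths in main are hypothetical:

package main

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// copyTree recreates every regular file under src below dst, keeping the
// relative path and the original file mode; directories are created on demand.
func copyTree(dst, src string) error {
	return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if info.IsDir() {
			return nil
		}
		rel, err := filepath.Rel(src, path)
		if err != nil {
			return err
		}
		data, err := ioutil.ReadFile(path)
		if err != nil {
			return err
		}
		target := filepath.Join(dst, rel)
		if err := os.MkdirAll(filepath.Dir(target), 0777); err != nil {
			return err
		}
		return ioutil.WriteFile(target, data, info.Mode())
	})
}

func main() {
	// Example invocation; both paths are placeholders.
	if err := copyTree(filepath.Join(os.TempDir(), "goroot-copy"), "/usr/local/go/src/unsafe"); err != nil {
		panic(err)
	}
}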


@@ -38,8 +38,8 @@ func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
}
// SetString sets z to the value of s and returns z and a boolean indicating
// success. s can be given as a fraction "a/b" or as a floating-point number
// optionally followed by an exponent. The entire string (not just a prefix)
// success. s can be given as a fraction "a/b" or as a decimal floating-point
// number optionally followed by an exponent. The entire string (not just a prefix)
// must be valid for success. If the operation failed, the value of z is
// undefined but the returned value is nil.
func (z *Rat) SetString(s string) (*Rat, bool) {
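Aside (not part of the upstream diff): a short usage sketch of the two input forms the updated doc comment describes, a fraction "a/b" and a decimal number with an exponent:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	r1, ok1 := new(big.Rat).SetString("3/4") // fraction form
	fmt.Println(r1, ok1)                     // 3/4 true

	r2, ok2 := new(big.Rat).SetString("314e-2") // decimal with exponent
	fmt.Println(r2, ok2)                        // 157/50 true

	r3, ok3 := new(big.Rat).SetString("1.5x") // the entire string must be valid
	fmt.Println(r3, ok3)                       // <nil> false
}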
@@ -78,6 +78,7 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
}
// mantissa
// TODO(gri) allow other bases besides 10 for mantissa and exponent? (issue #29799)
var ecorr int
z.a.abs, _, ecorr, err = z.a.abs.scan(r, 10, true)
if err != nil {


@@ -478,10 +478,10 @@ func urlErrorOp(method string) string {
// error.
//
// If the returned error is nil, the Response will contain a non-nil
// Body which the user is expected to close. If the Body is not
// closed, the Client's underlying RoundTripper (typically Transport)
// may not be able to re-use a persistent TCP connection to the server
// for a subsequent "keep-alive" request.
// Body which the user is expected to close. If the Body is not both
// read to EOF and closed, the Client's underlying RoundTripper
// (typically Transport) may not be able to re-use a persistent TCP
// connection to the server for a subsequent "keep-alive" request.
//
// The request Body, if non-nil, will be closed by the underlying
// Transport, even on errors.
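Aside (not part of the upstream diff): a hedged client-side sketch of the read-to-EOF-and-close pattern the updated comment calls for, so the Transport can reuse the keep-alive connection; fetchStatus and the URL are illustrative:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

func fetchStatus(url string) (int, error) {
	resp, err := http.Get(url)
	if err != nil {
		return 0, err
	}
	defer func() {
		// Drain the body to EOF, then close it, so the underlying
		// persistent connection can be reused for a later request.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}()
	return resp.StatusCode, nil
}

func main() {
	code, err := fetchStatus("https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	fmt.Println("status:", code)
}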


@@ -419,6 +419,115 @@ func (s *mspan) physPageBounds() (uintptr, uintptr) {
return start, end
}
func (h *mheap) coalesce(s *mspan) {
// We scavenge s at the end after coalescing if s or anything
// it merged with is marked scavenged.
needsScavenge := false
prescavenged := s.released() // number of bytes already scavenged.
// merge is a helper which merges other into s, deletes references to other
// in heap metadata, and then discards it. other must be adjacent to s.
merge := func(other *mspan) {
// Adjust s via base and npages and also in heap metadata.
s.npages += other.npages
s.needzero |= other.needzero
if other.startAddr < s.startAddr {
s.startAddr = other.startAddr
h.setSpan(s.base(), s)
} else {
h.setSpan(s.base()+s.npages*pageSize-1, s)
}
// If other or s are scavenged, then we need to scavenge the final coalesced span.
needsScavenge = needsScavenge || other.scavenged || s.scavenged
prescavenged += other.released()
// The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node.
if other.scavenged {
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
other.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(other))
}
// realign is a helper which shrinks other and grows s such that their
// boundary is on a physical page boundary.
realign := func(a, b, other *mspan) {
// Caller must ensure a.startAddr < b.startAddr and that either a or
// b is s. a and b must be adjacent. other is whichever of the two is
// not s.
// If pageSize <= physPageSize then spans are always aligned
// to physical page boundaries, so just exit.
if pageSize <= physPageSize {
return
}
// Since we're resizing other, we must remove it from the treap.
if other.scavenged {
h.scav.removeSpan(other)
} else {
h.free.removeSpan(other)
}
// Round boundary to the nearest physical page size, toward the
// scavenged span.
boundary := b.startAddr
if a.scavenged {
boundary &^= (physPageSize - 1)
} else {
boundary = (boundary + physPageSize - 1) &^ (physPageSize - 1)
}
a.npages = (boundary - a.startAddr) / pageSize
b.npages = (b.startAddr + b.npages*pageSize - boundary) / pageSize
b.startAddr = boundary
h.setSpan(boundary-1, a)
h.setSpan(boundary, b)
// Re-insert other now that it has a new size.
if other.scavenged {
h.scav.insert(other)
} else {
h.free.insert(other)
}
}
// Coalesce with earlier, later spans.
if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
if s.scavenged == before.scavenged {
merge(before)
} else {
realign(before, s, before)
}
}
// Now check to see if next (greater addresses) span is free and can be coalesced.
if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
if s.scavenged == after.scavenged {
merge(after)
} else {
realign(s, after, after)
}
}
if needsScavenge {
// When coalescing spans, some physical pages which
// were not returned to the OS previously because
// they were only partially covered by the span suddenly
// become available for scavenging. We want to make sure
// those holes are filled in, and the span is properly
// scavenged. Rather than trying to detect those holes
// directly, we collect how many bytes were already
// scavenged above and subtract that from heap_released
// before re-scavenging the entire newly-coalesced span,
// which will implicitly bump up heap_released.
memstats.heap_released -= uint64(prescavenged)
s.scavenge()
}
}
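Aside (not part of the upstream diff): the boundary adjustment in realign relies on power-of-two mask arithmetic: clearing the low bits rounds an address down to a physical page boundary, and adding physPageSize-1 first rounds it up. A tiny self-contained sketch, assuming a 4 KiB physical page:

package main

import "fmt"

func main() {
	const physPageSize = 0x1000 // assumed 4 KiB physical page for the sketch

	addr := uintptr(0x12345)

	down := addr &^ (physPageSize - 1)                    // round down to a page boundary
	up := (addr + physPageSize - 1) &^ (physPageSize - 1) // round up to a page boundary

	fmt.Printf("addr=%#x down=%#x up=%#x\n", addr, down, up)
	// addr=0x12345 down=0x12000 up=0x13000
}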
func (s *mspan) scavenge() uintptr {
// start and end must be rounded in, otherwise madvise
// will round them *out* and release more memory
@@ -1215,62 +1324,8 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
s.unusedsince = nanotime()
}
// We scavenge s at the end after coalescing if s or anything
// it merged with is marked scavenged.
needsScavenge := false
prescavenged := s.released() // number of bytes already scavenged.
// Coalesce with earlier, later spans.
if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
// Now adjust s.
s.startAddr = before.startAddr
s.npages += before.npages
s.needzero |= before.needzero
h.setSpan(before.base(), s)
// If before or s are scavenged, then we need to scavenge the final coalesced span.
needsScavenge = needsScavenge || before.scavenged || s.scavenged
prescavenged += before.released()
// The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node.
if before.scavenged {
h.scav.removeSpan(before)
} else {
h.free.removeSpan(before)
}
before.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(before))
}
// Now check to see if next (greater addresses) span is free and can be coalesced.
if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
s.npages += after.npages
s.needzero |= after.needzero
h.setSpan(s.base()+s.npages*pageSize-1, s)
needsScavenge = needsScavenge || after.scavenged || s.scavenged
prescavenged += after.released()
if after.scavenged {
h.scav.removeSpan(after)
} else {
h.free.removeSpan(after)
}
after.state = mSpanDead
h.spanalloc.free(unsafe.Pointer(after))
}
if needsScavenge {
// When coalescing spans, some physical pages which
// were not returned to the OS previously because
// they were only partially covered by the span suddenly
// become available for scavenging. We want to make sure
// those holes are filled in, and the span is properly
// scavenged. Rather than trying to detect those holes
// directly, we collect how many bytes were already
// scavenged above and subtract that from heap_released
// before re-scavenging the entire newly-coalesced span,
// which will implicitly bump up heap_released.
memstats.heap_released -= uint64(prescavenged)
s.scavenge()
}
// Coalesce span with neighbors.
h.coalesce(s)
// Insert s into the appropriate treap.
if s.scavenged {
@@ -1304,6 +1359,10 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
}
n := t.prev()
h.free.erase(t)
// Now that s is scavenged, we must eagerly coalesce it
// with its neighbors to prevent having two spans with
// the same scavenged state adjacent to each other.
h.coalesce(s)
t = n
h.scav.insert(s)
released += r
@@ -1323,6 +1382,10 @@ func (h *mheap) scavengeAll(now, limit uint64) uintptr {
r := s.scavenge()
if r != 0 {
h.free.erase(t)
// Now that s is scavenged, we must eagerly coalesce it
// with its neighbors to prevent having two spans with
// the same scavenged state adjacent to each other.
h.coalesce(s)
h.scav.insert(s)
released += r
}


@@ -22,7 +22,10 @@ var memProfBuf bytes.Buffer
var memProfStr string
func MemProf() {
for i := 0; i < 1000; i++ {
// Force heap sampling for determinism.
runtime.MemProfileRate = 1
for i := 0; i < 10; i++ {
fmt.Fprintf(&memProfBuf, "%*d\n", i, i)
}
memProfStr = memProfBuf.String()
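Aside (not part of the upstream diff): the same determinism trick in an ordinary program: setting runtime.MemProfileRate to 1 before the allocations of interest records every allocation instead of a sample. A hedged sketch; the output path is just an example:

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Record every allocation instead of sampling. Set this as early as
	// possible, before the allocations that should appear in the profile.
	runtime.MemProfileRate = 1

	buf := make([][]byte, 0, 64)
	for i := 0; i < 64; i++ {
		buf = append(buf, make([]byte, 1024))
	}
	_ = buf

	f, err := os.Create("mem.pprof")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	runtime.GC() // get up-to-date heap statistics before writing the profile
	if err := pprof.WriteHeapProfile(f); err != nil {
		panic(err)
	}
}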


@@ -706,6 +706,55 @@ func TestRacyOutput(t *T) {
}
}
// The late log message did not include the test name. Issue 29388.
func TestLogAfterComplete(t *T) {
ctx := newTestContext(1, newMatcher(regexp.MatchString, "", ""))
var buf bytes.Buffer
t1 := &T{
common: common{
// Use a buffered channel so that tRunner can write
// to it although nothing is reading from it.
signal: make(chan bool, 1),
w: &buf,
},
context: ctx,
}
c1 := make(chan bool)
c2 := make(chan string)
tRunner(t1, func(t *T) {
t.Run("TestLateLog", func(t *T) {
go func() {
defer close(c2)
defer func() {
p := recover()
if p == nil {
c2 <- "subtest did not panic"
return
}
s, ok := p.(string)
if !ok {
c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p)
return
}
const want = "Log in goroutine after TestLateLog has completed"
if !strings.Contains(s, want) {
c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want)
}
}()
<-c1
t.Log("log after test")
}()
})
})
close(c1)
if s := <-c2; s != "" {
t.Error(s)
}
}
func TestBenchmark(t *T) {
res := Benchmark(func(b *B) {
for i := 0; i < 5; i++ {


@@ -618,17 +618,20 @@ func (c *common) log(s string) {
func (c *common) logDepth(s string, depth int) {
c.mu.Lock()
defer c.mu.Unlock()
// If this test has already finished try and log this message with our parent
// with this test name tagged so we know where it came from.
// If we don't have a parent panic.
if c.done {
if c.parent != nil {
c.parent.logDepth(s, depth+1)
} else {
panic("Log in goroutine after " + c.name + " has completed")
}
} else {
if !c.done {
c.output = append(c.output, c.decorate(s, depth+1)...)
} else {
// This test has already finished. Try and log this message
// with our parent. If we don't have a parent, panic.
for parent := c.parent; parent != nil; parent = parent.parent {
parent.mu.Lock()
defer parent.mu.Unlock()
if !parent.done {
parent.output = append(parent.output, parent.decorate(s, depth+1)...)
return
}
}
panic("Log in goroutine after " + c.name + " has completed")
}
}
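Aside (not part of the upstream diff): a sketch of the user-facing situation this change targets. A goroutine leaked by a finished subtest logs late; with the new logDepth the message is attributed to the nearest ancestor that is still running, and the panic (now naming the subtest) happens only once every test in the chain has completed. Names are hypothetical, and in practice this would live in a _test.go file:

package example

import (
	"testing"
	"time"
)

func TestParent(t *testing.T) {
	t.Run("child", func(t *testing.T) {
		go func() {
			time.Sleep(10 * time.Millisecond)
			// "child" has finished by now; the message is recorded
			// against TestParent, which is still running.
			t.Log("late message from a leaked goroutine")
		}()
	})
	// Keep the parent alive long enough for the late log to arrive.
	time.Sleep(50 * time.Millisecond)
}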