From 006a5e7d00992cfae6ac406959512d680025f75c Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Fri, 11 Jan 2019 14:06:45 -0800
Subject: [PATCH 01/10] testing: report the failing test in a late log panic
Updates #29388
Change-Id: Icb0e6048d05fde7a5486b923ff62147edb5c8dac
Reviewed-on: https://go-review.googlesource.com/c/157617
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Brad Fitzpatrick
Reviewed-by: Damien Neil
---
src/testing/sub_test.go | 49 +++++++++++++++++++++++++++++++++++++++++
src/testing/testing.go | 23 ++++++++++---------
2 files changed, 62 insertions(+), 10 deletions(-)
diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go
index 8c989714a1..5a6d51be59 100644
--- a/src/testing/sub_test.go
+++ b/src/testing/sub_test.go
@@ -706,6 +706,55 @@ func TestRacyOutput(t *T) {
}
}
+// The late log message did not include the test name. Issue 29388.
+func TestLogAfterComplete(t *T) {
+ ctx := newTestContext(1, newMatcher(regexp.MatchString, "", ""))
+ var buf bytes.Buffer
+ t1 := &T{
+ common: common{
+ // Use a buffered channel so that tRunner can write
+ // to it although nothing is reading from it.
+ signal: make(chan bool, 1),
+ w: &buf,
+ },
+ context: ctx,
+ }
+
+ c1 := make(chan bool)
+ c2 := make(chan string)
+ tRunner(t1, func(t *T) {
+ t.Run("TestLateLog", func(t *T) {
+ go func() {
+ defer close(c2)
+ defer func() {
+ p := recover()
+ if p == nil {
+ c2 <- "subtest did not panic"
+ return
+ }
+ s, ok := p.(string)
+ if !ok {
+ c2 <- fmt.Sprintf("subtest panic with unexpected value %v", p)
+ return
+ }
+ const want = "Log in goroutine after TestLateLog has completed"
+ if !strings.Contains(s, want) {
+ c2 <- fmt.Sprintf("subtest panic %q does not contain %q", s, want)
+ }
+ }()
+
+ <-c1
+ t.Log("log after test")
+ }()
+ })
+ })
+ close(c1)
+
+ if s := <-c2; s != "" {
+ t.Error(s)
+ }
+}
+
func TestBenchmark(t *T) {
res := Benchmark(func(b *B) {
for i := 0; i < 5; i++ {
diff --git a/src/testing/testing.go b/src/testing/testing.go
index 0ac51b6fe5..3068630e8a 100644
--- a/src/testing/testing.go
+++ b/src/testing/testing.go
@@ -618,17 +618,20 @@ func (c *common) log(s string) {
func (c *common) logDepth(s string, depth int) {
c.mu.Lock()
defer c.mu.Unlock()
- // If this test has already finished try and log this message with our parent
- // with this test name tagged so we know where it came from.
- // If we don't have a parent panic.
- if c.done {
- if c.parent != nil {
- c.parent.logDepth(s, depth+1)
- } else {
- panic("Log in goroutine after " + c.name + " has completed")
- }
- } else {
+ if !c.done {
c.output = append(c.output, c.decorate(s, depth+1)...)
+ } else {
+ // This test has already finished. Try and log this message
+ // with our parent. If we don't have a parent, panic.
+ for parent := c.parent; parent != nil; parent = parent.parent {
+ parent.mu.Lock()
+ defer parent.mu.Unlock()
+ if !parent.done {
+ parent.output = append(parent.output, parent.decorate(s, depth+1)...)
+ return
+ }
+ }
+ panic("Log in goroutine after " + c.name + " has completed")
}
}
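
For illustration, a minimal standalone sketch of the new logDepth behavior (test and package names invented, not part of the patch): a late log from a finished subtest is appended to the output of the nearest still-running ancestor, and the panic, raised only when every ancestor has finished, now names the test that actually completed (e.g. TestLate/sub) rather than only the root test.

    package late_test

    import (
        "testing"
        "time"
    )

    func TestLate(t *testing.T) {
        t.Run("sub", func(t *testing.T) {
            go func() {
                // By the time this runs, the subtest is done but TestLate
                // is not, so the message lands in TestLate's output.
                time.Sleep(50 * time.Millisecond)
                t.Log("logged after the subtest returned")
            }()
        })
        // Keep the parent alive long enough for the late log to arrive;
        // without this sleep the goroutine would find every ancestor done
        // and panic with "Log in goroutine after TestLate/sub has completed".
        time.Sleep(100 * time.Millisecond)
    }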
From e2ff73286f68543d024f632a1764e93a6b21ccee Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Wed, 16 Jan 2019 21:42:00 -0500
Subject: [PATCH 02/10] doc/go1.12: link to ABIInternal design document
The ABI changes should be completely transparent to Go code, but could
cause linking issues in certain situations involving assembly code
reaching across package boundaries. If users encounter linking
problems, point them to the "Compatibility" section of the ABI design
document, which gives some guidance.
Change-Id: I4156d164562e2ec0de7ae8f9a3631a32ec45b317
Reviewed-on: https://go-review.googlesource.com/c/158237
Reviewed-by: Brad Fitzpatrick
---
doc/go1.12.html | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/doc/go1.12.html b/doc/go1.12.html
index e228d98a8c..41ebd50cb2 100644
--- a/doc/go1.12.html
+++ b/doc/go1.12.html
@@ -239,9 +239,9 @@ for {
except for calls that simultaneously cross between Go and
assembly and cross a package boundary. If linking results
in an error like "relocation target not defined for ABIInternal (but
- is defined for ABI0)", please refer to help section of the ABI
- design document.
-
+ is defined for ABI0)", please refer to the
+ compatibility section
+ of the ABI design document.
From 79ac638e41abed1d8f46762ecd6d63a93d9bb382 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Mon, 14 Jan 2019 21:27:29 +0000
Subject: [PATCH 03/10] runtime: refactor coalescing into its own method
The coalescing process is complex and in a follow-up change we'll need
to do it in more than one place, so this change factors out the
coalescing code in freeSpanLocked into a method on mheap.
Change-Id: Ia266b6cb1157c1b8d3d8a4287b42fbcc032bbf3a
Reviewed-on: https://go-review.googlesource.com/c/157838
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---
src/runtime/mheap.go | 117 ++++++++++++++++++++++---------------------
1 file changed, 61 insertions(+), 56 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index f5b5ba99b8..d409662451 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -419,6 +419,65 @@ func (s *mspan) physPageBounds() (uintptr, uintptr) {
return start, end
}
+func (h *mheap) coalesce(s *mspan) {
+ // We scavenge s at the end after coalescing if s or anything
+ // it merged with is marked scavenged.
+ needsScavenge := false
+ prescavenged := s.released() // number of bytes already scavenged.
+
+ // Coalesce with earlier, later spans.
+ if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
+ // Now adjust s.
+ s.startAddr = before.startAddr
+ s.npages += before.npages
+ s.needzero |= before.needzero
+ h.setSpan(before.base(), s)
+ // If before or s are scavenged, then we need to scavenge the final coalesced span.
+ needsScavenge = needsScavenge || before.scavenged || s.scavenged
+ prescavenged += before.released()
+ // The size is potentially changing so the treap needs to delete adjacent nodes and
+ // insert back as a combined node.
+ if before.scavenged {
+ h.scav.removeSpan(before)
+ } else {
+ h.free.removeSpan(before)
+ }
+ before.state = mSpanDead
+ h.spanalloc.free(unsafe.Pointer(before))
+ }
+
+ // Now check to see if next (greater addresses) span is free and can be coalesced.
+ if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
+ s.npages += after.npages
+ s.needzero |= after.needzero
+ h.setSpan(s.base()+s.npages*pageSize-1, s)
+ needsScavenge = needsScavenge || after.scavenged || s.scavenged
+ prescavenged += after.released()
+ if after.scavenged {
+ h.scav.removeSpan(after)
+ } else {
+ h.free.removeSpan(after)
+ }
+ after.state = mSpanDead
+ h.spanalloc.free(unsafe.Pointer(after))
+ }
+
+ if needsScavenge {
+ // When coalescing spans, some physical pages which
+ // were not returned to the OS previously because
+ // they were only partially covered by the span suddenly
+ // become available for scavenging. We want to make sure
+ // those holes are filled in, and the span is properly
+ // scavenged. Rather than trying to detect those holes
+ // directly, we collect how many bytes were already
+ // scavenged above and subtract that from heap_released
+ // before re-scavenging the entire newly-coalesced span,
+ // which will implicitly bump up heap_released.
+ memstats.heap_released -= uint64(prescavenged)
+ s.scavenge()
+ }
+}
+
func (s *mspan) scavenge() uintptr {
// start and end must be rounded in, otherwise madvise
// will round them *out* and release more memory
@@ -1215,62 +1274,8 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool, unusedsince i
s.unusedsince = nanotime()
}
- // We scavenge s at the end after coalescing if s or anything
- // it merged with is marked scavenged.
- needsScavenge := false
- prescavenged := s.released() // number of bytes already scavenged.
-
- // Coalesce with earlier, later spans.
- if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
- // Now adjust s.
- s.startAddr = before.startAddr
- s.npages += before.npages
- s.needzero |= before.needzero
- h.setSpan(before.base(), s)
- // If before or s are scavenged, then we need to scavenge the final coalesced span.
- needsScavenge = needsScavenge || before.scavenged || s.scavenged
- prescavenged += before.released()
- // The size is potentially changing so the treap needs to delete adjacent nodes and
- // insert back as a combined node.
- if before.scavenged {
- h.scav.removeSpan(before)
- } else {
- h.free.removeSpan(before)
- }
- before.state = mSpanDead
- h.spanalloc.free(unsafe.Pointer(before))
- }
-
- // Now check to see if next (greater addresses) span is free and can be coalesced.
- if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
- s.npages += after.npages
- s.needzero |= after.needzero
- h.setSpan(s.base()+s.npages*pageSize-1, s)
- needsScavenge = needsScavenge || after.scavenged || s.scavenged
- prescavenged += after.released()
- if after.scavenged {
- h.scav.removeSpan(after)
- } else {
- h.free.removeSpan(after)
- }
- after.state = mSpanDead
- h.spanalloc.free(unsafe.Pointer(after))
- }
-
- if needsScavenge {
- // When coalescing spans, some physical pages which
- // were not returned to the OS previously because
- // they were only partially covered by the span suddenly
- // become available for scavenging. We want to make sure
- // those holes are filled in, and the span is properly
- // scavenged. Rather than trying to detect those holes
- // directly, we collect how many bytes were already
- // scavenged above and subtract that from heap_released
- // before re-scavenging the entire newly-coalesced span,
- // which will implicitly bump up heap_released.
- memstats.heap_released -= uint64(prescavenged)
- s.scavenge()
- }
+ // Coalesce span with neighbors.
+ h.coalesce(s)
// Insert s into the appropriate treap.
if s.scavenged {
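
To make the extracted method easier to follow, here is a toy model of the same idea outside the runtime (types and names invented, scavenging bookkeeping omitted): a free span absorbs any free neighbor on either side, and the heap metadata, stood in for here by a page-to-span map, is updated to point at the survivor.

    package main

    import "fmt"

    type span struct {
        base, npages uintptr
        free         bool
    }

    // coalesce mimics mheap.coalesce: spans maps each page number to the
    // span covering it, playing the role of spanOf/setSpan.
    func coalesce(spans map[uintptr]*span, s *span) {
        if before, ok := spans[s.base-1]; ok && before.free {
            s.base = before.base
            s.npages += before.npages
            for p := before.base; p < before.base+before.npages; p++ {
                spans[p] = s // the absorbed span is discarded
            }
        }
        if after, ok := spans[s.base+s.npages]; ok && after.free {
            start := after.base
            s.npages += after.npages
            for p := start; p < start+after.npages; p++ {
                spans[p] = s
            }
        }
    }

    func main() {
        a := &span{base: 0, npages: 2, free: true}
        b := &span{base: 2, npages: 3, free: true}
        spans := map[uintptr]*span{}
        for p := uintptr(0); p < 2; p++ {
            spans[p] = a
        }
        for p := uintptr(2); p < 5; p++ {
            spans[p] = b
        }
        coalesce(spans, b)
        fmt.Println(b.base, b.npages) // 0 5: b absorbed a
    }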
From 2f99e889f02df9ef88fb1d26194eb2e8e725fda5 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Tue, 15 Jan 2019 23:48:57 +0000
Subject: [PATCH 04/10] runtime: de-duplicate coalescing code
Currently the coalescing code is duplicated between the two merge cases:
merging with the span immediately before the span being coalesced, and
merging with the span immediately after it. This change factors the shared
portions of these code paths into a local closure which acts as a helper.
Change-Id: I7919fbed3f9a833eafb324a21a4beaa81f2eaa91
Reviewed-on: https://go-review.googlesource.com/c/158077
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---
src/runtime/mheap.go | 50 ++++++++++++++++++++++----------------------
1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index d409662451..6a7f9bacdb 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -425,41 +425,41 @@ func (h *mheap) coalesce(s *mspan) {
needsScavenge := false
prescavenged := s.released() // number of bytes already scavenged.
- // Coalesce with earlier, later spans.
- if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
- // Now adjust s.
- s.startAddr = before.startAddr
- s.npages += before.npages
- s.needzero |= before.needzero
- h.setSpan(before.base(), s)
+ // merge is a helper which merges other into s, deletes references to other
+ // in heap metadata, and then discards it.
+ merge := func(other *mspan) {
+ // Adjust s via base and npages.
+ if other.startAddr < s.startAddr {
+ s.startAddr = other.startAddr
+ }
+ s.npages += other.npages
+ s.needzero |= other.needzero
+
// If before or s are scavenged, then we need to scavenge the final coalesced span.
- needsScavenge = needsScavenge || before.scavenged || s.scavenged
- prescavenged += before.released()
+ needsScavenge = needsScavenge || other.scavenged || s.scavenged
+ prescavenged += other.released()
+
// The size is potentially changing so the treap needs to delete adjacent nodes and
// insert back as a combined node.
- if before.scavenged {
- h.scav.removeSpan(before)
+ if other.scavenged {
+ h.scav.removeSpan(other)
} else {
- h.free.removeSpan(before)
+ h.free.removeSpan(other)
}
- before.state = mSpanDead
- h.spanalloc.free(unsafe.Pointer(before))
+ other.state = mSpanDead
+ h.spanalloc.free(unsafe.Pointer(other))
+ }
+
+ // Coalesce with earlier, later spans.
+ if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
+ merge(before)
+ h.setSpan(s.base(), s)
}
// Now check to see if next (greater addresses) span is free and can be coalesced.
if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
- s.npages += after.npages
- s.needzero |= after.needzero
+ merge(after)
h.setSpan(s.base()+s.npages*pageSize-1, s)
- needsScavenge = needsScavenge || after.scavenged || s.scavenged
- prescavenged += after.released()
- if after.scavenged {
- h.scav.removeSpan(after)
- } else {
- h.free.removeSpan(after)
- }
- after.state = mSpanDead
- h.spanalloc.free(unsafe.Pointer(after))
}
if needsScavenge {
From 6e9f664b9a68f2de84be9697c6ac851c7c7e1c26 Mon Sep 17 00:00:00 2001
From: Michael Anthony Knyszek
Date: Wed, 16 Jan 2019 00:15:34 +0000
Subject: [PATCH 05/10] runtime: don't coalesce scavenged spans with
unscavenged spans
As a result of changes earlier in Go 1.12, the scavenger became much
more aggressive. In particular, when scavenged and unscavenged spans
coalesced, they would always become scavenged. This resulted in most
spans becoming scavenged over time. While this is good for keeping the
RSS of the program low, it also causes many more undue page faults and
many more calls to madvise.
For most applications, the impact of this was negligible. But for
applications that repeatedly grow and shrink the heap by large amounts,
the overhead can be significant. The overhead was especially obvious on
older versions of Linux where MADV_FREE isn't available and
MADV_DONTNEED must be used.
This change makes it so that scavenged spans will never coalesce with
unscavenged spans. This results in fewer page faults overall. Aside
from this, the expected impact of this change is more heap growths on
average, as span allocations will be less likely to be fulfilled. To
mitigate this slightly, this change also coalesces spans eagerly after
scavenging, to at least ensure that all scavenged spans and all
unscavenged spans are coalesced with each other.
Also, this change adds additional logic in the case where two adjacent
spans cannot coalesce. In this case, on platforms where the physical
page size is larger than the runtime's page size, we realign the
boundary between the two adjacent spans to a physical page boundary. The
advantage of this approach is that "unscavengable" spans, that is, spans
which cannot be scavenged because they don't cover even a single physical
page, are grown to a size where they have a higher likelihood of being
discovered by the runtime's scavenging mechanisms when they border a
scavenged span. This helps keep the runtime from accruing pockets of
"unscavengable" memory between scavenged spans, which would otherwise
never coalesce.
We specifically choose to apply this logic to all spans because it
simplifies the code, even though it isn't strictly necessary. The
expectation is that this change will result in a slight loss in
performance on platforms where the physical page size is larger than the
runtime page size.
Updates #14045.
Change-Id: I64fd43eac1d6de6f51d7a2ecb72670f10bb12589
Reviewed-on: https://go-review.googlesource.com/c/158078
Run-TryBot: Michael Knyszek
TryBot-Result: Gobot Gobot
Reviewed-by: Austin Clements
---
src/runtime/mheap.go | 76 ++++++++++++++++++++++++++++++++++++++------
1 file changed, 67 insertions(+), 9 deletions(-)
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 6a7f9bacdb..1bf7bbecc0 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -426,14 +426,17 @@ func (h *mheap) coalesce(s *mspan) {
prescavenged := s.released() // number of bytes already scavenged.
// merge is a helper which merges other into s, deletes references to other
- // in heap metadata, and then discards it.
+ // in heap metadata, and then discards it. other must be adjacent to s.
merge := func(other *mspan) {
- // Adjust s via base and npages.
- if other.startAddr < s.startAddr {
- s.startAddr = other.startAddr
- }
+ // Adjust s via base and npages and also in heap metadata.
s.npages += other.npages
s.needzero |= other.needzero
+ if other.startAddr < s.startAddr {
+ s.startAddr = other.startAddr
+ h.setSpan(s.base(), s)
+ } else {
+ h.setSpan(s.base()+s.npages*pageSize-1, s)
+ }
// If before or s are scavenged, then we need to scavenge the final coalesced span.
needsScavenge = needsScavenge || other.scavenged || s.scavenged
@@ -450,16 +453,63 @@ func (h *mheap) coalesce(s *mspan) {
h.spanalloc.free(unsafe.Pointer(other))
}
+ // realign is a helper which shrinks other and grows s such that their
+ // boundary is on a physical page boundary.
+ realign := func(a, b, other *mspan) {
+ // Caller must ensure a.startAddr < b.startAddr and that either a or
+ // b is s. a and b must be adjacent. other is whichever of the two is
+ // not s.
+
+ // If pageSize >= physPageSize then spans are always aligned
+ // to physical page boundaries, so just exit.
+ if pageSize >= physPageSize {
+ return
+ }
+ // Since we're resizing other, we must remove it from the treap.
+ if other.scavenged {
+ h.scav.removeSpan(other)
+ } else {
+ h.free.removeSpan(other)
+ }
+ // Round boundary to the nearest physical page size, toward the
+ // scavenged span.
+ boundary := b.startAddr
+ if a.scavenged {
+ boundary &^= (physPageSize - 1)
+ } else {
+ boundary = (boundary + physPageSize - 1) &^ (physPageSize - 1)
+ }
+ a.npages = (boundary - a.startAddr) / pageSize
+ b.npages = (b.startAddr + b.npages*pageSize - boundary) / pageSize
+ b.startAddr = boundary
+
+ h.setSpan(boundary-1, a)
+ h.setSpan(boundary, b)
+
+ // Re-insert other now that it has a new size.
+ if other.scavenged {
+ h.scav.insert(other)
+ } else {
+ h.free.insert(other)
+ }
+ }
+
// Coalesce with earlier, later spans.
if before := spanOf(s.base() - 1); before != nil && before.state == mSpanFree {
- merge(before)
- h.setSpan(s.base(), s)
+ if s.scavenged == before.scavenged {
+ merge(before)
+ } else {
+ realign(before, s, before)
+ }
}
// Now check to see if next (greater addresses) span is free and can be coalesced.
if after := spanOf(s.base() + s.npages*pageSize); after != nil && after.state == mSpanFree {
- merge(after)
- h.setSpan(s.base()+s.npages*pageSize-1, s)
+ if s.scavenged == after.scavenged {
+ merge(after)
+ } else {
+ realign(s, after, after)
+ }
}
if needsScavenge {
@@ -1309,6 +1359,10 @@ func (h *mheap) scavengeLargest(nbytes uintptr) {
}
n := t.prev()
h.free.erase(t)
+ // Now that s is scavenged, we must eagerly coalesce it
+ // with its neighbors to prevent having two spans with
+ // the same scavenged state adjacent to each other.
+ h.coalesce(s)
t = n
h.scav.insert(s)
released += r
@@ -1328,6 +1382,10 @@ func (h *mheap) scavengeAll(now, limit uint64) uintptr {
r := s.scavenge()
if r != 0 {
h.free.erase(t)
+ // Now that s is scavenged, we must eagerly coalesce it
+ // with its neighbors to prevent having two spans with
+ // the same scavenged state adjacent to each other.
+ h.coalesce(s)
h.scav.insert(s)
released += r
}
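
The boundary arithmetic in realign is easier to see with concrete numbers. A standalone sketch (constants assumed for illustration: 8 KiB runtime pages, 64 KiB physical pages, as on some arm64 configurations): the boundary is rounded toward the scavenged span, trimming any partial physical page off the scavenged side and giving it to the unscavenged neighbor.

    package main

    import "fmt"

    const (
        pageSize     = 8 << 10  // runtime page size (assumed)
        physPageSize = 64 << 10 // physical page size (assumed)
    )

    // realignBoundary mirrors the rounding in the patch: round down if the
    // lower span is the scavenged one, up otherwise.
    func realignBoundary(boundary uintptr, lowerScavenged bool) uintptr {
        if lowerScavenged {
            return boundary &^ (physPageSize - 1)
        }
        return (boundary + physPageSize - 1) &^ (physPageSize - 1)
    }

    func main() {
        b := uintptr(0x28000) // 160 KiB: a runtime-page boundary, not a physical one
        fmt.Printf("%#x\n", realignBoundary(b, true))  // 0x20000 (128 KiB)
        fmt.Printf("%#x\n", realignBoundary(b, false)) // 0x30000 (192 KiB)
    }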
From 33caf3be833ba1fe9b74aa4c314f5b82bb696b86 Mon Sep 17 00:00:00 2001
From: Robert Griesemer
Date: Thu, 17 Jan 2019 14:49:30 -0800
Subject: [PATCH 06/10] math/big: document that Rat.SetString accepts _decimal_
float representations
Updates #29799.
Change-Id: I267c2c3ba3964e96903954affc248d0c52c4916c
Reviewed-on: https://go-review.googlesource.com/c/158397
Reviewed-by: Brad Fitzpatrick
---
src/math/big/ratconv.go | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/src/math/big/ratconv.go b/src/math/big/ratconv.go
index 157d8d006d..5656280e84 100644
--- a/src/math/big/ratconv.go
+++ b/src/math/big/ratconv.go
@@ -38,8 +38,8 @@ func (z *Rat) Scan(s fmt.ScanState, ch rune) error {
}
// SetString sets z to the value of s and returns z and a boolean indicating
-// success. s can be given as a fraction "a/b" or as a floating-point number
-// optionally followed by an exponent. The entire string (not just a prefix)
+// success. s can be given as a fraction "a/b" or as a decimal floating-point
+// number optionally followed by an exponent. The entire string (not just a prefix)
// must be valid for success. If the operation failed, the value of z is
// undefined but the returned value is nil.
func (z *Rat) SetString(s string) (*Rat, bool) {
@@ -78,6 +78,7 @@ func (z *Rat) SetString(s string) (*Rat, bool) {
}
// mantissa
+ // TODO(gri) allow other bases besides 10 for mantissa and exponent? (issue #29799)
var ecorr int
z.a.abs, _, ecorr, err = z.a.abs.scan(r, 10, true)
if err != nil {
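
A quick demonstration of the documented behavior (a hypothetical snippet, not part of the change): SetString accepts a fraction or a decimal floating-point number with an optional exponent, and, as of this patch, rejects non-decimal mantissas (the subject of the TODO above; later Go releases relaxed this).

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        var z big.Rat
        if r, ok := z.SetString("3/4"); ok {
            fmt.Println(r.RatString()) // 3/4
        }
        if r, ok := z.SetString("1.5e2"); ok {
            fmt.Println(r.RatString()) // 150: decimal float with exponent
        }
        if _, ok := z.SetString("0x1p4"); !ok {
            // Rejected at the time of this patch: only base-10 mantissas
            // are scanned.
            fmt.Println("hex float rejected")
        }
    }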
From 246e6ceb3bdbcfdb9f562c67622c3d67a45eb70d Mon Sep 17 00:00:00 2001
From: Brad Fitzpatrick
Date: Thu, 17 Jan 2019 15:22:47 -0800
Subject: [PATCH 07/10] net/http: clarify Transport connection reuse docs a bit
Updates #26095 (or fixes it)
Change-Id: I92488dabe823b82e1ba534648fe6d63d25d0ae9f
Reviewed-on: https://go-review.googlesource.com/c/158417
Reviewed-by: Ian Lance Taylor
---
src/net/http/client.go | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/net/http/client.go b/src/net/http/client.go
index ea6c071911..921f86bd92 100644
--- a/src/net/http/client.go
+++ b/src/net/http/client.go
@@ -478,10 +478,10 @@ func urlErrorOp(method string) string {
// error.
//
// If the returned error is nil, the Response will contain a non-nil
-// Body which the user is expected to close. If the Body is not
-// closed, the Client's underlying RoundTripper (typically Transport)
-// may not be able to re-use a persistent TCP connection to the server
-// for a subsequent "keep-alive" request.
+// Body which the user is expected to close. If the Body is not both
+// read to EOF and closed, the Client's underlying RoundTripper
+// (typically Transport) may not be able to re-use a persistent TCP
+// connection to the server for a subsequent "keep-alive" request.
//
// The request Body, if non-nil, will be closed by the underlying
// Transport, even on errors.
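
The pattern the clarified sentence asks callers to follow, as a sketch (URL hypothetical): close the Body and also drain it to EOF, since an unread remainder can keep the Transport from reusing the connection.

    package main

    import (
        "io"
        "io/ioutil"
        "log"
        "net/http"
    )

    func fetch(url string) error {
        resp, err := http.Get(url)
        if err != nil {
            return err
        }
        defer resp.Body.Close()
        // Drain the body even when its contents aren't needed, so the
        // underlying TCP connection can be reused for keep-alive requests.
        _, err = io.Copy(ioutil.Discard, resp.Body)
        return err
    }

    func main() {
        if err := fetch("https://example.com/"); err != nil {
            log.Fatal(err)
        }
    }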
From dd1889cb22c1fd37cf444d672274a8460fe6e2bf Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Thu, 17 Jan 2019 18:35:43 -0800
Subject: [PATCH 08/10] cmd/cgo: don't replace newlines with semicolons in
expressions
Fixes #29781
Change-Id: Id032d07a54b8c24f0c6d3f6e512932f76920ee04
Reviewed-on: https://go-review.googlesource.com/c/158457
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Brad Fitzpatrick
---
misc/cgo/test/issue29781.go | 17 +++++++++++++++++
src/cmd/cgo/godefs.go | 21 ++++++++++++++++++---
2 files changed, 35 insertions(+), 3 deletions(-)
create mode 100644 misc/cgo/test/issue29781.go
diff --git a/misc/cgo/test/issue29781.go b/misc/cgo/test/issue29781.go
new file mode 100644
index 0000000000..0fd8c08b8e
--- /dev/null
+++ b/misc/cgo/test/issue29781.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Error with newline inserted into constant expression.
+// Compilation test only, nothing to run.
+
+package cgotest
+
+// static void issue29781F(char **p, int n) {}
+// #define ISSUE29781C 0
+import "C"
+
+func issue29781G() {
+ var p *C.char
+ C.issue29781F(&p, C.ISSUE29781C+1)
+}
diff --git a/src/cmd/cgo/godefs.go b/src/cmd/cgo/godefs.go
index c0cd8e002f..7185ea0de7 100644
--- a/src/cmd/cgo/godefs.go
+++ b/src/cmd/cgo/godefs.go
@@ -131,12 +131,27 @@ func gofmt(n interface{}) string {
// AST expression onto a single line. The lexer normally inserts a
// semicolon at each newline, so we can replace newline with semicolon.
// However, we can't do that in cases where the lexer would not insert
-// a semicolon. Fortunately we only have to worry about cases that
-// can occur in an expression passed through gofmt, which just means
-// composite literals.
+// a semicolon. We only have to worry about cases that can occur in an
+// expression passed through gofmt, which means composite literals and
+// (due to the printer possibly inserting newlines because of position
+// information) operators.
var gofmtLineReplacer = strings.NewReplacer(
"{\n", "{",
",\n", ",",
+ "++\n", "++;",
+ "--\n", "--;",
+ "+\n", "+",
+ "-\n", "-",
+ "*\n", "*",
+ "/\n", "/",
+ "%\n", "%",
+ "&\n", "&",
+ "|\n", "|",
+ "^\n", "^",
+ "<\n", "<",
+ ">\n", ">",
+ "=\n", "=",
+ ",\n", ",",
"\n", ";",
)
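
A miniature of the replacer (trimmed to a few pairs) showing why the operator entries matter: the printer can wrap an expression after a binary operator because of position information, and a bare newline-to-semicolon rule would split the expression in two.

    package main

    import (
        "fmt"
        "strings"
    )

    // Earlier pairs win, so "+\n" is consumed before the bare "\n" rule
    // can insert a semicolon where the lexer would not have.
    var lineReplacer = strings.NewReplacer(
        "{\n", "{",
        ",\n", ",",
        "+\n", "+",
        "\n", ";",
    )

    func main() {
        src := "ISSUE29781C +\n1" // how the printer might wrap the expression
        fmt.Println(lineReplacer.Replace(src)) // "ISSUE29781C +1"
        // A plain "\n" -> ";" rule would have produced "ISSUE29781C +;1",
        // which no longer parses as an expression.
    }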
From c077c74312563aad8117fe9f40fa85e1b5c87ac2 Mon Sep 17 00:00:00 2001
From: Ian Lance Taylor
Date: Sun, 16 Dec 2018 16:06:13 -0800
Subject: [PATCH 09/10] cmd/go: don't modify GOROOT in
TestNewReleaseRebuildsStalePackagesInGOPATH
Fixes #29263
Change-Id: I06ba135dc491fd01fc06ccaad4ef98103d4b86c4
Reviewed-on: https://go-review.googlesource.com/c/154460
Run-TryBot: Ian Lance Taylor
TryBot-Result: Gobot Gobot
Reviewed-by: Bryan C. Mills
---
src/cmd/go/go_test.go | 51 ++++++++++++++++++++++++++++++++++++++-----
1 file changed, 45 insertions(+), 6 deletions(-)
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index e2ddc58a5d..c58bc7408d 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -866,12 +866,54 @@ func (tg *testgoData) failSSH() {
func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
if testing.Short() {
- t.Skip("don't rebuild the standard library in short mode")
+ t.Skip("skipping lengthy test in short mode")
}
tg := testgo(t)
defer tg.cleanup()
+ // Copy the runtime packages into a temporary GOROOT
+ // so that we can change files.
+ for _, copydir := range []string{
+ "src/runtime",
+ "src/internal/bytealg",
+ "src/internal/cpu",
+ "src/unsafe",
+ filepath.Join("pkg", runtime.GOOS+"_"+runtime.GOARCH),
+ filepath.Join("pkg/tool", runtime.GOOS+"_"+runtime.GOARCH),
+ "pkg/include",
+ } {
+ srcdir := filepath.Join(testGOROOT, copydir)
+ tg.tempDir(filepath.Join("goroot", copydir))
+ err := filepath.Walk(srcdir,
+ func(path string, info os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if info.IsDir() {
+ return nil
+ }
+ srcrel, err := filepath.Rel(srcdir, path)
+ if err != nil {
+ return err
+ }
+ dest := filepath.Join("goroot", copydir, srcrel)
+ data, err := ioutil.ReadFile(path)
+ if err != nil {
+ return err
+ }
+ tg.tempFile(dest, string(data))
+ if err := os.Chmod(tg.path(dest), info.Mode()); err != nil {
+ return err
+ }
+ return nil
+ })
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+ tg.setenv("GOROOT", tg.path("goroot"))
+
addVar := func(name string, idx int) (restore func()) {
data, err := ioutil.ReadFile(name)
if err != nil {
@@ -900,7 +942,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
// Changing mtime of runtime/internal/sys/sys.go
// should have no effect: only the content matters.
// In fact this should be true even outside a release branch.
- sys := runtime.GOROOT() + "/src/runtime/internal/sys/sys.go"
+ sys := tg.path("goroot/src/runtime/internal/sys/sys.go")
tg.sleep()
restore := addVar(sys, 0)
restore()
@@ -915,7 +957,7 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
restore()
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale, incorrectly, after changing back to old release")
addVar(sys, 2)
- tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
+ tg.wantStale("p1", "stale dependency: runtime", "./testgo list claims p1 is NOT stale, incorrectly, after changing sys.go again")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with new release")
@@ -924,9 +966,6 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) {
tg.wantStale("p1", "stale dependency: runtime/internal/sys", "./testgo list claims p1 is NOT stale, incorrectly, after restoring sys.go")
tg.run("install", "-i", "p1")
tg.wantNotStale("p1", "", "./testgo list claims p1 is stale after building with old release")
-
- // Everything is out of date. Rebuild to leave things in a better state.
- tg.run("install", "std")
}
func testLocalRun(tg *testgoData, exepath, local, match string) {
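
The copy step above, extracted into a standalone sketch (helper name invented): mirror a directory tree into a destination, preserving file modes, so a test can mutate the copy rather than the real GOROOT.

    package main

    import (
        "io/ioutil"
        "log"
        "os"
        "path/filepath"
    )

    // copyTree walks srcdir and writes every regular file under dstdir,
    // keeping relative paths and permissions, like the test's copy loop.
    func copyTree(srcdir, dstdir string) error {
        return filepath.Walk(srcdir, func(path string, info os.FileInfo, err error) error {
            if err != nil {
                return err
            }
            rel, err := filepath.Rel(srcdir, path)
            if err != nil {
                return err
            }
            dest := filepath.Join(dstdir, rel)
            if info.IsDir() {
                return os.MkdirAll(dest, 0777)
            }
            data, err := ioutil.ReadFile(path)
            if err != nil {
                return err
            }
            return ioutil.WriteFile(dest, data, info.Mode())
        })
    }

    func main() {
        if len(os.Args) != 3 {
            log.Fatal("usage: copytree srcdir dstdir")
        }
        if err := copyTree(os.Args[1], os.Args[2]); err != nil {
            log.Fatal(err)
        }
    }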
From dc889025c714313fbe912ad1253f0ec748a6e58c Mon Sep 17 00:00:00 2001
From: Raul Silvera
Date: Fri, 18 Jan 2019 00:14:29 +0000
Subject: [PATCH 10/10] runtime: sample large heap allocations correctly
Remove an unnecessary check in the heap sampling code that forced sampling
of all heap allocations larger than the sampling rate. These allocations
need to follow a Poisson process so that they can be correctly unsampled.
Maintain a check for MemProfileRate==1 to provide a mechanism for full
sampling, as documented in https://golang.org/pkg/runtime/#pkg-variables.
Additional testing for this change is on cl/129117.
Fixes #26618
Change-Id: I7802bde2afc655cf42cffac34af9bafeb3361957
GitHub-Last-Rev: 471f747af845395d458096bea26daa93b91120be
GitHub-Pull-Request: golang/go#29791
Reviewed-on: https://go-review.googlesource.com/c/158337
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Hyang-Ah Hana Kim
---
src/runtime/malloc.go | 2 +-
src/runtime/testdata/testprog/memprof.go | 5 ++++-
2 files changed, 5 insertions(+), 2 deletions(-)
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index c1a89dc588..8c617bb42b 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -1012,7 +1012,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
}
if rate := MemProfileRate; rate > 0 {
- if size < uintptr(rate) && int32(size) < c.next_sample {
+ if rate != 1 && int32(size) < c.next_sample {
c.next_sample -= int32(size)
} else {
mp := acquirem()
diff --git a/src/runtime/testdata/testprog/memprof.go b/src/runtime/testdata/testprog/memprof.go
index a22fee61d7..7b134bc078 100644
--- a/src/runtime/testdata/testprog/memprof.go
+++ b/src/runtime/testdata/testprog/memprof.go
@@ -21,7 +21,10 @@ var memProfBuf bytes.Buffer
var memProfStr string
func MemProf() {
- for i := 0; i < 1000; i++ {
+ // Force heap sampling for determinism.
+ runtime.MemProfileRate = 1
+
+ for i := 0; i < 10; i++ {
fmt.Fprintf(&memProfBuf, "%*d\n", i, i)
}
memProfStr = memProfBuf.String()
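
A toy model of the new sampling decision (all names and constants invented): a large allocation no longer forces a sample; it is sampled only when it exhausts an exponentially distributed byte budget. Exponential gaps are what make the sample stream a Poisson process, so each sample can be scaled back up ("unsampled") by its expected frequency; the rate==1 check is the escape hatch for full sampling.

    package main

    import (
        "fmt"
        "math"
        "math/rand"
    )

    const rate = 512 * 1024 // mean bytes between samples, like MemProfileRate

    // nextSample draws an exponentially distributed gap to the next sample.
    func nextSample() int {
        return int(-math.Log(1-rand.Float64()) * rate)
    }

    func main() {
        budget := nextSample()
        sampled := 0
        for i := 0; i < 100000; i++ {
            size := 1 << 20 // 1 MiB allocations, all larger than rate
            // Mirrors the patched condition: no special case for big sizes.
            if rate != 1 && size < budget {
                budget -= size
                continue
            }
            sampled++
            budget = nextSample()
        }
        // Roughly 1-exp(-size/rate) of allocations get sampled, not 100%.
        fmt.Println("sampled:", sampled, "of 100000 large allocations")
    }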