diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go
index 4c281ce52b..c5c8a4cecf 100644
--- a/src/runtime/gc_test.go
+++ b/src/runtime/gc_test.go
@@ -12,6 +12,7 @@ import (
 	"runtime"
 	"runtime/debug"
 	"sort"
+	"strings"
 	"sync"
 	"sync/atomic"
 	"testing"
@@ -192,6 +193,15 @@ func TestPeriodicGC(t *testing.T) {
 	}
 }
 
+func TestGcZombieReporting(t *testing.T) {
+	// This test is somewhat sensitive to how the allocator works.
+	got := runTestProg(t, "testprog", "GCZombie")
+	want := "found pointer to free object"
+	if !strings.Contains(got, want) {
+		t.Fatalf("expected %q in output, but got %q", want, got)
+	}
+}
+
 func BenchmarkSetTypePtr(b *testing.B) {
 	benchSetType(b, new(*byte))
 }
diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go
index f9b03d3594..3aa3afc028 100644
--- a/src/runtime/mgcsweep.go
+++ b/src/runtime/mgcsweep.go
@@ -439,10 +439,31 @@ func (s *mspan) sweep(preserve bool) bool {
 		}
 	}
 
+	// Check for zombie objects.
+	if s.freeindex < s.nelems {
+		// Everything < freeindex is allocated and hence
+		// cannot be zombies.
+		//
+		// Check the first bitmap byte, where we have to be
+		// careful with freeindex.
+		obj := s.freeindex
+		if (*s.gcmarkBits.bytep(obj / 8)&^*s.allocBits.bytep(obj / 8))>>(obj%8) != 0 {
+			s.reportZombies()
+		}
+		// Check remaining bytes.
+		for i := obj/8 + 1; i < divRoundUp(s.nelems, 8); i++ {
+			if *s.gcmarkBits.bytep(i)&^*s.allocBits.bytep(i) != 0 {
+				s.reportZombies()
+			}
+		}
+	}
+
 	// Count the number of free objects in this span.
 	nalloc := uint16(s.countAlloc())
 	nfreed := s.allocCount - nalloc
 	if nalloc > s.allocCount {
+		// The zombie check above should have caught this in
+		// more detail.
 		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
 		throw("sweep increased allocation count")
 	}
@@ -755,6 +776,57 @@ func (s *mspan) oldSweep(preserve bool) bool {
 	return res
 }
 
+// reportZombies reports any marked but free objects in s and throws.
+//
+// This generally means one of the following:
+//
+// 1. User code converted a pointer to a uintptr and then back
+// unsafely, and a GC ran while the uintptr was the only reference to
+// an object.
+//
+// 2. User code (or a compiler bug) constructed a bad pointer that
+// points to a free slot, often a past-the-end pointer.
+//
+// 3. The GC two cycles ago missed a pointer and freed a live object,
+// but it was still live in the last cycle, so this GC cycle found a
+// pointer to that object and marked it.
+func (s *mspan) reportZombies() {
+	printlock()
+	print("runtime: marked free object in span ", s, ", elemsize=", s.elemsize, " freeindex=", s.freeindex, " (bad use of unsafe.Pointer? try -d=checkptr)\n")
+	mbits := s.markBitsForBase()
+	abits := s.allocBitsForIndex(0)
+	for i := uintptr(0); i < s.nelems; i++ {
+		addr := s.base() + i*s.elemsize
+		print(hex(addr))
+		alloc := i < s.freeindex || abits.isMarked()
+		if alloc {
+			print(" alloc")
+		} else {
+			print(" free ")
+		}
+		if mbits.isMarked() {
+			print(" marked  ")
+		} else {
+			print(" unmarked")
+		}
+		zombie := mbits.isMarked() && !alloc
+		if zombie {
+			print(" zombie")
+		}
+		print("\n")
+		if zombie {
+			length := s.elemsize
+			if length > 1024 {
+				length = 1024
+			}
+			hexdumpWords(addr, addr+length, nil)
+		}
+		mbits.advance()
+		abits.advance()
+	}
+	throw("found pointer to free object")
+}
+
 // deductSweepCredit deducts sweep credit for allocating a span of
 // size spanBytes. This must be performed *before* the span is
 // allocated to ensure the system has enough credit. If necessary, it
diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go
index f691a1d127..74732cd9f4 100644
--- a/src/runtime/testdata/testprog/gc.go
+++ b/src/runtime/testdata/testprog/gc.go
@@ -11,6 +11,7 @@ import (
 	"runtime/debug"
 	"sync/atomic"
 	"time"
+	"unsafe"
 )
 
 func init() {
@@ -19,6 +20,7 @@ func init() {
 	register("GCSys", GCSys)
 	register("GCPhys", GCPhys)
 	register("DeferLiveness", DeferLiveness)
+	register("GCZombie", GCZombie)
 }
 
 func GCSys() {
@@ -264,3 +266,37 @@ func DeferLiveness() {
 func escape(x interface{}) { sink2 = x; sink2 = nil }
 
 var sink2 interface{}
+
+// Test zombie object detection and reporting.
+func GCZombie() {
+	// Allocate several objects of unusual size (so free slots are
+	// unlikely to all be re-allocated by the runtime).
+	const size = 190
+	const count = 8192 / size
+	keep := make([]*byte, 0, (count+1)/2)
+	free := make([]uintptr, 0, (count+1)/2)
+	zombies := make([]*byte, 0, len(free))
+	for i := 0; i < count; i++ {
+		obj := make([]byte, size)
+		p := &obj[0]
+		if i%2 == 0 {
+			keep = append(keep, p)
+		} else {
+			free = append(free, uintptr(unsafe.Pointer(p)))
+		}
+	}
+
+	// Free the unreferenced objects.
+	runtime.GC()
+
+	// Bring the free objects back to life.
+	for _, p := range free {
+		zombies = append(zombies, (*byte)(unsafe.Pointer(p)))
+	}
+
+	// GC should detect the zombie objects.
+	runtime.GC()
+	println("failed")
+	runtime.KeepAlive(keep)
+	runtime.KeepAlive(zombies)
+}
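
Note for reviewers following the new check in (*mspan).sweep: the core of the detection is the per-byte expression gcmarkBits &^ allocBits, which is nonzero exactly when some object in that byte is marked by the GC but not allocated. The standalone sketch below reproduces that logic over plain []byte bitmaps so it can be run outside the runtime. The names findZombies and collect, and the byte-slice bitmaps themselves, are illustrative assumptions rather than runtime APIs; only divRoundUp mirrors a real runtime helper.

	// Standalone illustration of the bitmap check in (*mspan).sweep.
	// One bit per object, eight objects per byte, as in the runtime's
	// mark/alloc bitmaps; everything else is simplified.
	package main

	import "fmt"

	// divRoundUp returns ceil(n/a), like the runtime helper of the same name.
	func divRoundUp(n, a uintptr) uintptr {
		return (n + a - 1) / a
	}

	// findZombies returns indices of objects that are marked but not
	// allocated. Objects below freeindex are implicitly allocated, so the
	// first byte must ignore bits below freeindex%8.
	func findZombies(markBits, allocBits []byte, freeindex, nelems uintptr) []uintptr {
		var zombies []uintptr
		if freeindex >= nelems {
			return nil // everything below freeindex is allocated; nothing to check
		}
		collect := func(byteIdx uintptr, bits byte) {
			for bit := uintptr(0); bit < 8; bit++ {
				if obj := byteIdx*8 + bit; obj < nelems && bits&(1<<bit) != 0 {
					zombies = append(zombies, obj)
				}
			}
		}
		// First byte: shift right then left by freeindex%8 to clear the
		// bits that belong to objects below freeindex.
		first := freeindex / 8
		collect(first, (markBits[first]&^allocBits[first])>>(freeindex%8)<<(freeindex%8))
		// Remaining bytes: any set bit in mark &^ alloc is a zombie.
		for i := first + 1; i < divRoundUp(nelems, 8); i++ {
			collect(i, markBits[i]&^allocBits[i])
		}
		return zombies
	}

	func main() {
		// 12 objects, freeindex = 4: objects 4-7 are allocated and marked,
		// object 8 is allocated but unmarked, object 9 is marked yet free.
		mark := []byte{0b1111_0000, 0b0000_0010}
		alloc := []byte{0b1111_0000, 0b0000_0001}
		fmt.Println(findZombies(mark, alloc, 4, 12)) // prints [9]
	}

The runtime only needs a yes/no answer during the scan, so sweep bails out to reportZombies on the first nonzero byte instead of collecting indices; reportZombies then rewalks the span to print the per-object detail.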