diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index 006274757e..82b7fa68ae 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -1270,7 +1270,8 @@ func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok
 // of calling ThreadCreateProfile directly.
 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
 	return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
-		copy(p[0].Stack0[:], r.Stack)
+		i := copy(p[0].Stack0[:], r.Stack)
+		clear(p[0].Stack0[i:])
 		p = p[1:]
 	})
 }
@@ -1649,7 +1650,8 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 		return
 	}
 	for i, mr := range records[0:n] {
-		copy(p[i].Stack0[:], mr.Stack)
+		l := copy(p[i].Stack0[:], mr.Stack)
+		clear(p[i].Stack0[l:])
 	}
 	return
 }
diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go
index 30ef50b1c0..d16acf54da 100644
--- a/src/runtime/pprof/pprof_test.go
+++ b/src/runtime/pprof/pprof_test.go
@@ -2441,16 +2441,7 @@ func TestTimeVDSO(t *testing.T) {
 }
 
 func TestProfilerStackDepth(t *testing.T) {
-	// Disable sampling, otherwise it's difficult to assert anything.
-	oldMemRate := runtime.MemProfileRate
-	runtime.MemProfileRate = 1
-	runtime.SetBlockProfileRate(1)
-	oldMutexRate := runtime.SetMutexProfileFraction(1)
-	t.Cleanup(func() {
-		runtime.MemProfileRate = oldMemRate
-		runtime.SetBlockProfileRate(0)
-		runtime.SetMutexProfileFraction(oldMutexRate)
-	})
+	t.Cleanup(disableSampling())
 
 	const depth = 128
 	go produceProfileEvents(t, depth)
@@ -2742,3 +2733,84 @@ runtime/pprof.inlineA`,
 		})
 	}
 }
+
+func TestProfileRecordNullPadding(t *testing.T) {
+	// Produce events for the different profile types.
+	t.Cleanup(disableSampling())
+	memSink = make([]byte, 1)      // MemProfile
+	<-time.After(time.Millisecond) // BlockProfile
+	blockMutex(t)                  // MutexProfile
+	runtime.GC()
+
+	// Test that all profile records are null padded.
+	testProfileRecordNullPadding(t, "MutexProfile", runtime.MutexProfile)
+	testProfileRecordNullPadding(t, "GoroutineProfile", runtime.GoroutineProfile)
+	testProfileRecordNullPadding(t, "BlockProfile", runtime.BlockProfile)
+	testProfileRecordNullPadding(t, "MemProfile/inUseZero=true", func(p []runtime.MemProfileRecord) (int, bool) {
+		return runtime.MemProfile(p, true)
+	})
+	testProfileRecordNullPadding(t, "MemProfile/inUseZero=false", func(p []runtime.MemProfileRecord) (int, bool) {
+		return runtime.MemProfile(p, false)
+	})
+	// Not testing ThreadCreateProfile because it is broken, see issue 6104.
+}
+
+func testProfileRecordNullPadding[T runtime.StackRecord | runtime.MemProfileRecord | runtime.BlockProfileRecord](t *testing.T, name string, fn func([]T) (int, bool)) {
+	stack0 := func(sr *T) *[32]uintptr {
+		switch t := any(sr).(type) {
+		case *runtime.StackRecord:
+			return &t.Stack0
+		case *runtime.MemProfileRecord:
+			return &t.Stack0
+		case *runtime.BlockProfileRecord:
+			return &t.Stack0
+		default:
+			panic(fmt.Sprintf("unexpected type %T", sr))
+		}
+	}
+
+	t.Run(name, func(t *testing.T) {
+		var p []T
+		for {
+			n, ok := fn(p)
+			if ok {
+				p = p[:n]
+				break
+			}
+			p = make([]T, n*2)
+			for i := range p {
+				s0 := stack0(&p[i])
+				for j := range s0 {
+					// Poison the Stack0 array to identify lack of zero padding.
+					s0[j] = ^uintptr(0)
+				}
+			}
+		}
+
+		if len(p) == 0 {
+			t.Fatal("no records found")
+		}
+
+		for i, sr := range p {
+			for _, v := range stack0(&sr) {
+				if v == ^uintptr(0) {
+					t.Fatalf("record p[%d].Stack0 is not null padded: %+v", i, sr)
+				}
+			}
+		}
+	})
+}
+
+// disableSampling configures the profilers to capture all events; otherwise
+// it's difficult to assert anything.
+func disableSampling() func() {
+	oldMemRate := runtime.MemProfileRate
+	runtime.MemProfileRate = 1
+	runtime.SetBlockProfileRate(1)
+	oldMutexRate := runtime.SetMutexProfileFraction(1)
+	return func() {
+		runtime.MemProfileRate = oldMemRate
+		runtime.SetBlockProfileRate(0)
+		runtime.SetMutexProfileFraction(oldMutexRate)
+	}
+}
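
Why the clear matters: StackRecord.Stack() finds the logical end of a stack by
scanning Stack0 for the first zero entry, and callers routinely reuse the same
record buffer across calls. A bare copy therefore leaves stale frames from a
previous, deeper stack in the tail of Stack0. The following standalone sketch
(illustrative only, not part of the patch; the frame addresses are made up)
reproduces the problem the mprof.go hunks fix:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var rec runtime.StackRecord

    	// A first, deep stack is written into the record.
    	deep := []uintptr{0x100, 0x101, 0x102, 0x103, 0x104}
    	copy(rec.Stack0[:], deep)

    	// The record is reused for a shorter stack, as happens when the
    	// caller passes the same slice to a second profile call.
    	short := []uintptr{0x200, 0x201}

    	// Before the patch: copy alone leaves 0x102-0x104 in the tail, so
    	// Stack(), which stops at the first zero entry, reports 5 frames.
    	copy(rec.Stack0[:], short)
    	fmt.Println(len(rec.Stack())) // 5: two new frames plus three stale ones

    	// With the patch's idiom: clear the tail after copying.
    	n := copy(rec.Stack0[:], short)
    	clear(rec.Stack0[n:])
    	fmt.Println(len(rec.Stack())) // 2
    }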
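The test's retry loop follows the two-call contract these profile APIs share:
when the slice is too small, they return the required record count with
ok == false, and the caller grows the buffer and calls again. A minimal sketch
of that pattern against runtime.GoroutineProfile (the doubling mirrors the
test; the output handling is illustrative):

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	var p []runtime.StackRecord
    	for {
    		// With len(p) < n the call only reports the required size.
    		n, ok := runtime.GoroutineProfile(p)
    		if ok {
    			p = p[:n]
    			break
    		}
    		// Allocate with headroom: more goroutines may exist by the
    		// time of the next call.
    		p = make([]runtime.StackRecord, n*2)
    	}
    	for _, r := range p {
    		// Stack() depends on the zero padding the patch guarantees.
    		fmt.Println(len(r.Stack()), "frames")
    	}
    }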