diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 4e7e606db9..35fd08af50 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -1368,7 +1368,7 @@ HaveSpan:
 
 	// Trace the span alloc.
 	if traceAllocFreeEnabled() {
-		trace := traceAcquire()
+		trace := traceTryAcquire()
 		if trace.ok() {
 			trace.SpanAlloc(s)
 			traceRelease(trace)
@@ -1556,7 +1556,7 @@ func (h *mheap) freeSpan(s *mspan) {
 	systemstack(func() {
 		// Trace the span free.
 		if traceAllocFreeEnabled() {
-			trace := traceAcquire()
+			trace := traceTryAcquire()
 			if trace.ok() {
 				trace.SpanFree(s)
 				traceRelease(trace)
@@ -1595,7 +1595,7 @@ func (h *mheap) freeSpan(s *mspan) {
 func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
 	// Trace the span free.
 	if traceAllocFreeEnabled() {
-		trace := traceAcquire()
+		trace := traceTryAcquire()
 		if trace.ok() {
 			trace.SpanFree(s)
 			traceRelease(trace)
diff --git a/src/runtime/stack.go b/src/runtime/stack.go
index 6d24814271..cdf859a7ff 100644
--- a/src/runtime/stack.go
+++ b/src/runtime/stack.go
@@ -416,7 +416,7 @@ func stackalloc(n uint32) stack {
 	}
 
 	if traceAllocFreeEnabled() {
-		trace := traceAcquire()
+		trace := traceTryAcquire()
 		if trace.ok() {
 			trace.GoroutineStackAlloc(uintptr(v), uintptr(n))
 			traceRelease(trace)
@@ -466,7 +466,7 @@ func stackfree(stk stack) {
 		return
 	}
 	if traceAllocFreeEnabled() {
-		trace := traceAcquire()
+		trace := traceTryAcquire()
 		if trace.ok() {
 			trace.GoroutineStackFree(uintptr(v))
 			traceRelease(trace)
diff --git a/src/runtime/traceruntime.go b/src/runtime/traceruntime.go
index 5497913066..195b3e1c37 100644
--- a/src/runtime/traceruntime.go
+++ b/src/runtime/traceruntime.go
@@ -184,6 +184,22 @@ func traceAcquire() traceLocker {
 	return traceAcquireEnabled()
 }
 
+// traceTryAcquire is like traceAcquire, but may return an invalid traceLocker even
+// if tracing is enabled. For example, it will return !ok if traceAcquire is being
+// called with an active traceAcquire on the M (reentrant locking). This exists for
+// optimistically emitting events in the few contexts where tracing is now allowed.
+//
+// nosplit for alignment with traceAcquire, so it can be used in the
+// same contexts.
+//
+//go:nosplit
+func traceTryAcquire() traceLocker {
+	if !traceEnabled() {
+		return traceLocker{}
+	}
+	return traceTryAcquireEnabled()
+}
+
 // traceAcquireEnabled is the traceEnabled path for traceAcquire. It's explicitly
 // broken out to make traceAcquire inlineable to keep the overhead of the tracer
 // when it's disabled low.
@@ -228,6 +244,26 @@ func traceAcquireEnabled() traceLocker {
 	return traceLocker{mp, gen}
 }
 
+// traceTryAcquireEnabled is like traceAcquireEnabled but may return an invalid
+// traceLocker under some conditions. See traceTryAcquire for more details.
+//
+// nosplit for alignment with traceAcquireEnabled, so it can be used in the
+// same contexts.
+//
+//go:nosplit
+func traceTryAcquireEnabled() traceLocker {
+	// Any time we acquire a traceLocker, we may flush a trace buffer. But
+	// buffer flushes are rare. Record the lock edge even if it doesn't happen
+	// this time.
+	lockRankMayTraceFlush()
+
+	// Check if we're already locked. If so, return an invalid traceLocker.
+	if getg().m.trace.seqlock.Load()%2 == 1 {
+		return traceLocker{}
+	}
+	return traceAcquireEnabled()
+}
+
 // ok returns true if the traceLocker is valid (i.e. tracing is enabled).
 //
 // nosplit because it's called on the syscall path when stack movement is forbidden.
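
For reference, the guard in traceTryAcquireEnabled keys off the parity of the per-M trace seqlock: an odd value means a traceLocker is already held on this M, so a nested acquire would be reentrant and the try simply fails. Below is a minimal, standalone sketch of that parity check; the names tracer, tryAcquire, and release are illustrative only and are not runtime identifiers.

package main

import (
	"fmt"
	"sync/atomic"
)

// tracer stands in for the per-M trace state: an odd seqlock value means a
// traceLocker is currently held on this M.
type tracer struct {
	seqlock atomic.Uint64
}

// acquire takes the lock unconditionally, moving the seqlock to odd parity.
func (t *tracer) acquire() {
	t.seqlock.Add(1) // even -> odd: locked
}

// tryAcquire mirrors the traceTryAcquire idea: if the seqlock is already odd,
// a caller higher on this M's stack holds a traceLocker, so refuse instead of
// re-entering.
func (t *tracer) tryAcquire() bool {
	if t.seqlock.Load()%2 == 1 {
		return false // reentrant: caller skips emitting the event
	}
	t.acquire()
	return true
}

// release returns the seqlock to even parity.
func (t *tracer) release() {
	t.seqlock.Add(1) // odd -> even: unlocked
}

func main() {
	var t tracer
	t.acquire() // an outer acquire is active on this M
	if !t.tryAcquire() {
		fmt.Println("nested tryAcquire refused; event dropped")
	}
	t.release()
	if t.tryAcquire() { // no holder now, so the optimistic path succeeds
		fmt.Println("tryAcquire succeeded")
		t.release()
	}
}

The alloc/free paths in the hunks above follow the same shape: when the try fails, trace.ok() reports false and the event is simply skipped rather than risking a reentrant acquire.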