runtime: replace trace.enabled with traceEnabled
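
The rewrite is mechanical: every read of the trace.enabled flag outside
trace.go becomes a call to the traceEnabled() accessor. trace.go is
excluded because it owns the trace state and defines the accessor itself.
As a rough sketch (assuming the definition in src/runtime/trace.go), the
helper is just a thin wrapper over the same flag:

	// traceEnabled returns true if tracing is on.
	//
	//go:nosplit
	func traceEnabled() bool {
		return trace.enabled
	}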

[git-generate]
cd src/runtime
grep -l 'trace\.enabled' *.go | grep -v "trace.go" | xargs sed -i 's/trace\.enabled/traceEnabled()/g'

Change-Id: I14c7821c1134690b18c8abc0edd27abcdabcad72
Reviewed-on: https://go-review.googlesource.com/c/go/+/494181
Run-TryBot: Michael Knyszek <mknyszek@google.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
Michael Anthony Knyszek, 2023-05-09 19:37:24 +00:00, committed by Gopher Robot
parent 7484dd30fd
commit 8992bb19ad
8 changed files with 60 additions and 60 deletions

src/runtime/debugcall.go

@@ -161,7 +161,7 @@ func debugCallWrap(dispatch uintptr) {
 		gp.schedlink = 0
 
 		// Park the calling goroutine.
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoPark(traceEvGoBlock, 1)
 		}
 		casGToWaiting(gp, _Grunning, waitReasonDebugCall)
@@ -220,7 +220,7 @@ func debugCallWrap1() {
 		// Switch back to the calling goroutine. At some point
 		// the scheduler will schedule us again and we'll
 		// finish exiting.
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoSched()
 		}
 		casgstatus(gp, _Grunning, _Grunnable)
@@ -229,7 +229,7 @@ func debugCallWrap1() {
 			globrunqput(gp)
 			unlock(&sched.lock)
 
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoUnpark(callingG, 0)
 			}
 			casgstatus(callingG, _Gwaiting, _Grunnable)

src/runtime/mcentral.go

@@ -84,7 +84,7 @@ func (c *mcentral) cacheSpan() *mspan {
 	deductSweepCredit(spanBytes, 0)
 
 	traceDone := false
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepStart()
 	}
 
@@ -157,7 +157,7 @@ func (c *mcentral) cacheSpan() *mspan {
 		}
 		sweep.active.end(sl)
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepDone()
 		traceDone = true
 	}
@@ -170,7 +170,7 @@ func (c *mcentral) cacheSpan() *mspan {
 
 	// At this point s is a span that should have free slots.
 havespan:
-	if trace.enabled && !traceDone {
+	if traceEnabled() && !traceDone {
 		traceGCSweepDone()
 	}
 	n := int(s.nelems) - int(s.allocCount)

src/runtime/mgc.go

@@ -629,7 +629,7 @@ func gcStart(trigger gcTrigger) {
 	// Update it under gcsema to avoid gctrace getting wrong values.
 	work.userForced = trigger.kind == gcTriggerCycle
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCStart()
 	}
 
@@ -658,7 +658,7 @@ func gcStart(trigger gcTrigger) {
 	now := nanotime()
 	work.tSweepTerm = now
 	work.pauseStart = now
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSTWStart(1)
 	}
 	systemstack(stopTheWorldWithSema)
@@ -726,7 +726,7 @@ func gcStart(trigger gcTrigger) {
 
 	// Concurrent mark.
 	systemstack(func() {
-		now = startTheWorldWithSema(trace.enabled)
+		now = startTheWorldWithSema(traceEnabled())
 		work.pauseNS += now - work.pauseStart
 		work.tMark = now
 		memstats.gcPauseDist.record(now - work.pauseStart)
@@ -848,7 +848,7 @@ top:
 	work.tMarkTerm = now
 	work.pauseStart = now
 	getg().m.preemptoff = "gcing"
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSTWStart(0)
 	}
 	systemstack(stopTheWorldWithSema)
@@ -878,7 +878,7 @@ top:
 	if restart {
 		getg().m.preemptoff = ""
 		systemstack(func() {
-			now := startTheWorldWithSema(trace.enabled)
+			now := startTheWorldWithSema(traceEnabled())
 			work.pauseNS += now - work.pauseStart
 			memstats.gcPauseDist.record(now - work.pauseStart)
 		})
@@ -972,7 +972,7 @@ func gcMarkTermination() {
 	mp.traceback = 0
 	casgstatus(curgp, _Gwaiting, _Grunning)
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCDone()
 	}
 
@@ -1092,7 +1092,7 @@ func gcMarkTermination() {
 		throw("failed to set sweep barrier")
 	}
 
-	systemstack(func() { startTheWorldWithSema(trace.enabled) })
+	systemstack(func() { startTheWorldWithSema(traceEnabled()) })
 
 	// Flush the heap profile so we can start a new cycle next GC.
 	// This is relatively expensive, so we don't do it with the

src/runtime/mgcmark.go

@@ -466,7 +466,7 @@ retry:
 		}
 	}
 
-	if trace.enabled && !traced {
+	if traceEnabled() && !traced {
 		traced = true
 		traceGCMarkAssistStart()
 	}

src/runtime/mgcpacer.go

@@ -803,7 +803,7 @@ func (c *gcControllerState) findRunnableGCWorker(pp *p, now int64) (*g, int64) {
 	// Run the background mark worker.
 	gp := node.gp.ptr()
 	casgstatus(gp, _Gwaiting, _Grunnable)
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoUnpark(gp, 0)
 	}
 	return gp, now
@@ -823,7 +823,7 @@ func (c *gcControllerState) resetLive(bytesMarked uint64) {
 	c.triggered = ^uint64(0) // Reset triggered.
 
 	// heapLive was updated, so emit a trace event.
-	if trace.enabled {
+	if traceEnabled() {
 		traceHeapAlloc(bytesMarked)
 	}
 }
@@ -852,7 +852,7 @@ func (c *gcControllerState) markWorkerStop(mode gcMarkWorkerMode, duration int64
 func (c *gcControllerState) update(dHeapLive, dHeapScan int64) {
 	if dHeapLive != 0 {
 		live := gcController.heapLive.Add(dHeapLive)
-		if trace.enabled {
+		if traceEnabled() {
 			// gcController.heapLive changed.
 			traceHeapAlloc(live)
 		}
@@ -1417,7 +1417,7 @@ func gcControllerCommit() {
 
 	// TODO(mknyszek): This isn't really accurate any longer because the heap
 	// goal is computed dynamically. Still useful to snapshot, but not as useful.
-	if trace.enabled {
+	if traceEnabled() {
 		traceHeapGoal()
 	}

src/runtime/mgcsweep.go

@@ -512,7 +512,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 		throw("mspan.sweep: bad span state")
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepSpan(s.npages * _PageSize)
 	}
 
@@ -651,7 +651,7 @@ func (sl *sweepLocked) sweep(preserve bool) bool {
 		s.allocCount = nalloc
 		s.freeindex = 0 // reset allocation index to start of span.
 		s.freeIndexForScan = 0
-		if trace.enabled {
+		if traceEnabled() {
 			getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
 		}
 
@@ -871,7 +871,7 @@ func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
 		return
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepStart()
 	}
 
@@ -911,7 +911,7 @@ retry:
 		}
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepDone()
 	}
 }

src/runtime/mheap.go

@@ -798,7 +798,7 @@ func (h *mheap) reclaim(npage uintptr) {
 	// traceGCSweepStart/Done pair on the P.
 	mp := acquirem()
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepStart()
 	}
 
@@ -846,7 +846,7 @@ func (h *mheap) reclaim(npage uintptr) {
 		unlock(&h.lock)
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGCSweepDone()
 	}
 	releasem(mp)
@@ -918,7 +918,7 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
 		n -= uintptr(len(inUse) * 8)
 	}
 	sweep.active.end(sl)
-	if trace.enabled {
+	if traceEnabled() {
 		unlock(&h.lock)
 		// Account for pages scanned but not reclaimed.
 		traceGCSweepSpan((n0 - nFreed) * pageSize)

src/runtime/proc.go

@@ -876,7 +876,7 @@ func fastrandinit() {
 
 // Mark gp ready to run.
 func ready(gp *g, traceskip int, next bool) {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoUnpark(gp, traceskip)
 	}
 
@@ -1274,7 +1274,7 @@ func stopTheWorldWithSema() {
 	for _, pp := range allp {
 		s := pp.status
 		if s == _Psyscall && atomic.Cas(&pp.status, s, _Pgcstop) {
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoSysBlock(pp)
 				traceProcStop(pp)
 			}
@@ -1703,7 +1703,7 @@ func forEachP(fn func(*p)) {
 	for _, p2 := range allp {
 		s := p2.status
 		if s == _Psyscall && p2.runSafePointFn == 1 && atomic.Cas(&p2.status, s, _Pidle) {
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoSysBlock(p2)
 				traceProcStop(p2)
 			}
@@ -2003,7 +2003,7 @@ func oneNewExtraM() {
 	if raceenabled {
 		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		// Trigger two trace events for the locked g in the extra m,
 		// since the next event of the g will be traceEvGoSysExit in exitsyscall,
 		// while calling from C thread to Go.
@@ -2501,7 +2501,7 @@ func handoffp(pp *p) {
 		return
 	}
 	// if there's trace work to do, start it straight away
-	if (trace.enabled || trace.shutdown) && traceReaderAvailable() != nil {
+	if (traceEnabled() || trace.shutdown) && traceReaderAvailable() != nil {
 		startm(pp, false, false)
 		return
 	}
@@ -2707,7 +2707,7 @@ func execute(gp *g, inheritTime bool) {
 		setThreadCPUProfiler(hz)
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		// GoSysExit has to happen when we have a P, but before GoStart.
 		// So we emit it here.
 		if gp.syscallsp != 0 && gp.sysblocktraced {
@@ -2747,7 +2747,7 @@ top:
 	now, pollUntil, _ := checkTimers(pp, 0)
 
 	// Try to schedule the trace reader.
-	if trace.enabled || trace.shutdown {
+	if traceEnabled() || trace.shutdown {
 		gp := traceReader()
 		if gp != nil {
 			casgstatus(gp, _Gwaiting, _Grunnable)
@@ -2814,7 +2814,7 @@ top:
 		gp := list.pop()
 		injectglist(&list)
 		casgstatus(gp, _Gwaiting, _Grunnable)
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoUnpark(gp, 0)
 		}
 		return gp, false, false
@@ -2859,7 +2859,7 @@ top:
 			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
 			gp := node.gp.ptr()
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoUnpark(gp, 0)
 			}
 			return gp, false, false
@@ -2874,7 +2874,7 @@ top:
 	gp, otherReady := beforeIdle(now, pollUntil)
 	if gp != nil {
 		casgstatus(gp, _Gwaiting, _Grunnable)
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoUnpark(gp, 0)
 		}
 		return gp, false, false
@@ -2985,7 +2985,7 @@ top:
 			// Run the idle worker.
 			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoUnpark(gp, 0)
 			}
 			return gp, false, false
@@ -3042,7 +3042,7 @@ top:
 			gp := list.pop()
 			injectglist(&list)
 			casgstatus(gp, _Gwaiting, _Grunnable)
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoUnpark(gp, 0)
 			}
 			return gp, false, false
@@ -3310,7 +3310,7 @@ func injectglist(glist *gList) {
 	if glist.empty() {
 		return
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		for gp := glist.head.ptr(); gp != nil; gp = gp.schedlink.ptr() {
 			traceGoUnpark(gp, 0)
 		}
@@ -3541,7 +3541,7 @@ func parkunlock_c(gp *g, lock unsafe.Pointer) bool {
 func park_m(gp *g) {
 	mp := getg().m
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoPark(mp.waittraceev, mp.waittraceskip)
 	}
 
@@ -3555,7 +3555,7 @@ func park_m(gp *g) {
 		mp.waitunlockf = nil
 		mp.waitlock = nil
 		if !ok {
-			if trace.enabled {
+			if traceEnabled() {
 				traceGoUnpark(gp, 2)
 			}
 			casgstatus(gp, _Gwaiting, _Grunnable)
@@ -3582,7 +3582,7 @@ func goschedImpl(gp *g) {
 
 // Gosched continuation on g0.
 func gosched_m(gp *g) {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoSched()
 	}
 	goschedImpl(gp)
@@ -3595,14 +3595,14 @@ func goschedguarded_m(gp *g) {
 		gogo(&gp.sched) // never return
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoSched()
 	}
 	goschedImpl(gp)
 }
 
 func gopreempt_m(gp *g) {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoPreempt()
 	}
 	goschedImpl(gp)
@@ -3612,7 +3612,7 @@ func gopreempt_m(gp *g) {
 //
 //go:systemstack
 func preemptPark(gp *g) {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoPark(traceEvGoBlock, 0)
 	}
 	status := readgstatus(gp)
@@ -3656,7 +3656,7 @@ func goyield() {
 }
 
 func goyield_m(gp *g) {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoPreempt()
 	}
 	pp := gp.m.p.ptr()
@@ -3671,7 +3671,7 @@ func goexit1() {
 	if raceenabled {
 		racegoend()
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoEnd()
 	}
 	mcall(goexit0)
@@ -3841,7 +3841,7 @@ func reentersyscall(pc, sp uintptr) {
 		})
 	}
 
-	if trace.enabled {
+	if traceEnabled() {
 		systemstack(traceGoSysCall)
 		// systemstack itself clobbers g.sched.{pc,sp} and we might
 		// need them later when the G is genuinely blocked in a
@@ -3900,7 +3900,7 @@ func entersyscall_gcwait() {
 
 	lock(&sched.lock)
 	if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) {
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoSysBlock(pp)
 			traceProcStop(pp)
 		}
@@ -3957,7 +3957,7 @@ func entersyscallblock() {
 }
 
 func entersyscallblock_handoff() {
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoSysCall()
 		traceGoSysBlock(getg().m.p.ptr())
 	}
@@ -3998,7 +3998,7 @@ func exitsyscall() {
 			tryRecordGoroutineProfileWB(gp)
 		})
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		if oldp != gp.m.p.ptr() || gp.m.syscalltick != gp.m.p.ptr().syscalltick {
 			systemstack(traceGoStart)
 		}
@@ -4030,7 +4030,7 @@ func exitsyscall() {
 	}
 
 	gp.sysexitticks = 0
-	if trace.enabled {
+	if traceEnabled() {
 		// Wait till traceGoSysBlock event is emitted.
 		// This ensures consistency of the trace (the goroutine is started after it is blocked).
 		for oldp != nil && oldp.syscalltick == gp.m.syscalltick {
@@ -4081,7 +4081,7 @@ func exitsyscallfast(oldp *p) bool {
 	var ok bool
 	systemstack(func() {
 		ok = exitsyscallfast_pidle()
-		if ok && trace.enabled {
+		if ok && traceEnabled() {
 			if oldp != nil {
 				// Wait till traceGoSysBlock event is emitted.
 				// This ensures consistency of the trace (the goroutine is started after it is blocked).
@@ -4107,7 +4107,7 @@ func exitsyscallfast(oldp *p) bool {
 func exitsyscallfast_reacquired() {
 	gp := getg()
 	if gp.m.syscalltick != gp.m.p.ptr().syscalltick {
-		if trace.enabled {
+		if traceEnabled() {
 			// The p was retaken and then enter into syscall again (since gp.m.syscalltick has changed).
 			// traceGoSysBlock for this syscall was already emitted,
 			// but here we effectively retake the p from the new syscall running on the same p.
@@ -4399,7 +4399,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g {
 			racereleasemergeg(newg, unsafe.Pointer(&labelSync))
 		}
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		traceGoCreate(newg, newg.startpc)
 	}
 	releasem(mp)
@@ -5009,7 +5009,7 @@ func procresize(nprocs int32) *p {
 	if old < 0 || nprocs <= 0 {
 		throw("procresize: invalid arg")
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		traceGomaxprocs(nprocs)
 	}
 
@@ -5075,7 +5075,7 @@ func procresize(nprocs int32) *p {
 	// because p.destroy itself has write barriers, so we
 	// need to do that from a valid P.
 	if gp.m.p != 0 {
-		if trace.enabled {
+		if traceEnabled() {
 			// Pretend that we were descheduled
 			// and then scheduled again to keep
 			// the trace sane.
@@ -5089,7 +5089,7 @@ func procresize(nprocs int32) *p {
 		pp.m = 0
 		pp.status = _Pidle
 		acquirep(pp)
-		if trace.enabled {
+		if traceEnabled() {
 			traceGoStart()
 		}
 	}
@@ -5154,7 +5154,7 @@ func acquirep(pp *p) {
 	// from a potentially stale mcache.
 	pp.mcache.prepareForSweep()
 
-	if trace.enabled {
+	if traceEnabled() {
 		traceProcStart()
 	}
 }
@@ -5196,7 +5196,7 @@ func releasep() *p {
 		print("releasep: m=", gp.m, " m->p=", gp.m.p.ptr(), " p->m=", hex(pp.m), " p->status=", pp.status, "\n")
 		throw("releasep: invalid p state")
 	}
-	if trace.enabled {
+	if traceEnabled() {
 		traceProcStop(gp.m.p.ptr())
 	}
 	gp.m.p = 0
@@ -5543,7 +5543,7 @@ func retake(now int64) uint32 {
 				// increment nmidle and report deadlock.
 				incidlelocked(-1)
 				if atomic.Cas(&pp.status, s, _Pidle) {
-					if trace.enabled {
+					if traceEnabled() {
 						traceGoSysBlock(pp)
 						traceProcStop(pp)
 					}