runtime: don't attempt to steal from idle Ps

Work stealing is a scalability bottleneck in the scheduler. Since each P
has a work queue, work stealing must look at every P to determine if
there is any work. The number of Ps scales linearly with GOMAXPROCS
(i.e., the number of Ps _is_ GOMAXPROCS), thus this work scales linearly
with GOMAXPROCS.
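
For reference, the stealing phase of findrunnable is shaped roughly like
the loop below (a simplified sketch; the real loop visits Ps in a
randomized order, makes several passes, and also considers stealing
timers):

    // Every P's run queue is inspected, so the cost of a single failed
    // pass grows linearly with GOMAXPROCS.
    for _, p2 := range allp {
        if p2 == _p_ {
            continue
        }
        if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil {
            return gp, false
        }
    }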

Work stealing is a later attempt by a P to find work before it goes
idle. Since the P has no work of its own, extra costs here tend not to
directly affect application-level benchmarks; instead, they show up as
extra CPU usage by the process as a whole. These costs get particularly
expensive for applications that transition between blocked and running
frequently.

Long term, we need a more scalable approach in general, but for now we
can make a simple observation: idle Ps ([1]) cannot possibly have
anything in their runq, so we need not bother checking at all.

We track idle Ps via a new global bitmap, updated in pidleput/pidleget.
Both are already slow paths (they require sched.lock), so we don't
expect high contention there.
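
Condensed, the new bookkeeping looks like this (a sketch; the full
versions, with their ownership rules, are in the diff below):

    // Both run under sched.lock, so the bitmap and sched.pidle always
    // change together.
    func pidleput(_p_ *p) {
        idlepMask.set(_p_.id) // mark idle in the same critical section
        _p_.link = sched.pidle
        sched.pidle.set(_p_)
    }

    func pidleget() *p {
        _p_ := sched.pidle.ptr()
        if _p_ != nil {
            idlepMask.clear(_p_.id) // off the list, no longer idle
            sched.pidle = _p_.link
        }
        return _p_
    }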

Using a single bitmap avoids the need to touch every P to read p.status.
Currently, the bitmap approach is not significantly better than reading
p.status. However, in a future CL I'd like to apply a similar
optimization to timers. Once that is done, findrunnable would not touch
most Ps at all (in mostly idle programs), avoiding the memory latency of
pulling those Ps into cache.
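
Concretely, the read path in the steal loop changes shape as sketched
below (both lines are condensed and illustrative: the first is a
hypothetical status-based check, the second corresponds to the new
idlepMask.read check in the diff):

    // Per-P alternative: load p2.status, pulling p2's cache line in.
    statusIdle := atomic.Load(&p2.status) == _Pidle

    // This CL: one atomic load of a shared bitmap word covers 32 Ps.
    maskIdle := idlepMask.read(uint32(p2.id))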

When reading this bitmap, we are racing with Ps going in and out of
idle, so there are a few cases to consider:

1. _Prunning -> _Pidle: Running P goes idle after we check the bitmap.
In this case, we will try to steal (and find nothing) so there is no
harm.

2. _Pidle -> _Prunning while spinning: A P that starts running may queue
new work that we miss. This is OK: (a) that P cannot go back to sleep
without completing its work, and (b) more fundamentally, we will recheck
all run queues after we drop our P (see the sketch after case 3).

3. _Pidle -> _Prunning after spinning: After spinning, we really can
miss work from a newly woken P. (a) above still applies here as well,
but this is also the same delicate dance case described in findrunnable:
if nothing is spinning anymore, the other P will unpark a thread to run
the work it submits.
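
The recheck mentioned in case 2 is the tail of findrunnable, roughly
sketched below (condensed from the existing code plus the new mask check
in the diff; GC and netpoll checks are omitted):

    // After the steal loop finds nothing: snapshot allp and the idle
    // mask, release our P, stop spinning, then re-check every non-idle
    // P's run queue before blocking.
    allpSnapshot := allp
    idlepMaskSnapshot := idlepMask
    lock(&sched.lock)
    // (release our P and put it on the idle list; omitted)
    unlock(&sched.lock)
    // (if we were spinning, decrement nmspinning; omitted)
    for id, _p_ := range allpSnapshot {
        if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) {
            // Work appeared after our steal pass: reacquire a P and go
            // back to the top of findrunnable.
        }
    }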

Benchmark results from WakeupParallel/syscall/pair/race/1ms (see
golang.org/cl/228577):

name                            old msec          new msec   delta
Perf-task-clock-8               250 ± 1%          247 ± 4%     ~     (p=0.690 n=5+5)
Perf-task-clock-16              258 ± 2%          259 ± 2%     ~     (p=0.841 n=5+5)
Perf-task-clock-32              284 ± 2%          270 ± 4%   -4.94%  (p=0.032 n=5+5)
Perf-task-clock-64              326 ± 3%          303 ± 2%   -6.92%  (p=0.008 n=5+5)
Perf-task-clock-128             407 ± 2%          363 ± 5%  -10.69%  (p=0.008 n=5+5)
Perf-task-clock-256             561 ± 1%          481 ± 1%  -14.20%  (p=0.016 n=4+5)
Perf-task-clock-512             840 ± 5%          683 ± 2%  -18.70%  (p=0.008 n=5+5)
Perf-task-clock-1024          1.38k ±14%        1.07k ± 2%  -21.85%  (p=0.008 n=5+5)

[1] "Idle Ps" here refers to _Pidle Ps in the sched.pidle list. In other
contexts, Ps may temporarily transition through _Pidle (e.g., in
handoffp); those Ps may have work.

Updates #28808
Updates #18237

Change-Id: Ieeb958bd72e7d8fb375b0b1f414e8d7378b14e29
Reviewed-on: https://go-review.googlesource.com/c/go/+/259578
Run-TryBot: Michael Pratt <mpratt@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Trust: Michael Pratt <mpratt@google.com>
Michael Pratt 2020-10-01 15:21:37 -04:00
parent 44dbeaf356
commit 4a2cc73f87
2 changed files with 82 additions and 8 deletions


@@ -2295,8 +2295,12 @@ top:
if _p_ == p2 {
continue
}
if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil {
return gp, false
// Don't bother to attempt to steal if p2 is idle.
if !idlepMask.read(enum.position()) {
if gp := runqsteal(_p_, p2, stealRunNextG); gp != nil {
return gp, false
}
}
// Consider stealing timers from p2.
@@ -2307,8 +2311,13 @@ top:
// and is not marked for preemption. If p2 is running
// and not being preempted we assume it will handle its
// own timers.
//
// If we're still looking for work after checking all
// the P's, then go ahead and steal from an active P.
//
// TODO(prattmic): Maintain a global look-aside similar
// to idlepMask to avoid looking at p2 if it can't
// possibly have timers.
if i > 2 || (i > 1 && shouldStealTimers(p2)) {
tnow, w, ran := checkTimers(p2, now)
now = tnow
@@ -2379,6 +2388,9 @@ stop:
// safe-points. We don't need to snapshot the contents because
// everything up to cap(allp) is immutable.
allpSnapshot := allp
// Also snapshot idlepMask. Value changes are OK, but we can't allow
// len to change out from under us.
idlepMaskSnapshot := idlepMask
// return P and block
lock(&sched.lock)
@@ -2419,8 +2431,8 @@ stop:
}
// check all runqueues once again
for _, _p_ := range allpSnapshot {
if !runqempty(_p_) {
for id, _p_ := range allpSnapshot {
if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(_p_) {
lock(&sched.lock)
_p_ = pidleget()
unlock(&sched.lock)
@@ -4398,6 +4410,8 @@ func procresize(nprocs int32) *p {
}
sched.procresizetime = now
maskWords := (nprocs+31) / 32
// Grow allp if necessary.
if nprocs > int32(len(allp)) {
// Synchronize with retake, which could be running
@@ -4412,6 +4426,15 @@ func procresize(nprocs int32) *p {
copy(nallp, allp[:cap(allp)])
allp = nallp
}
if maskWords <= int32(cap(idlepMask)) {
idlepMask = idlepMask[:maskWords]
} else {
nidlepMask := make([]uint32, maskWords)
// No need to copy beyond len, old Ps are irrelevant.
copy(nidlepMask, idlepMask)
idlepMask = nidlepMask
}
unlock(&allpLock)
}
@@ -4470,6 +4493,7 @@ func procresize(nprocs int32) *p {
if int32(len(allp)) != nprocs {
lock(&allpLock)
allp = allp[:nprocs]
idlepMask = idlepMask[:maskWords]
unlock(&allpLock)
}
@@ -5153,8 +5177,46 @@ func globrunqget(_p_ *p, max int32) *g {
return gp
}
// Put p to on _Pidle list.
// pIdleMask is a bitmap of Ps in the _Pidle list, one bit per P.
type pIdleMask []uint32
// read returns true if P id is in the _Pidle list, and thus cannot have work.
func (p pIdleMask) read(id uint32) bool {
word := id / 32
mask := uint32(1) << (id % 32)
return (atomic.Load(&p[word]) & mask) != 0
}
// set sets P id as idle in mask.
//
// Must be called only for a P owned by the caller. In order to maintain
// consistency, a P going idle must update the idle mask simultaneously with
// updates to the idle P list under sched.lock, otherwise a racing pidleget
// may clear the mask before pidleput sets the mask, corrupting the bitmap.
//
// N.B., procresize takes ownership of all Ps in stopTheWorldWithSema.
func (p pIdleMask) set(id int32) {
word := id / 32
mask := uint32(1) << (id % 32)
atomic.Or(&p[word], mask)
}
// clear sets P id as non-idle in mask.
//
// See comment on set.
func (p pIdleMask) clear(id int32) {
word := id / 32
mask := uint32(1) << (id % 32)
atomic.And(&p[word], ^mask)
}
// pidleput puts p on the _Pidle list.
//
// This releases ownership of p. Once sched.lock is released it is no longer
// safe to use p.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func pidleput(_p_ *p) {
@@ -5163,13 +5225,16 @@ func pidleput(_p_ *p) {
if !runqempty(_p_) {
throw("pidleput: P has non-empty run queue")
}
idlepMask.set(_p_.id)
_p_.link = sched.pidle
sched.pidle.set(_p_)
atomic.Xadd(&sched.npidle, 1) // TODO: fast atomic
}
// Try get a p from _Pidle list.
// pidleget tries to get a p from the _Pidle list, acquiring ownership.
//
// sched.lock must be held.
//
// May run during STW, so write barriers are not allowed.
//go:nowritebarrierrec
func pidleget() *p {
@@ -5177,6 +5242,7 @@ func pidleget() *p {
_p_ := sched.pidle.ptr()
if _p_ != nil {
idlepMask.clear(_p_.id)
sched.pidle = _p_.link
atomic.Xadd(&sched.npidle, -1) // TODO: fast atomic
}


@@ -1035,14 +1035,22 @@ func (w waitReason) String() string {
var (
allglen uintptr
allm *m
allp []*p // len(allp) == gomaxprocs; may change at safe points, otherwise immutable
allpLock mutex // Protects P-less reads of allp and all writes
gomaxprocs int32
ncpu int32
forcegc forcegcstate
sched schedt
newprocs int32
// allpLock protects P-less reads and size changes of allp and
// idlepMask, and all writes to allp.
allpLock mutex
// len(allp) == gomaxprocs; may change at safe points, otherwise
// immutable.
allp []*p
// Bitmask of Ps in _Pidle list, one bit per P. Reads and writes must
// be atomic. Length may change at safe points.
idlepMask pIdleMask
// Information about what cpu features are available.
// Packages outside the runtime should not use these
// as they are not an external api.
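
Finally, the core bitmap trick is small enough to try outside the
runtime. Below is a self-contained, hypothetical sketch: idleMask and
the CAS-based set/clear are illustrative stand-ins, not runtime API (the
runtime version uses atomic.Or/atomic.And from runtime/internal/atomic,
under sched.lock).

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // idleMask mirrors the shape of pIdleMask: one bit per P, 32 Ps per
    // uint32 word.
    type idleMask []uint32

    // read reports whether the bit for id is set.
    func (m idleMask) read(id int) bool {
        return atomic.LoadUint32(&m[id/32])&(uint32(1)<<(uint(id)%32)) != 0
    }

    // set marks id as idle.
    func (m idleMask) set(id int) {
        word, bit := &m[id/32], uint32(1)<<(uint(id)%32)
        for {
            old := atomic.LoadUint32(word)
            if atomic.CompareAndSwapUint32(word, old, old|bit) {
                return
            }
        }
    }

    // clear marks id as non-idle.
    func (m idleMask) clear(id int) {
        word, bit := &m[id/32], uint32(1)<<(uint(id)%32)
        for {
            old := atomic.LoadUint32(word)
            if atomic.CompareAndSwapUint32(word, old, old&^bit) {
                return
            }
        }
    }

    func main() {
        const nprocs = 64
        m := make(idleMask, (nprocs+31)/32) // round up to whole words
        m.set(3)
        m.set(40)
        m.clear(3)
        fmt.Println(m.read(3), m.read(40), m.read(5)) // false true false
    }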