diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index dfddd8c6f6..cbdf2b8375 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -601,7 +601,13 @@ func gcFlushBgCredit(scanWork int64) { gp.gcAssistBytes = 0 xgp := gp gp = gp.schedlink.ptr() - ready(xgp, 0, true) + // It's important that we *not* put xgp in + // runnext. Otherwise, it's possible for user + // code to exploit the GC worker's high + // scheduler priority to get itself always run + // before other goroutines and always in the + // fresh quantum started by GC. + ready(xgp, 0, false) } else { // Partially satisfy this assist. gp.gcAssistBytes += scanBytes diff --git a/src/runtime/proc_test.go b/src/runtime/proc_test.go index 8994121071..22e4dca771 100644 --- a/src/runtime/proc_test.go +++ b/src/runtime/proc_test.go @@ -344,6 +344,14 @@ func TestGCFairness(t *testing.T) { } } +func TestGCFairness2(t *testing.T) { + output := runTestProg(t, "testprog", "GCFairness2") + want := "OK\n" + if output != want { + t.Fatalf("want %s, got %s\n", want, output) + } +} + func TestNumGoroutine(t *testing.T) { output := runTestProg(t, "testprog", "NumGoroutine") want := "1\n" diff --git a/src/runtime/testdata/testprog/gc.go b/src/runtime/testdata/testprog/gc.go index 0676e9a4ec..a0c1f82b56 100644 --- a/src/runtime/testdata/testprog/gc.go +++ b/src/runtime/testdata/testprog/gc.go @@ -8,11 +8,14 @@ import ( "fmt" "os" "runtime" + "runtime/debug" + "sync/atomic" "time" ) func init() { register("GCFairness", GCFairness) + register("GCFairness2", GCFairness2) register("GCSys", GCSys) } @@ -72,3 +75,35 @@ func GCFairness() { time.Sleep(10 * time.Millisecond) fmt.Println("OK") } + +func GCFairness2() { + // Make sure user code can't exploit the GC's high priority + // scheduling to make scheduling of user code unfair. See + // issue #15706.
+ runtime.GOMAXPROCS(1) + debug.SetGCPercent(1) + var count [3]int64 + var sink [3]interface{} + for i := range count { + go func(i int) { + for { + sink[i] = make([]byte, 1024) + atomic.AddInt64(&count[i], 1) + } + }(i) + } + // Note: If the unfairness is really bad, it may not even get + // past the sleep. + // + // If the scheduling rules change, this may not be enough time + // to let all goroutines run, but for now we cycle through + // them rapidly. + time.Sleep(30 * time.Millisecond) + for i := range count { + if atomic.LoadInt64(&count[i]) == 0 { + fmt.Printf("goroutine %d did not run\n", i) + return + } + } + fmt.Println("OK") +}