runtime: convert forcegc helper to Go
Also fix a bunch of bugs:
1. Accesses to last_gc must be atomic (it's int64).
2. last_gc still can be 0 during first checks in sysmon, check for 0.
3. forcegc.g can be uninitialized when sysmon accesses it:
forcegc.g is initialized by main goroutine (forcegc.g = newproc1(...)),
and main goroutine is unsynchronized with both sysmon and forcegc goroutine.
Initialize forcegc.g in the forcegc goroutine itself instead.
LGTM=khr
R=golang-codereviews, khr
CC=golang-codereviews, rsc
https://golang.org/cl/136770043
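
The first two fixes follow the usual pattern for a shared int64 timestamp that one goroutine writes and another polls: access it atomically (plain int64 loads and stores are not guaranteed atomic on 32-bit platforms), and treat zero as "not recorded yet". Below is a minimal user-level sketch of that pattern using sync/atomic in place of the runtime-internal atomicstore64/atomicload64; the names and the two-minute period are illustrative, not taken from the runtime:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// lastGC holds nanoseconds since the Unix epoch; 0 means no GC has finished yet.
var lastGC int64

// recordGC is what the collector side would do at the end of a cycle.
func recordGC() {
	atomic.StoreInt64(&lastGC, time.Now().UnixNano())
}

// needForcedGC is the monitor-side check: force a collection only if one has
// already happened (lastGC != 0) and more than period has elapsed since it.
func needForcedGC(now int64, period time.Duration) bool {
	last := atomic.LoadInt64(&lastGC)
	return last != 0 && now-last > int64(period)
}

func main() {
	fmt.Println(needForcedGC(time.Now().UnixNano(), 2*time.Minute)) // false: lastGC is still 0
	recordGC()
	fmt.Println(needForcedGC(time.Now().UnixNano(), 2*time.Minute)) // false: a GC just ran
}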
This commit is contained in:
parent ef64d9ffcc
commit 42486ffc5d
@@ -1450,7 +1450,7 @@ gc(struct gc_args *args)
 	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*runtime·gcpercent/100;

 	t4 = runtime·nanotime();
-	mstats.last_gc = runtime·unixnanotime(); // must be Unix time to make sense to user
+	runtime·atomicstore64(&mstats.last_gc, runtime·unixnanotime()); // must be Unix time to make sense to user
 	mstats.pause_ns[mstats.numgc%nelem(mstats.pause_ns)] = t4 - t0;
 	mstats.pause_total_ns += t4 - t0;
 	mstats.numgc++;
@@ -88,6 +88,7 @@ static Mutex allglock; // the following vars are protected by this lock or by st
 G** runtime·allg;
 uintptr runtime·allglen;
 static uintptr allgcap;
+ForceGCState runtime·forcegc;

 void runtime·mstart(void);
 static void runqput(P*, G*);
@@ -130,15 +131,6 @@ static bool exitsyscallfast(void);
 static bool haveexperiment(int8*);
 static void allgadd(G*);

-static void forcegchelper(void);
-static struct
-{
-	Mutex lock;
-	G* g;
-	FuncVal fv;
-	uint32 idle;
-} forcegc;
-
 extern String runtime·buildVersion;

 // The bootstrap sequence is:
@@ -254,8 +246,6 @@ runtime·main(void)

 	if(g->m != &runtime·m0)
 		runtime·throw("runtime·main not on m0");
-	forcegc.fv.fn = forcegchelper;
-	forcegc.g = runtime·newproc1(&forcegc.fv, nil, 0, 0, runtime·main);
 	main·init();

 	if(g->defer != &d || d.fn != &initDone)
@@ -2779,7 +2769,7 @@ static void
 sysmon(void)
 {
 	uint32 idle, delay, nscavenge;
-	int64 now, unixnow, lastpoll, lasttrace;
+	int64 now, unixnow, lastpoll, lasttrace, lastgc;
 	int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
 	G *gp;

@@ -2854,12 +2844,13 @@ sysmon(void)
 			idle++;

 		// check if we need to force a GC
-		if(unixnow - mstats.last_gc > forcegcperiod && runtime·atomicload(&forcegc.idle)) {
-			runtime·lock(&forcegc.lock);
-			forcegc.idle = 0;
-			forcegc.g->schedlink = nil;
-			injectglist(forcegc.g);
-			runtime·unlock(&forcegc.lock);
+		lastgc = runtime·atomicload64(&mstats.last_gc);
+		if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
+			runtime·lock(&runtime·forcegc.lock);
+			runtime·forcegc.idle = 0;
+			runtime·forcegc.g->schedlink = nil;
+			injectglist(runtime·forcegc.g);
+			runtime·unlock(&runtime·forcegc.lock);
 		}

 		// scavenge heap once in a while
@@ -2943,23 +2934,6 @@ retake(int64 now)
 	return n;
 }

-static void
-forcegchelper(void)
-{
-	g->issystem = true;
-	for(;;) {
-		runtime·lock(&forcegc.lock);
-		if(forcegc.idle)
-			runtime·throw("forcegc: phase error");
-		runtime·atomicstore(&forcegc.idle, 1);
-		runtime·parkunlock(&forcegc.lock, runtime·gostringnocopy((byte*)"force gc (idle)"));
-		// this goroutine is explicitly resumed by sysmon
-		if(runtime·debug.gctrace > 0)
-			runtime·printf("GC forced\n");
-		runtime·gc(1);
-	}
-}
-
 // Tell all goroutines that they have been preempted and they should stop.
 // This function is purely best-effort. It can fail to inform a goroutine if a
 // processor just started running it.
@@ -29,6 +29,27 @@ const (

 var parkunlock_c byte

+// start forcegc helper goroutine
+func init() {
+	go func() {
+		forcegc.g = getg()
+		forcegc.g.issystem = true
+		for {
+			lock(&forcegc.lock)
+			if forcegc.idle != 0 {
+				gothrow("forcegc: phase error")
+			}
+			atomicstore(&forcegc.idle, 1)
+			goparkunlock(&forcegc.lock, "force gc (idle)")
+			// this goroutine is explicitly resumed by sysmon
+			if debug.gctrace > 0 {
+				println("GC forced")
+			}
+			gogc(1)
+		}
+	}()
+}
+
 // Gosched yields the processor, allowing other goroutines to run. It does not
 // suspend the current goroutine, so execution resumes automatically.
 func Gosched() {
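Fix 3 works because the helper goroutine registers itself: forcegc.g is written via getg() by the goroutine it refers to, and sysmon only wakes a helper whose idle flag it can observe, so it never touches a half-initialized value. A rough user-level analogy of that hand-off is sketched below, with a mutex-protected idle flag and a wake channel standing in for the runtime's gopark/injectglist machinery; every name here is made up for illustration and is not runtime API:

package main

import (
	"fmt"
	"sync"
	"time"
)

// forceState mirrors the shape of ForceGCState: the helper publishes its own
// wake handle and idle flag under the lock; the monitor wakes it only when idle.
type forceState struct {
	mu   sync.Mutex
	wake chan struct{} // set by the helper itself, like forcegc.g = getg()
	idle bool
}

var force forceState

// helper registers itself, then loops: each wakeup is one "forced GC" request.
func helper() {
	force.mu.Lock()
	force.wake = make(chan struct{}) // self-registration: nobody else writes this
	force.idle = true
	wake := force.wake
	force.mu.Unlock()

	for range wake {
		fmt.Println("GC forced")
		force.mu.Lock()
		force.idle = true
		force.mu.Unlock()
	}
}

// monitor is the sysmon side: it skips the helper until it has registered.
func monitor() {
	force.mu.Lock()
	if force.wake != nil && force.idle { // nil means the helper has not started yet
		force.idle = false
		force.wake <- struct{}{}
	}
	force.mu.Unlock()
}

func main() {
	go helper()
	for i := 0; i < 5; i++ {
		monitor()
		time.Sleep(10 * time.Millisecond)
	}
}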
@@ -92,6 +92,7 @@ typedef struct ParForThread ParForThread;
 typedef struct CgoMal CgoMal;
 typedef struct PollDesc PollDesc;
 typedef struct DebugVars DebugVars;
+typedef struct ForceGCState ForceGCState;

 /*
  * Per-CPU declaration.
@@ -572,6 +573,13 @@ struct DebugVars
 	int32 scavenge;
 };

+struct ForceGCState
+{
+	Mutex lock;
+	G* g;
+	uint32 idle;
+};
+
 extern bool runtime·precisestack;
 extern bool runtime·copystack;
@@ -774,6 +782,7 @@ extern uint32 runtime·cpuid_edx;
 extern DebugVars runtime·debug;
 extern uintptr runtime·maxstacksize;
 extern Note runtime·signote;
+extern ForceGCState runtime·forcegc;

 /*
  * common functions and data