Revert "internal/lsp/cache: reduce critical sections"

This reverts commit 654a14b527.

Reason for revert: my flawed understanding of the concurrency in this code.

Change-Id: I31a35267323bb1ff4dff1d9244d3ce69c36cdda4
Reviewed-on: https://go-review.googlesource.com/c/tools/+/412694
Run-TryBot: Alan Donovan <adonovan@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Hyang-Ah Hana Kim <hyangah@gmail.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
This commit is contained in:
Alan Donovan 2022-06-16 13:41:08 +00:00
parent d097bc9f9d
commit 041035c34a
4 changed files with 26 additions and 37 deletions

View File

@@ -97,6 +97,14 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so
return nil, err
}
// Do not close over the packageHandle or the snapshot in the Bind function.
// This creates a cycle, which causes the finalizers to never run on the handles.
// The possible cycles are:
//
// packageHandle.h.function -> packageHandle
// packageHandle.h.function -> snapshot -> packageHandle
//
m := ph.m
key := ph.key
@@ -113,13 +121,6 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so
}(dep)
}
// TODO(adonovan): opt: consider moving the Wait here,
// so that dependencies complete before we start to
// read+parse+typecheck this package. Although the
// read+parse can proceed, typechecking will block
// almost immediately until the imports are done.
// The effect is to increase contention.
data := &packageData{}
data.pkg, data.err = typeCheck(ctx, snapshot, m.Metadata, mode, deps)
// Make sure that the workers above have finished before we return,
@@ -447,7 +448,6 @@ func doTypeCheck(ctx context.Context, snapshot *snapshot, m *Metadata, mode sour
}
typeparams.InitInstanceInfo(pkg.typesInfo)
// TODO(adonovan): opt: execute this loop in parallel.
for _, gf := range pkg.m.GoFiles {
// In the presence of line directives, we may need to report errors in
// non-compiled Go files, so we need to register them on the package.

View File

@@ -278,7 +278,7 @@ func parseGo(ctx context.Context, fset *token.FileSet, fh source.FileHandle, mod
tok := fset.File(file.Pos())
if tok == nil {
// file.Pos is the location of the package declaration (issue #53202). If there was
// file.Pos is the location of the package declaration. If there was
// none, we can't find the token.File that ParseFile created, and we
// have no choice but to recreate it.
tok = fset.AddFile(fh.URI().Filename(), -1, len(src))

View File

@@ -119,6 +119,8 @@ func formatEvent(ctx context.Context, ev core.Event, lm label.Map) string {
}
func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map) context.Context {
t.mu.Lock()
defer t.mu.Unlock()
span := export.GetSpan(ctx)
if span == nil {
return ctx
@@ -126,8 +128,11 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
switch {
case event.IsStart(ev):
// Just starting: add it to the unfinished map.
// Allocate before the critical section.
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[export.SpanContext]*traceData)
}
// just starting, add it to the unfinished map
td := &traceData{
TraceID: span.ID.TraceID,
SpanID: span.ID.SpanID,
@@ -136,13 +141,6 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
Start: span.Start().At(),
Tags: renderLabels(span.Start()),
}
t.mu.Lock()
defer t.mu.Unlock()
if t.sets == nil {
t.sets = make(map[string]*traceSet)
t.unfinished = make(map[export.SpanContext]*traceData)
}
t.unfinished[span.ID] = td
// and wire up parents if we have them
if !span.ParentID.IsValid() {
@@ -157,19 +155,7 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
parent.Children = append(parent.Children, td)
case event.IsEnd(ev):
// Finishing: must be already in the map.
// Allocate events before the critical section.
events := span.Events()
tdEvents := make([]traceEvent, len(events))
for i, event := range events {
tdEvents[i] = traceEvent{
Time: event.At(),
Tags: renderLabels(event),
}
}
t.mu.Lock()
defer t.mu.Unlock()
// finishing, must be already in the map
td, found := t.unfinished[span.ID]
if !found {
return ctx // if this happens we are in a bad place
@@ -178,7 +164,14 @@ func (t *traces) ProcessEvent(ctx context.Context, ev core.Event, lm label.Map)
td.Finish = span.Finish().At()
td.Duration = span.Finish().At().Sub(span.Start().At())
td.Events = tdEvents
events := span.Events()
td.Events = make([]traceEvent, len(events))
for i, event := range events {
td.Events[i] = traceEvent{
Time: event.At(),
Tags: renderLabels(event),
}
}
set, ok := t.sets[span.Name]
if !ok {

View File

@@ -181,9 +181,8 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter
if atomic.LoadUint32(&g.destroyed) != 0 {
panic("operation on generation " + g.name + " destroyed by " + g.destroyedBy)
}
// Avoid 'defer Unlock' to reduce critical section.
g.store.mu.Lock()
defer g.store.mu.Unlock()
h, ok := g.store.handles[key]
if !ok {
h := &Handle{
@@ -193,11 +192,8 @@ func (g *Generation) Bind(key interface{}, function Function, cleanup func(inter
cleanup: cleanup,
}
g.store.handles[key] = h
g.store.mu.Unlock()
return h
}
g.store.mu.Unlock()
h.mu.Lock()
defer h.mu.Unlock()
if _, ok := h.generations[g]; !ok {