diff --git a/internal/lsp/cache/analysis.go b/internal/lsp/cache/analysis.go index ed74ee11fb..3257324cf5 100644 --- a/internal/lsp/cache/analysis.go +++ b/internal/lsp/cache/analysis.go @@ -133,7 +133,7 @@ func (s *snapshot) actionHandle(ctx context.Context, id packageID, a *analysis.A } } - h := s.view.session.cache.store.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(buildActionKey(a, ph), func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) // Analyze dependencies first. results, err := execAll(ctx, snapshot, deps) @@ -151,11 +151,11 @@ func (s *snapshot) actionHandle(ctx context.Context, id packageID, a *analysis.A } func (act *actionHandle) analyze(ctx context.Context, snapshot *snapshot) ([]*source.Error, interface{}, error) { - v, err := act.handle.Get(ctx, snapshot) - if v == nil { + d, err := act.handle.Get(ctx, snapshot.generation, snapshot) + if err != nil { return nil, nil, err } - data, ok := v.(*actionData) + data, ok := d.(*actionData) if !ok { return nil, nil, errors.Errorf("unexpected type for %s:%s", act.pkg.ID(), act.analyzer.Name) } @@ -181,7 +181,7 @@ func execAll(ctx context.Context, snapshot *snapshot, actions []*actionHandle) ( for _, act := range actions { act := act g.Go(func() error { - v, err := act.handle.Get(ctx, snapshot) + v, err := act.handle.Get(ctx, snapshot.generation, snapshot) if err != nil { return err } diff --git a/internal/lsp/cache/cache.go b/internal/lsp/cache/cache.go index 11ee52cc98..da23186686 100644 --- a/internal/lsp/cache/cache.go +++ b/internal/lsp/cache/cache.go @@ -54,10 +54,9 @@ type Cache struct { type fileHandle struct { modTime time.Time uri span.URI - memoize.NoCopy - bytes []byte - hash string - err error + bytes []byte + hash string + err error } func (c *Cache) GetFile(ctx context.Context, uri span.URI) (source.FileHandle, error) { diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go index abb6ba22fa..62067d4efe 100644 --- a/internal/lsp/cache/check.go +++ b/internal/lsp/cache/check.go @@ -51,8 +51,6 @@ func (ph *packageHandle) packageKey() packageKey { // packageData contains the data produced by type-checking a package. type packageData struct { - memoize.NoCopy - pkg *pkg err error } @@ -80,7 +78,7 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode so m := ph.m key := ph.key - h := s.view.session.cache.store.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) // Begin loading the direct dependencies, in parallel. 
@@ -202,7 +200,7 @@ func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.P } func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) { - v, err := ph.handle.Get(ctx, s) + v, err := ph.handle.Get(ctx, s.generation, s) if err != nil { return nil, err } @@ -218,12 +216,8 @@ func (ph *packageHandle) ID() string { return string(ph.m.id) } -func (ph *packageHandle) Cached() (source.Package, error) { - return ph.cached() -} - -func (ph *packageHandle) cached() (*pkg, error) { - v := ph.handle.Cached() +func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) { + v := ph.handle.Cached(g) if v == nil { return nil, errors.Errorf("no cached type information for %s", ph.m.pkgPath) } diff --git a/internal/lsp/cache/mod.go b/internal/lsp/cache/mod.go index 395c0e3294..64c50af2c2 100644 --- a/internal/lsp/cache/mod.go +++ b/internal/lsp/cache/mod.go @@ -32,16 +32,14 @@ type parseModHandle struct { } type parseModData struct { - memoize.NoCopy - parsed *source.ParsedModule // err is any error encountered while parsing the file. err error } -func (mh *parseModHandle) parse(ctx context.Context, s source.Snapshot) (*source.ParsedModule, error) { - v, err := mh.handle.Get(ctx, s.(*snapshot)) +func (mh *parseModHandle) parse(ctx context.Context, snapshot *snapshot) (*source.ParsedModule, error) { + v, err := mh.handle.Get(ctx, snapshot.generation, snapshot) if err != nil { return nil, err } @@ -54,7 +52,7 @@ func (s *snapshot) ParseMod(ctx context.Context, modFH source.FileHandle) (*sour return handle.parse(ctx, s) } - h := s.view.session.cache.store.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { + h := s.generation.Bind(modFH.FileIdentity(), func(ctx context.Context, _ memoize.Arg) interface{} { _, done := event.Start(ctx, "cache.ParseModHandle", tag.URI.Of(modFH.URI())) defer done() @@ -182,8 +180,8 @@ type modWhyData struct { err error } -func (mwh *modWhyHandle) why(ctx context.Context, s source.Snapshot) (map[string]string, error) { - v, err := mwh.handle.Get(ctx, s.(*snapshot)) +func (mwh *modWhyHandle) why(ctx context.Context, snapshot *snapshot) (map[string]string, error) { + v, err := mwh.handle.Get(ctx, snapshot.generation, snapshot) if err != nil { return nil, err } @@ -206,7 +204,7 @@ func (s *snapshot) ModWhy(ctx context.Context) (map[string]string, error) { view: s.view.root.Filename(), verb: why, } - h := s.view.session.cache.store.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { ctx, done := event.Start(ctx, "cache.ModWhyHandle", tag.URI.Of(fh.URI())) defer done() @@ -261,8 +259,8 @@ type modUpgradeData struct { err error } -func (muh *modUpgradeHandle) Upgrades(ctx context.Context, s source.Snapshot) (map[string]string, error) { - v, err := muh.handle.Get(ctx, s.(*snapshot)) +func (muh *modUpgradeHandle) Upgrades(ctx context.Context, snapshot *snapshot) (map[string]string, error) { + v, err := muh.handle.Get(ctx, snapshot.generation, snapshot) if v == nil { return nil, err } @@ -286,7 +284,7 @@ func (s *snapshot) ModUpgrade(ctx context.Context) (map[string]string, error) { view: s.view.root.Filename(), verb: upgrade, } - h := s.view.session.cache.store.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { ctx, done := event.Start(ctx, "cache.ModUpgradeHandle", tag.URI.Of(fh.URI())) defer done() 
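The hunks above (and the similar ones below) swap every store-wide s.view.session.cache.store.Bind(...) for s.generation.Bind(...), and every handle.Get(ctx, arg) for handle.Get(ctx, generation, arg). For orientation, here is a minimal, self-contained sketch of the new memoize call pattern in isolation. The key, function, and generation name are made up, and internal/memoize is only importable from inside x/tools, so this is illustrative rather than something to build outside the repo.

package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/memoize"
)

func main() {
	var store memoize.Store
	gen := store.Generation("example") // the name is only used for debugging

	// Bind now lives on a Generation rather than on the Store; the handle it
	// returns is shared by every generation that knows the same key.
	handle := gen.Bind("some-key", func(ctx context.Context, arg memoize.Arg) interface{} {
		return "computed at most once while some generation references the key"
	})

	// Get takes the generation alongside the Arg, and refuses to read through
	// a generation that no longer knows the handle.
	v, err := handle.Get(context.Background(), gen, nil)
	fmt.Println(v, err)

	// Cached likewise requires the generation under the new API.
	fmt.Println(handle.Cached(gen))

	gen.Destroy() // evicts entries that only this generation still references
}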
diff --git a/internal/lsp/cache/mod_tidy.go b/internal/lsp/cache/mod_tidy.go index 5e0c1dbd5c..9ea70786e4 100644 --- a/internal/lsp/cache/mod_tidy.go +++ b/internal/lsp/cache/mod_tidy.go @@ -37,14 +37,12 @@ type modTidyHandle struct { } type modTidyData struct { - memoize.NoCopy - tidied *source.TidiedModule err error } -func (mth *modTidyHandle) tidy(ctx context.Context, s source.Snapshot) (*source.TidiedModule, error) { - v, err := mth.handle.Get(ctx, s.(*snapshot)) +func (mth *modTidyHandle) tidy(ctx context.Context, snapshot *snapshot) (*source.TidiedModule, error) { + v, err := mth.handle.Get(ctx, snapshot.generation, snapshot) if err != nil { return nil, err } @@ -88,7 +86,7 @@ func (s *snapshot) ModTidy(ctx context.Context) (*source.TidiedModule, error) { gomod: modFH.FileIdentity(), cfg: hashConfig(cfg), } - h := s.view.session.cache.store.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { ctx, done := event.Start(ctx, "cache.ModTidyHandle", tag.URI.Of(modURI)) defer done() diff --git a/internal/lsp/cache/parse.go b/internal/lsp/cache/parse.go index b5ce9dff6a..1c4c64ac86 100644 --- a/internal/lsp/cache/parse.go +++ b/internal/lsp/cache/parse.go @@ -41,8 +41,6 @@ type parseGoHandle struct { } type parseGoData struct { - memoize.NoCopy - parsed *source.ParsedGoFile // If true, we adjusted the AST to make it type check better, and @@ -59,12 +57,12 @@ func (s *snapshot) parseGoHandle(ctx context.Context, fh source.FileHandle, mode if pgh := s.getGoFile(key); pgh != nil { return pgh } - parseHandle := s.view.session.cache.store.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { + parseHandle := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) return parseGo(ctx, snapshot.view.session.cache.fset, fh, mode) }) - astHandle := s.view.session.cache.store.Bind(astCacheKey(key), func(ctx context.Context, arg memoize.Arg) interface{} { + astHandle := s.generation.Bind(astCacheKey(key), func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) return buildASTCache(ctx, snapshot, parseHandle) }) @@ -97,7 +95,7 @@ func (s *snapshot) ParseGo(ctx context.Context, fh source.FileHandle, mode sourc } func (s *snapshot) parseGo(ctx context.Context, pgh *parseGoHandle) (*source.ParsedGoFile, bool, error) { - d, err := pgh.handle.Get(ctx, s) + d, err := pgh.handle.Get(ctx, s.generation, s) if err != nil { return nil, false, err } @@ -112,7 +110,7 @@ func (s *snapshot) PosToDecl(ctx context.Context, pgf *source.ParsedGoFile) (map } pgh := s.parseGoHandle(ctx, fh, pgf.Mode) - d, err := pgh.astCacheHandle.Get(ctx, s) + d, err := pgh.astCacheHandle.Get(ctx, s.generation, s) if err != nil { return nil, err } @@ -128,7 +126,7 @@ func (s *snapshot) PosToField(ctx context.Context, pgf *source.ParsedGoFile) (ma } pgh := s.parseGoHandle(ctx, fh, pgf.Mode) - d, err := pgh.astCacheHandle.Get(ctx, s) + d, err := pgh.astCacheHandle.Get(ctx, s.generation, s) if err != nil || d == nil { return nil, err } @@ -138,8 +136,6 @@ func (s *snapshot) PosToField(ctx context.Context, pgf *source.ParsedGoFile) (ma } type astCacheData struct { - memoize.NoCopy - err error posToDecl map[token.Pos]ast.Decl @@ -156,7 +152,7 @@ func buildASTCache(ctx context.Context, snapshot *snapshot, parseHandle *memoize decls []ast.Decl ) - v, err := parseHandle.Get(ctx, snapshot) + v, err := parseHandle.Get(ctx, snapshot.generation, snapshot) if 
err != nil { return &astCacheData{err: err} } diff --git a/internal/lsp/cache/session.go b/internal/lsp/cache/session.go index 4374eee5e2..accdae641c 100644 --- a/internal/lsp/cache/session.go +++ b/internal/lsp/cache/session.go @@ -172,22 +172,22 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, folder: folder, filesByURI: make(map[span.URI]*fileBase), filesByBase: make(map[string][]*fileBase), - snapshot: &snapshot{ - id: snapshotID, - packages: make(map[packageKey]*packageHandle), - ids: make(map[span.URI][]packageID), - metadata: make(map[packageID]*metadata), - files: make(map[span.URI]source.VersionedFileHandle), - goFiles: make(map[parseKey]*parseGoHandle), - importedBy: make(map[packageID][]packageID), - actions: make(map[actionKey]*actionHandle), - workspacePackages: make(map[packageID]packagePath), - unloadableFiles: make(map[span.URI]struct{}), - parseModHandles: make(map[span.URI]*parseModHandle), - }, } - v.snapshot.view = v - v.snapshot.active.Add(1) + v.snapshot = &snapshot{ + id: snapshotID, + view: v, + generation: s.cache.store.Generation(generationName(v, 0)), + packages: make(map[packageKey]*packageHandle), + ids: make(map[span.URI][]packageID), + metadata: make(map[packageID]*metadata), + files: make(map[span.URI]source.VersionedFileHandle), + goFiles: make(map[parseKey]*parseGoHandle), + importedBy: make(map[packageID][]packageID), + actions: make(map[actionKey]*actionHandle), + workspacePackages: make(map[packageID]packagePath), + unloadableFiles: make(map[span.URI]struct{}), + parseModHandles: make(map[span.URI]*parseModHandle), + } if v.session.cache.options != nil { v.session.cache.options(&v.options) @@ -215,10 +215,12 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI, // Initialize the view without blocking. initCtx, initCancel := context.WithCancel(xcontext.Detach(ctx)) v.initCancelFirstAttempt = initCancel - go v.initialize(initCtx, v.snapshot, true) - - v.snapshot.active.Add(1) - return v, v.snapshot, v.snapshot.active.Done, nil + go func() { + release := v.snapshot.generation.Acquire(initCtx) + v.initialize(initCtx, v.snapshot, true) + release() + }() + return v, v.snapshot, v.snapshot.generation.Acquire(ctx), nil } // View returns the view by name. @@ -343,6 +345,7 @@ func (s *Session) updateView(ctx context.Context, view *View, options source.Opt s.views[i] = s.views[len(s.views)-1] s.views[len(s.views)-1] = nil s.views = s.views[:len(s.views)-1] + return nil, err } // substitute the new view into the array where the old view was s.views[i] = v diff --git a/internal/lsp/cache/snapshot.go b/internal/lsp/cache/snapshot.go index 87c164e6e8..da645c047a 100644 --- a/internal/lsp/cache/snapshot.go +++ b/internal/lsp/cache/snapshot.go @@ -38,7 +38,8 @@ type snapshot struct { id uint64 view *View - active sync.WaitGroup + // the cache generation that contains the data for this snapshot. + generation *memoize.Generation // builtin pins the AST and package for builtin.go in memory. 
builtin *builtinPackageHandle @@ -495,7 +496,7 @@ func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Pac results := map[string]source.Package{} for _, ph := range s.packages { - cachedPkg, err := ph.cached() + cachedPkg, err := ph.cached(s.generation) if err != nil { continue } @@ -788,12 +789,18 @@ func contains(views []*View, view *View) bool { return false } +func generationName(v *View, snapshotID uint64) string { + return fmt.Sprintf("v%v/%v", v.id, snapshotID) +} + func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.VersionedFileHandle, forceReloadMetadata bool) *snapshot { s.mu.Lock() defer s.mu.Unlock() + newGen := s.view.session.cache.store.Generation(generationName(s.view, s.id+1)) result := &snapshot{ id: s.id + 1, + generation: newGen, view: s.view, builtin: s.builtin, ids: make(map[span.URI][]packageID), @@ -812,6 +819,10 @@ func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.Ve modWhyHandle: s.modWhyHandle, } + if s.builtin != nil { + newGen.Inherit(s.builtin.handle) + } + // Copy all of the FileHandles. for k, v := range s.files { result.files[k] = v @@ -822,6 +833,7 @@ func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.Ve } // Copy all of the modHandles. for k, v := range s.parseModHandles { + newGen.Inherit(v.handle) result.parseModHandles[k] = v } // Copy all of the workspace directories. They may be reset later. @@ -833,6 +845,8 @@ func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.Ve if _, ok := withoutURIs[k.file.URI]; ok { continue } + newGen.Inherit(v.handle) + newGen.Inherit(v.astCacheHandle) result.goFiles[k] = v } @@ -935,6 +949,7 @@ func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.Ve if _, ok := transitiveIDs[k.id]; ok { continue } + newGen.Inherit(v.handle) result.packages[k] = v } // Copy the package analysis information. @@ -942,6 +957,7 @@ func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.Ve if _, ok := transitiveIDs[k.pkg.id]; ok { continue } + newGen.Inherit(v.handle) result.actions[k] = v } // Copy the package metadata. We only need to invalidate packages directly @@ -988,6 +1004,17 @@ copyIDs: result.workspacePackages[id] = pkgPath } + + if result.modTidyHandle != nil { + newGen.Inherit(result.modTidyHandle.handle) + } + if result.modUpgradeHandle != nil { + newGen.Inherit(result.modUpgradeHandle.handle) + } + if result.modWhyHandle != nil { + newGen.Inherit(result.modWhyHandle.handle) + } + // Don't bother copying the importedBy graph, // as it changes each time we update metadata. 
return result @@ -1106,7 +1133,7 @@ func (s *snapshot) BuiltinPackage(ctx context.Context) (*source.BuiltinPackage, if s.builtin == nil { return nil, errors.Errorf("no builtin package for view %s", s.view.name) } - d, err := s.builtin.handle.Get(ctx, s) + d, err := s.builtin.handle.Get(ctx, s.generation, s) if err != nil { return nil, err } @@ -1126,7 +1153,7 @@ func (s *snapshot) buildBuiltinPackage(ctx context.Context, goFiles []string) er if err != nil { return err } - h := s.view.session.cache.store.Bind(fh.FileIdentity(), func(ctx context.Context, arg memoize.Arg) interface{} { + h := s.generation.Bind(fh.FileIdentity(), func(ctx context.Context, arg memoize.Arg) interface{} { snapshot := arg.(*snapshot) pgh := snapshot.parseGoHandle(ctx, fh, source.ParseFull) diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go index c9be6c4d57..7110e28a9c 100644 --- a/internal/lsp/cache/view.go +++ b/internal/lsp/cache/view.go @@ -143,8 +143,6 @@ type builtinPackageHandle struct { } type builtinPackageData struct { - memoize.NoCopy - parsed *source.BuiltinPackage err error } @@ -281,7 +279,7 @@ func (v *View) Rebuild(ctx context.Context) (source.Snapshot, func(), error) { if err != nil { return nil, func() {}, err } - snapshot, release := newView.Snapshot() + snapshot, release := newView.Snapshot(ctx) return snapshot, release, nil } @@ -489,7 +487,7 @@ func (v *View) WorkspaceDirectories(ctx context.Context) ([]string, error) { if err != nil { return nil, err } - snapshot, release := v.Snapshot() + snapshot, release := v.Snapshot(ctx) defer release() pm, err := snapshot.ParseMod(ctx, fh) @@ -587,11 +585,14 @@ func (v *View) shutdown(ctx context.Context) { v.initCancelFirstAttempt() v.mu.Lock() - defer v.mu.Unlock() if v.cancel != nil { v.cancel() v.cancel = nil } + v.mu.Unlock() + v.snapshotMu.Lock() + go v.snapshot.generation.Destroy() + v.snapshotMu.Unlock() } func (v *View) BackgroundContext() context.Context { @@ -636,10 +637,9 @@ func checkIgnored(suffix string) bool { return false } -func (v *View) Snapshot() (source.Snapshot, func()) { +func (v *View) Snapshot(ctx context.Context) (source.Snapshot, func()) { s := v.getSnapshot() - s.active.Add(1) - return s, s.active.Done + return s, s.generation.Acquire(ctx) } func (v *View) getSnapshot() *snapshot { @@ -713,12 +713,9 @@ func (v *View) invalidateContent(ctx context.Context, uris map[span.URI]source.V oldSnapshot := v.snapshot v.snapshot = oldSnapshot.clone(ctx, uris, forceReloadMetadata) - // Move the View's reference from the old snapshot to the new one. 
- oldSnapshot.active.Done() - v.snapshot.active.Add(1) + go oldSnapshot.generation.Destroy() - v.snapshot.active.Add(1) - return v.snapshot, v.snapshot.active.Done + return v.snapshot, v.snapshot.generation.Acquire(ctx) } func (v *View) cancelBackground() { diff --git a/internal/lsp/command.go b/internal/lsp/command.go index 28c8d481a1..96bdd6ee0d 100644 --- a/internal/lsp/command.go +++ b/internal/lsp/command.go @@ -173,7 +173,7 @@ func (s *Server) executeCommand(ctx context.Context, params *protocol.ExecuteCom if err != nil { return nil, err } - snapshot, release := sv.Snapshot() + snapshot, release := sv.Snapshot(ctx) defer release() s.diagnoseSnapshot(snapshot) return nil, nil @@ -188,7 +188,7 @@ func (s *Server) directGoModCommand(ctx context.Context, uri protocol.DocumentUR if err != nil { return err } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) defer release() return snapshot.RunGoCommandDirect(ctx, verb, args) } diff --git a/internal/lsp/general.go b/internal/lsp/general.go index ddb8ecfc8c..bc367723f7 100644 --- a/internal/lsp/general.go +++ b/internal/lsp/general.go @@ -260,7 +260,7 @@ func (s *Server) updateWatchedDirectories(ctx context.Context, updatedSnapshots if _, ok := seenViews[view]; ok { continue } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) for _, dir := range snapshot.WorkspaceDirectories(ctx) { dirsToWatch[dir] = struct{}{} } @@ -416,7 +416,7 @@ func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI if err != nil { return nil, nil, false, func() {}, err } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) fh, err := snapshot.GetFile(ctx, uri) if err != nil { release() diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go index 579f51da4d..1f12bca98b 100644 --- a/internal/lsp/lsp_test.go +++ b/internal/lsp/lsp_test.go @@ -151,7 +151,7 @@ func (r *runner) CodeLens(t *testing.T, uri span.URI, want []protocol.CodeLens) if err != nil { t.Fatal(err) } - snapshot, release := v.Snapshot() + snapshot, release := v.Snapshot(r.ctx) defer release() got, err := mod.CodeLens(r.ctx, snapshot, uri) if err != nil { @@ -168,7 +168,7 @@ func (r *runner) Diagnostics(t *testing.T, uri span.URI, want []*source.Diagnost r.diagnostics = make(map[span.URI]map[string]*source.Diagnostic) v := r.server.session.View(r.data.Config.Dir) // Always run diagnostics with analysis. - snapshot, release := v.Snapshot() + snapshot, release := v.Snapshot(r.ctx) defer release() reports, _ := r.server.diagnose(r.ctx, snapshot, true) for key, diags := range reports { @@ -423,7 +423,7 @@ func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string) t.Fatal(err) } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(r.ctx) defer release() fh, err := snapshot.GetFile(r.ctx, uri) @@ -442,7 +442,7 @@ func (r *runner) SuggestedFix(t *testing.T, spn span.Span, actionKinds []string) if r.diagnostics == nil { r.diagnostics = make(map[span.URI]map[string]*source.Diagnostic) // Always run diagnostics with analysis. 
- snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(r.ctx) defer release() reports, _ := r.server.diagnose(r.ctx, snapshot, true) for key, diags := range reports { @@ -542,7 +542,7 @@ func (r *runner) FunctionExtraction(t *testing.T, start span.Span, end span.Span t.Fatal(err) } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(r.ctx) defer release() fh, err := snapshot.GetFile(r.ctx, uri) diff --git a/internal/lsp/source/source_test.go b/internal/lsp/source/source_test.go index 76fce9bb31..8bc69f3ace 100644 --- a/internal/lsp/source/source_test.go +++ b/internal/lsp/source/source_test.go @@ -81,7 +81,7 @@ func testSource(t *testing.T, exporter packagestest.Exporter) { if err := session.ModifyFiles(ctx, modifications); err != nil { t.Fatal(err) } - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) defer release() r := &runner{ view: view, diff --git a/internal/lsp/source/view.go b/internal/lsp/source/view.go index 9494f944f0..d345321347 100644 --- a/internal/lsp/source/view.go +++ b/internal/lsp/source/view.go @@ -18,7 +18,6 @@ import ( "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/imports" "golang.org/x/tools/internal/lsp/protocol" - "golang.org/x/tools/internal/memoize" "golang.org/x/tools/internal/span" errors "golang.org/x/xerrors" ) @@ -136,7 +135,7 @@ type View interface { // on behalf of this view. BackgroundContext() context.Context - // Shutdown closes this view, and detaches it from it's session. + // Shutdown closes this view, and detaches it from its session. Shutdown(ctx context.Context) // WriteEnv writes the view-specific environment to the io.Writer. @@ -156,7 +155,7 @@ type View interface { SetOptions(context.Context, Options) (View, error) // Snapshot returns the current snapshot for the view. - Snapshot() (Snapshot, func()) + Snapshot(ctx context.Context) (Snapshot, func()) // Rebuild rebuilds the current view, replacing the original view in its session. Rebuild(ctx context.Context) (Snapshot, func(), error) @@ -182,8 +181,6 @@ type BuiltinPackage struct { // A ParsedGoFile contains the results of parsing a Go file. type ParsedGoFile struct { - memoize.NoCopy - URI span.URI Mode ParseMode File *ast.File @@ -197,8 +194,6 @@ type ParsedGoFile struct { // A ParsedModule contains the results of parsing a go.mod file. type ParsedModule struct { - memoize.NoCopy - File *modfile.File Mapper *protocol.ColumnMapper ParseErrors []Error @@ -206,8 +201,6 @@ type ParsedModule struct { // A TidiedModule contains the results of running `go mod tidy` on a module. type TidiedModule struct { - memoize.NoCopy - // The parsed module, which is guaranteed to have parsed successfully. Parsed *ParsedModule // Diagnostics representing changes made by `go mod tidy`. 
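The changes from session.go through source/view.go above replace the snapshot's sync.WaitGroup reference counting with the generation's: View.Snapshot now takes a context and hands back a release func obtained from generation.Acquire. A hedged sketch of the caller-side pattern the lsp handlers follow; doWork, its package name, and its body are placeholders.

package lspcaller // hypothetical

import (
	"context"

	"golang.org/x/tools/internal/lsp/source"
)

// doWork is a placeholder showing how request handlers hold a snapshot alive.
func doWork(ctx context.Context, view source.View) {
	snapshot, release := view.Snapshot(ctx) // release comes from generation.Acquire(ctx)
	defer release()                         // lets a later generation.Destroy proceed
	_ = snapshot                            // ... read whatever the request needs from the snapshot ...
}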
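On the producer side, snapshot.clone builds the next generation and explicitly Inherits every handle that survives the change (go files, packages, actions, mod handles, builtin), and view.invalidateContent then destroys the old generation in the background. Below is a distilled, hypothetical version of that flow; the map, the string key type, the stillValid predicate, and the package name are stand-ins for the real per-kind copying loops.

package cacheclone // hypothetical

import "golang.org/x/tools/internal/memoize"

// inheritValid carries still-valid handles into a fresh generation and then
// destroys the old one, which evicts whatever was not inherited anywhere.
func inheritValid(store *memoize.Store, oldGen *memoize.Generation, name string,
	old map[string]*memoize.Handle, stillValid func(string) bool) (map[string]*memoize.Handle, *memoize.Generation) {

	newGen := store.Generation(name)
	kept := make(map[string]*memoize.Handle, len(old))
	for key, h := range old {
		if !stillValid(key) {
			continue // invalidated entries are left behind and die with oldGen
		}
		newGen.Inherit(h) // keeps the cached value alive into the new generation
		kept[key] = h
	}
	go oldGen.Destroy() // waits for readers of oldGen, then evicts orphaned entries
	return kept, newGen
}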
diff --git a/internal/lsp/source/workspace_symbol.go b/internal/lsp/source/workspace_symbol.go index dc75aee81d..d631cacbb1 100644 --- a/internal/lsp/source/workspace_symbol.go +++ b/internal/lsp/source/workspace_symbol.go @@ -47,7 +47,7 @@ func WorkspaceSymbols(ctx context.Context, matcherType SymbolMatcher, style Symb var symbols []protocol.SymbolInformation outer: for _, view := range views { - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) defer release() // TODO: refactor so this runs promptly instead of at the end of the function knownPkgs, err := snapshot.KnownPackages(ctx) if err != nil { diff --git a/internal/lsp/workspace.go b/internal/lsp/workspace.go index 18b3065550..82b7b077b7 100644 --- a/internal/lsp/workspace.go +++ b/internal/lsp/workspace.go @@ -53,7 +53,7 @@ func (s *Server) didChangeConfiguration(ctx context.Context, changed interface{} return err } go func() { - snapshot, release := view.Snapshot() + snapshot, release := view.Snapshot(ctx) defer release() s.diagnoseDetached(snapshot) }() diff --git a/internal/memoize/memoize.go b/internal/memoize/memoize.go index a5e81f096e..c10dc8e8a5 100644 --- a/internal/memoize/memoize.go +++ b/internal/memoize/memoize.go @@ -5,10 +5,6 @@ // Package memoize supports memoizing the return values of functions with // idempotent results that are expensive to compute. // -// The memoized result is returned again the next time the function is invoked. -// To prevent excessive memory use, the return values are only remembered -// for as long as they still have a user. -// // To use this package, build a store and use it to acquire handles with the // Bind method. // @@ -16,20 +12,95 @@ package memoize import ( "context" + "flag" + "fmt" "reflect" - "runtime" "sync" - "unsafe" + "sync/atomic" "golang.org/x/tools/internal/xcontext" ) +var ( + panicOnDestroyed = flag.Bool("memoize_panic_on_destroyed", false, + "Panic when a destroyed generation is read rather than returning an error. "+ + "Panicking may make it easier to debug lifetime errors, especially when "+ + "used with GOTRACEBACK=crash to see all running goroutines.") +) + // Store binds keys to functions, returning handles that can be used to access // the functions results. type Store struct { mu sync.Mutex - // entries is the set of values stored. - entries map[interface{}]uintptr + // handles is the set of values stored. + handles map[interface{}]*Handle + + // generations is the set of generations live in this store. + generations map[*Generation]struct{} +} + +// Generation creates a new Generation associated with s. Destroy must be +// called on the returned Generation once it is no longer in use. name is +// for debugging purposes only. +func (s *Store) Generation(name string) *Generation { + s.mu.Lock() + defer s.mu.Unlock() + if s.handles == nil { + s.handles = map[interface{}]*Handle{} + s.generations = map[*Generation]struct{}{} + } + g := &Generation{store: s, name: name} + s.generations[g] = struct{}{} + return g +} + +// A Generation is a logical point in time of the cache life-cycle. Cache +// entries associated with a Generation will not be removed until the +// Generation is destroyed. +type Generation struct { + // destroyed is 1 after the generation is destroyed. Atomic. + destroyed uint32 + store *Store + name string + // wg tracks the reference count of this generation. + wg sync.WaitGroup +} + +// Destroy waits for all operations referencing g to complete, then removes +// all references to g from cache entries. 
Cache entries that no longer +// reference any non-destroyed generation are removed. Destroy must be called +// exactly once for each generation. +func (g *Generation) Destroy() { + g.wg.Wait() + atomic.StoreUint32(&g.destroyed, 1) + g.store.mu.Lock() + defer g.store.mu.Unlock() + for k, e := range g.store.handles { + e.mu.Lock() + if _, ok := e.generations[g]; ok { + delete(e.generations, g) // delete even if it's dead, in case of dangling references to the entry. + if len(e.generations) == 0 { + delete(g.store.handles, k) + e.state = stateDestroyed + } + } + e.mu.Unlock() + } + delete(g.store.generations, g) +} + +// Acquire creates a new reference to g, and returns a func to release that +// reference. +func (g *Generation) Acquire(ctx context.Context) func() { + destroyed := atomic.LoadUint32(&g.destroyed) + if ctx.Err() != nil { + return func() {} + } + if destroyed != 0 { + panic("acquire on destroyed generation " + g.name) + } + g.wg.Add(1) + return g.wg.Done } // Arg is a marker interface that can be embedded to indicate a type is @@ -46,6 +117,7 @@ const ( stateIdle = iota stateRunning stateCompleted + stateDestroyed ) // Handle is returned from a store when a key is bound to a function. @@ -61,10 +133,12 @@ const ( // computation is abandoned, and state resets to idle to start the process over // again. type Handle struct { - store *Store - key interface{} + key interface{} + mu sync.Mutex + + // generations is the set of generations in which this handle is valid. + generations map[*Generation]struct{} - mu sync.Mutex state state // done is set in running state, and closed when exiting it. done chan struct{} @@ -78,91 +152,49 @@ type Handle struct { value interface{} } -// Has returns true if they key is currently valid for this store. -func (s *Store) Has(key interface{}) bool { - s.mu.Lock() - defer s.mu.Unlock() - _, found := s.entries[key] - return found -} - // Bind returns a handle for the given key and function. // // Each call to bind will return the same handle if it is already bound. // Bind will always return a valid handle, creating one if needed. // Each key can only have one handle at any given time. -// The value will be held for as long as the handle is, once it has been -// generated. +// The value will be held at least until the associated generation is destroyed. // Bind does not cause the value to be generated. 
-func (s *Store) Bind(key interface{}, function Function) *Handle { +func (g *Generation) Bind(key interface{}, function Function) *Handle { // panic early if the function is nil // it would panic later anyway, but in a way that was much harder to debug if function == nil { panic("the function passed to bind must not be nil") } - // check if we already have the key - s.mu.Lock() - defer s.mu.Unlock() - h := s.get(key) - if h != nil { - // we have a handle already, just return it + if atomic.LoadUint32(&g.destroyed) != 0 { + panic("operation on destroyed generation " + g.name) + } + g.store.mu.Lock() + defer g.store.mu.Unlock() + h, ok := g.store.handles[key] + if !ok { + h := &Handle{ + key: key, + function: function, + generations: map[*Generation]struct{}{g: {}}, + } + g.store.handles[key] = h return h } - // we have not seen this key before, add a new entry - if s.entries == nil { - s.entries = make(map[interface{}]uintptr) + h.mu.Lock() + defer h.mu.Unlock() + if _, ok := h.generations[g]; !ok { + h.generations[g] = struct{}{} } - h = &Handle{ - store: s, - key: key, - function: function, - } - // now add the weak reference to the handle into the map - s.entries[key] = uintptr(unsafe.Pointer(h)) - // add the deletion the entry when the handle is garbage collected - runtime.SetFinalizer(h, release) return h } -// Find returns the handle associated with a key, if it is bound. -// -// It cannot cause a new handle to be generated, and thus may return nil. -func (s *Store) Find(key interface{}) *Handle { - s.mu.Lock() - defer s.mu.Unlock() - return s.get(key) -} - -// Cached returns the value associated with a key. -// -// It cannot cause the value to be generated. -// It will return the cached value, if present. -func (s *Store) Cached(key interface{}) interface{} { - h := s.Find(key) - if h == nil { - return nil - } - return h.Cached() -} - -//go:nocheckptr -// nocheckptr because: https://github.com/golang/go/issues/35125#issuecomment-545671062 -func (s *Store) get(key interface{}) *Handle { - // this must be called with the store mutex already held - e, found := s.entries[key] - if !found { - return nil - } - return (*Handle)(unsafe.Pointer(e)) -} - // Stats returns the number of each type of value in the store. func (s *Store) Stats() map[reflect.Type]int { s.mu.Lock() defer s.mu.Unlock() result := map[reflect.Type]int{} - for k := range s.entries { + for k := range s.handles { result[reflect.TypeOf(k)]++ } return result @@ -174,14 +206,13 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { s.mu.Lock() defer s.mu.Unlock() - for k, e := range s.entries { - h := (*Handle)(unsafe.Pointer(e)) + for k, e := range s.handles { var v interface{} - h.mu.Lock() - if h.state == stateCompleted { - v = h.value + e.mu.Lock() + if e.state == stateCompleted { + v = e.value } - h.mu.Unlock() + e.mu.Unlock() if v == nil { continue } @@ -189,13 +220,29 @@ func (s *Store) DebugOnlyIterate(f func(k, v interface{})) { } } +func (g *Generation) Inherit(h *Handle) { + if atomic.LoadUint32(&g.destroyed) != 0 { + panic("inherit on destroyed generation " + g.name) + } + + h.mu.Lock() + defer h.mu.Unlock() + if h.state == stateDestroyed { + panic(fmt.Sprintf("inheriting destroyed handle %#v into generation %v", h.key, g.name)) + } + h.generations[g] = struct{}{} +} + // Cached returns the value associated with a handle. // // It will never cause the value to be generated. // It will return the cached value, if present. 
-func (h *Handle) Cached() interface{} { +func (h *Handle) Cached(g *Generation) interface{} { h.mu.Lock() defer h.mu.Unlock() + if _, ok := h.generations[g]; !ok { + return nil + } if h.state == stateCompleted { return h.value } @@ -205,34 +252,56 @@ func (h *Handle) Cached() interface{} { // Get returns the value associated with a handle. // // If the value is not yet ready, the underlying function will be invoked. -// This activates the handle, and it will remember the value for as long as it exists. // If ctx is cancelled, Get returns nil. -func (h *Handle) Get(ctx context.Context, arg Arg) (interface{}, error) { +func (h *Handle) Get(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { + release := g.Acquire(ctx) + defer release() + if ctx.Err() != nil { return nil, ctx.Err() } h.mu.Lock() + if _, ok := h.generations[g]; !ok { + h.mu.Unlock() + + err := fmt.Errorf("reading key %#v: generation %v is not known", h.key, g.name) + if *panicOnDestroyed && ctx.Err() != nil { + panic(err) + } + return nil, err + } switch h.state { case stateIdle: - return h.run(ctx, arg) + return h.run(ctx, g, arg) case stateRunning: return h.wait(ctx) case stateCompleted: defer h.mu.Unlock() return h.value, nil + case stateDestroyed: + h.mu.Unlock() + err := fmt.Errorf("Get on destroyed entry %#v in generation %v", h.key, g.name) + if *panicOnDestroyed { + panic(err) + } + return nil, err default: panic("unknown state") } } // run starts h.function and returns the result. h.mu must be locked. -func (h *Handle) run(ctx context.Context, arg Arg) (interface{}, error) { +func (h *Handle) run(ctx context.Context, g *Generation, arg Arg) (interface{}, error) { childCtx, cancel := context.WithCancel(xcontext.Detach(ctx)) h.cancel = cancel h.state = stateRunning h.done = make(chan struct{}) function := h.function // Read under the lock + + // Make sure that the generation isn't destroyed while we're running in it. + release := g.Acquire(ctx) go func() { + defer release() // Just in case the function does something expensive without checking // the context, double-check we're still alive. if childCtx.Err() != nil { @@ -289,15 +358,3 @@ func (h *Handle) wait(ctx context.Context) (interface{}, error) { return nil, ctx.Err() } } - -func release(p interface{}) { - h := p.(*Handle) - h.store.mu.Lock() - defer h.store.mu.Unlock() - // there is a small gap between the garbage collector deciding that the handle - // is liable for collection and the finalizer being called - // if the handle is recovered during that time, you will end up with a valid - // handle that no longer has an entry in the map, and that no longer has a - // finalizer associated with it, but that is okay. - delete(h.store.entries, h.key) -} diff --git a/internal/memoize/memoize_test.go b/internal/memoize/memoize_test.go index 00d003a6c4..e6e7b0bc99 100644 --- a/internal/memoize/memoize_test.go +++ b/internal/memoize/memoize_test.go @@ -5,230 +5,65 @@ package memoize_test import ( - "bytes" "context" - "fmt" - "io" - "runtime" "strings" "testing" - "time" "golang.org/x/tools/internal/memoize" ) -func TestStore(t *testing.T) { - ctx := context.Background() +func TestGet(t *testing.T) { s := &memoize.Store{} - logBuffer := &bytes.Buffer{} - ctx = context.WithValue(ctx, "logger", logBuffer) + g := s.Generation("x") - // These tests check the behavior of the Bind and Get functions. - // They confirm that the functions only ever run once for a given value. 
- for _, test := range []struct { - name, key, want string - }{ - { - name: "nothing", - }, - { - name: "get 1", - key: "_1", - want: ` -start @1 -simple a = A -simple b = B -simple c = C -end @1 = A B C -`[1:], - }, - { - name: "redo 1", - key: "_1", - want: ``, - }, - { - name: "get 2", - key: "_2", - want: ` -start @2 -simple d = D -simple e = E -simple f = F -end @2 = D E F -`[1:], - }, - { - name: "redo 2", - key: "_2", - want: ``, - }, - { - name: "get 3", - key: "_3", - want: ` -start @3 -end @3 = @1[ A B C] @2[ D E F] -`[1:], - }, - { - name: "get 4", - key: "_4", - want: ` -start @3 -simple g = G -error ERR = fail -simple h = H -end @3 = G !fail H -`[1:], - }, - } { - s.Bind(test.key, generate(s, test.key)).Get(ctx, nil) - got := logBuffer.String() - if got != test.want { - t.Errorf("at %q expected:\n%v\ngot:\n%s", test.name, test.want, got) - } - logBuffer.Reset() - } + evaled := 0 - // This test checks that values are garbage collected and removed from the - // store when they are no longer used. - - pinned := []string{"b", "_1", "_3"} // keys to pin in memory - unpinned := []string{"a", "c", "d", "_2", "_4"} // keys to garbage collect - - // Handles maintain a strong reference to their values. - // By generating handles for the pinned keys and keeping the pins alive in memory, - // we ensure these keys stay cached. - var pins []*memoize.Handle - for _, key := range pinned { - h := s.Bind(key, generate(s, key)) - h.Get(ctx, nil) - pins = append(pins, h) - } - - // Force the garbage collector to run. - runAllFinalizers(t) - - // Confirm our expectation that pinned values should remain cached, - // and unpinned values should be garbage collected. - for _, k := range pinned { - if v := s.Find(k); v == nil { - t.Errorf("pinned value %q was nil", k) - } - } - for _, k := range unpinned { - if v := s.Find(k); v != nil { - t.Errorf("unpinned value %q should be nil, was %v", k, v) - } - } - - // This forces the pins to stay alive until this point in the function. - runtime.KeepAlive(pins) -} - -func runAllFinalizers(t *testing.T) { - // The following is very tricky, so be very when careful changing it. - // It relies on behavior of finalizers that is not guaranteed. - - // First, run the GC to queue the finalizers. - runtime.GC() - - // wait is used to signal that the finalizers are all done. - wait := make(chan struct{}) - - // Register a finalizer against an immediately collectible object. - // - // The finalizer will signal on the wait channel once it executes, - // and it was the most recently registered finalizer, - // so the wait channel will be closed when all of the finalizers have run. - runtime.SetFinalizer(&struct{ s string }{"obj"}, func(_ interface{}) { close(wait) }) - - // Now, run the GC again to pick up the tracker object above. - runtime.GC() - - // Wait for the finalizers to run or a timeout. 
- select { - case <-wait: - case <-time.Tick(time.Second): - t.Fatalf("finalizers had not run after 1 second") + h := g.Bind("key", func(context.Context, memoize.Arg) interface{} { + evaled++ + return "res" + }) + expectGet(t, h, g, "res") + expectGet(t, h, g, "res") + if evaled != 1 { + t.Errorf("got %v calls to function, wanted 1", evaled) } } -type stringOrError struct { - value string - err error -} - -func (v *stringOrError) String() string { - if v.err != nil { - return v.err.Error() - } - return v.value -} - -func asValue(v interface{}) *stringOrError { - if v == nil { - return nil - } - return v.(*stringOrError) -} - -func generate(s *memoize.Store, key interface{}) memoize.Function { - return func(ctx context.Context, _ memoize.Arg) interface{} { - name := key.(string) - switch name { - case "": - return nil - case "err": - return logGenerator(ctx, s, "ERR", "", fmt.Errorf("fail")) - case "_1": - return joinValues(ctx, s, "@1", "a", "b", "c") - case "_2": - return joinValues(ctx, s, "@2", "d", "e", "f") - case "_3": - return joinValues(ctx, s, "@3", "_1", "_2") - case "_4": - return joinValues(ctx, s, "@3", "g", "err", "h") - default: - return logGenerator(ctx, s, name, strings.ToUpper(name), nil) - } +func expectGet(t *testing.T, h *memoize.Handle, g *memoize.Generation, wantV interface{}) { + gotV, gotErr := h.Get(context.Background(), g, nil) + if gotV != wantV || gotErr != nil { + t.Fatalf("Get() = %v, %v, wanted %v, nil", gotV, gotErr, wantV) } } -// logGenerator generates a *stringOrError value, while logging to the store's logger. -func logGenerator(ctx context.Context, s *memoize.Store, name string, v string, err error) *stringOrError { - // Get the logger from the context. - w := ctx.Value("logger").(io.Writer) - - if err != nil { - fmt.Fprintf(w, "error %v = %v\n", name, err) - } else { - fmt.Fprintf(w, "simple %v = %v\n", name, v) +func expectGetError(t *testing.T, h *memoize.Handle, g *memoize.Generation, substr string) { + gotV, gotErr := h.Get(context.Background(), g, nil) + if gotErr == nil || !strings.Contains(gotErr.Error(), substr) { + t.Fatalf("Get() = %v, %v, wanted err %q", gotV, gotErr, substr) } - return &stringOrError{value: v, err: err} } +func TestGenerations(t *testing.T) { + s := &memoize.Store{} + // Evaluate key in g1. + g1 := s.Generation("g1") + h1 := g1.Bind("key", func(context.Context, memoize.Arg) interface{} { return "res" }) + expectGet(t, h1, g1, "res") -// joinValues binds a list of keys to their values, while logging to the store's logger. -func joinValues(ctx context.Context, s *memoize.Store, name string, keys ...string) *stringOrError { - // Get the logger from the context. - w := ctx.Value("logger").(io.Writer) + // Get key in g2. It should inherit the value from g1. + g2 := s.Generation("g2") + h2 := g2.Bind("key", func(context.Context, memoize.Arg) interface{} { + t.Fatal("h2 should not need evaluation") + return "error" + }) + expectGet(t, h2, g2, "res") - fmt.Fprintf(w, "start %v\n", name) - value := "" - for _, key := range keys { - i, err := s.Bind(key, generate(s, key)).Get(ctx, nil) - if err != nil { - return &stringOrError{err: err} - } - if v := asValue(i); v == nil { - value = value + " " - } else if v.err != nil { - value = value + " !" + v.err.Error() - } else { - value = value + " " + v.value - } - } - fmt.Fprintf(w, "end %v = %v\n", name, value) - return &stringOrError{value: fmt.Sprintf("%s[%v]", name, value)} + // With g1 destroyed, g2 should still work. 
+ g1.Destroy() + expectGet(t, h2, g2, "res") + + // With all generations destroyed, key should be re-evaluated. + g2.Destroy() + g3 := s.Generation("g3") + h3 := g3.Bind("key", func(context.Context, memoize.Arg) interface{} { return "new res" }) + expectGet(t, h3, g3, "new res") } diff --git a/internal/memoize/nocopy.go b/internal/memoize/nocopy.go deleted file mode 100644 index 891322599b..0000000000 --- a/internal/memoize/nocopy.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package memoize - -// NoCopy is a type with no public methods that will trigger a vet check if it -// is ever copied. -// You can embed this in any type intended to be used as a value. This helps -// avoid accidentally holding a copy of a value instead of the value itself. -type NoCopy struct { - noCopy noCopy -} - -// noCopy may be embedded into structs which must not be copied -// after the first use. -// -// See https://golang.org/issues/8005#issuecomment-190753527 -// for details. -type noCopy struct{} - -// Lock is a no-op used by -copylocks checker from `go vet`. -func (*noCopy) Lock() {} -func (*noCopy) Unlock() {}
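The new memoize tests above exercise inheritance across generations. The property they, and the gopls changes earlier in this patch, depend on is that Destroy blocks on the generation's WaitGroup until every Acquire reference has been released (Handle.Get also acquires the generation internally while a function runs). A small self-contained sketch of that interaction, with a made-up generation name:

package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/memoize"
)

func main() {
	var store memoize.Store
	g := store.Generation("demo")

	release := g.Acquire(context.Background()) // one outstanding reference to g

	done := make(chan struct{})
	go func() {
		g.Destroy() // blocks in wg.Wait() until release() is called
		close(done)
	}()

	// ... safe to read values cached under g while the reference is held ...
	release()
	<-done
	fmt.Println("generation destroyed; entries only g referenced are evicted")
}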