mirror of https://github.com/golang/go.git
internal/lsp/cache: build a new metadata graph on load
Introduce a metadataGraph.Clone method that can be used to clone a metadata graph, applying a set of updates. During clone, ids and imports are recomputed from scratch based on the known metadata. Also refine the check for "real" packages when determining whether a command-line-arguments package should be kept as a workspace package: if all other packages are invalid, but the command-line-arguments package is valid, we should keep the command-line-arguments package. Updates golang/go#45686 Change-Id: Iea8d4f19c1d1c5a2b0582b9dda5f9143482a34af Reviewed-on: https://go-review.googlesource.com/c/tools/+/340851 Reviewed-by: Alan Donovan <adonovan@google.com> TryBot-Result: Gopher Robot <gobot@golang.org> gopls-CI: kokoro <noreply+kokoro@google.com> Run-TryBot: Robert Findley <rfindley@google.com>
This commit is contained in:
parent
9f38ef7f15
commit
8a9207816c
|
|
@ -47,7 +47,7 @@ func (s *snapshot) dumpWorkspace(context string) {
|
|||
|
||||
for _, id := range ids {
|
||||
pkgPath := s.workspacePackages[id]
|
||||
_, ok := s.meta.metadata[id]
|
||||
debugf(" %s:%s (metadata: %t)", id, pkgPath, ok)
|
||||
m, ok := s.meta.metadata[id]
|
||||
debugf(" %s:%s (metadata: %t; valid: %t)", id, pkgPath, ok, m.Valid)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,7 +4,12 @@
|
|||
|
||||
package cache
|
||||
|
||||
import "golang.org/x/tools/internal/span"
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"golang.org/x/tools/internal/lsp/source"
|
||||
"golang.org/x/tools/internal/span"
|
||||
)
|
||||
|
||||
// A metadataGraph holds information about a transitively closed import graph of
|
||||
// Go packages, as obtained from go/packages.
|
||||
|
|
@ -13,21 +18,122 @@ import "golang.org/x/tools/internal/span"
|
|||
// TODO(rfindley): make this type immutable, so that it may be shared across
|
||||
// snapshots.
|
||||
type metadataGraph struct {
|
||||
// ids maps file URIs to package IDs. A single file may belong to multiple
|
||||
// packages due to tests packages.
|
||||
ids map[span.URI][]PackageID
|
||||
|
||||
// metadata maps package IDs to their associated metadata.
|
||||
metadata map[PackageID]*KnownMetadata
|
||||
|
||||
// importedBy maps package IDs to the list of packages that import them.
|
||||
importedBy map[PackageID][]PackageID
|
||||
|
||||
// ids maps file URIs to package IDs. A single file may belong to multiple
|
||||
// packages due to tests packages.
|
||||
ids map[span.URI][]PackageID
|
||||
}
|
||||
|
||||
func NewMetadataGraph() *metadataGraph {
|
||||
return &metadataGraph{
|
||||
ids: make(map[span.URI][]PackageID),
|
||||
metadata: make(map[PackageID]*KnownMetadata),
|
||||
importedBy: make(map[PackageID][]PackageID),
|
||||
ids: make(map[span.URI][]PackageID),
|
||||
}
|
||||
}
|
||||
|
||||
// Clone creates a new metadataGraph, applying the given updates to the
|
||||
// receiver.
|
||||
func (g *metadataGraph) Clone(updates map[PackageID]*KnownMetadata) *metadataGraph {
|
||||
result := &metadataGraph{metadata: make(map[PackageID]*KnownMetadata, len(g.metadata))}
|
||||
// Copy metadata.
|
||||
for id, m := range g.metadata {
|
||||
result.metadata[id] = m
|
||||
}
|
||||
for id, m := range updates {
|
||||
if m == nil {
|
||||
delete(result.metadata, id)
|
||||
} else {
|
||||
result.metadata[id] = m
|
||||
}
|
||||
}
|
||||
result.build()
|
||||
return result
|
||||
}
|
||||
|
||||
// build constructs g.importedBy and g.uris from g.metadata.
|
||||
func (g *metadataGraph) build() {
|
||||
// Build the import graph.
|
||||
g.importedBy = make(map[PackageID][]PackageID)
|
||||
for id, m := range g.metadata {
|
||||
for _, importID := range m.Deps {
|
||||
g.importedBy[importID] = append(g.importedBy[importID], id)
|
||||
}
|
||||
}
|
||||
|
||||
// Collect file associations.
|
||||
g.ids = make(map[span.URI][]PackageID)
|
||||
for id, m := range g.metadata {
|
||||
uris := map[span.URI]struct{}{}
|
||||
for _, uri := range m.CompiledGoFiles {
|
||||
uris[uri] = struct{}{}
|
||||
}
|
||||
for _, uri := range m.GoFiles {
|
||||
uris[uri] = struct{}{}
|
||||
}
|
||||
for uri := range uris {
|
||||
g.ids[uri] = append(g.ids[uri], id)
|
||||
}
|
||||
}
|
||||
|
||||
// Sort and filter file associations.
|
||||
//
|
||||
// We choose the first non-empty set of package associations out of the
|
||||
// following. For simplicity, call a non-command-line-arguments package a
|
||||
// "real" package.
|
||||
//
|
||||
// 1: valid real packages
|
||||
// 2: a valid command-line-arguments package
|
||||
// 3: invalid real packages
|
||||
// 4: an invalid command-line-arguments package
|
||||
for uri, ids := range g.ids {
|
||||
sort.Slice(ids, func(i, j int) bool {
|
||||
// Sort valid packages first.
|
||||
validi := g.metadata[ids[i]].Valid
|
||||
validj := g.metadata[ids[j]].Valid
|
||||
if validi != validj {
|
||||
return validi
|
||||
}
|
||||
|
||||
cli := source.IsCommandLineArguments(string(ids[i]))
|
||||
clj := source.IsCommandLineArguments(string(ids[j]))
|
||||
if cli && !clj {
|
||||
return false
|
||||
}
|
||||
if !cli && clj {
|
||||
return true
|
||||
}
|
||||
return ids[i] < ids[j]
|
||||
})
|
||||
|
||||
// Choose the best IDs for each URI, according to the following rules:
|
||||
// - If there are any valid real packages, choose them.
|
||||
// - Else, choose the first valid command-line-argument package, if it exists.
|
||||
// - Else, keep using all the invalid metadata.
|
||||
//
|
||||
// TODO(rfindley): it might be better to track all IDs here, and exclude
|
||||
// them later in PackagesForFile, but this is the existing behavior.
|
||||
hasValidMetadata := false
|
||||
for i, id := range ids {
|
||||
m := g.metadata[id]
|
||||
if m.Valid {
|
||||
hasValidMetadata = true
|
||||
} else if hasValidMetadata {
|
||||
g.ids[uri] = ids[:i]
|
||||
break
|
||||
}
|
||||
// If we've seen *anything* prior to command-line arguments package, take
|
||||
// it. Note that ids[0] may itself be command-line-arguments.
|
||||
if i > 0 && source.IsCommandLineArguments(string(id)) {
|
||||
g.ids[uri] = ids[:i]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -149,7 +149,7 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf
|
|||
}
|
||||
|
||||
moduleErrs := make(map[string][]packages.Error) // module path -> errors
|
||||
var newMetadata []*Metadata
|
||||
updates := make(map[PackageID]*KnownMetadata)
|
||||
for _, pkg := range pkgs {
|
||||
// The Go command returns synthetic list results for module queries that
|
||||
// encountered module errors.
|
||||
|
|
@ -194,26 +194,36 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...interf
|
|||
if s.view.allFilesExcluded(pkg) {
|
||||
continue
|
||||
}
|
||||
// Set the metadata for this package.
|
||||
// TODO: once metadata is immutable, we shouldn't have to lock here.
|
||||
s.mu.Lock()
|
||||
seen := make(map[PackageID]struct{})
|
||||
m, err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, seen)
|
||||
err := s.setMetadataLocked(ctx, PackagePath(pkg.PkgPath), pkg, cfg, query, updates, nil)
|
||||
s.mu.Unlock()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newMetadata = append(newMetadata, m)
|
||||
}
|
||||
|
||||
// Rebuild package data when metadata is updated.
|
||||
s.rebuildPackageData()
|
||||
s.mu.Lock()
|
||||
s.meta = s.meta.Clone(updates)
|
||||
// Invalidate any packages we may have associated with this metadata.
|
||||
//
|
||||
// TODO(rfindley): if we didn't already invalidate these in snapshot.clone,
|
||||
// shouldn't we invalidate the reverse transitive closure?
|
||||
for _, m := range updates {
|
||||
for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
|
||||
key := packageKey{mode, m.ID}
|
||||
delete(s.packages, key)
|
||||
}
|
||||
}
|
||||
s.workspacePackages = computeWorkspacePackages(s.meta)
|
||||
s.dumpWorkspace("load")
|
||||
s.mu.Unlock()
|
||||
|
||||
// Now that the workspace has been rebuilt, verify that we can build package handles.
|
||||
// Rebuild the workspace package handle for any packages we invalidated.
|
||||
//
|
||||
// TODO(rfindley): what's the point of returning an error here? Probably we
|
||||
// can simply remove this step: The package handle will be rebuilt as needed.
|
||||
for _, m := range newMetadata {
|
||||
for _, m := range updates {
|
||||
if _, err := s.buildPackageHandle(ctx, m.ID, s.workspaceParseMode(m.ID)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -431,27 +441,41 @@ func getWorkspaceDir(ctx context.Context, h *memoize.Handle, g *memoize.Generati
|
|||
// setMetadataLocked extracts metadata from pkg and records it in s. It
|
||||
// recurs through pkg.Imports to ensure that metadata exists for all
|
||||
// dependencies.
|
||||
func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, seen map[PackageID]struct{}) (*Metadata, error) {
|
||||
func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, pkg *packages.Package, cfg *packages.Config, query []string, updates map[PackageID]*KnownMetadata, path []PackageID) error {
|
||||
id := PackageID(pkg.ID)
|
||||
if new := updates[id]; new != nil {
|
||||
return nil
|
||||
}
|
||||
if source.IsCommandLineArguments(pkg.ID) {
|
||||
suffix := ":" + strings.Join(query, ",")
|
||||
id = PackageID(string(id) + suffix)
|
||||
pkgPath = PackagePath(string(pkgPath) + suffix)
|
||||
}
|
||||
if _, ok := seen[id]; ok {
|
||||
return nil, fmt.Errorf("import cycle detected: %q", id)
|
||||
if _, ok := updates[id]; ok {
|
||||
// If we've already seen this dependency, there may be an import cycle, or
|
||||
// we may have reached the same package transitively via distinct paths.
|
||||
// Check the path to confirm.
|
||||
for _, prev := range path {
|
||||
if prev == id {
|
||||
return fmt.Errorf("import cycle detected: %q", id)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Recreate the metadata rather than reusing it to avoid locking.
|
||||
m := &Metadata{
|
||||
ID: id,
|
||||
PkgPath: pkgPath,
|
||||
Name: PackageName(pkg.Name),
|
||||
ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
|
||||
TypesSizes: pkg.TypesSizes,
|
||||
Config: cfg,
|
||||
Module: pkg.Module,
|
||||
depsErrors: packagesinternal.GetDepsErrors(pkg),
|
||||
m := &KnownMetadata{
|
||||
Metadata: &Metadata{
|
||||
ID: id,
|
||||
PkgPath: pkgPath,
|
||||
Name: PackageName(pkg.Name),
|
||||
ForTest: PackagePath(packagesinternal.GetForTest(pkg)),
|
||||
TypesSizes: pkg.TypesSizes,
|
||||
Config: cfg,
|
||||
Module: pkg.Module,
|
||||
depsErrors: packagesinternal.GetDepsErrors(pkg),
|
||||
},
|
||||
Valid: true,
|
||||
}
|
||||
updates[id] = m
|
||||
|
||||
// Identify intermediate test variants for later filtering. See the
|
||||
// documentation of IsIntermediateTestVariant for more information.
|
||||
|
|
@ -493,15 +517,6 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p
|
|||
}
|
||||
}
|
||||
|
||||
s.updateIDForURIsLocked(id, uris)
|
||||
|
||||
// TODO(rstambler): is this still necessary?
|
||||
copied := map[PackageID]struct{}{
|
||||
id: {},
|
||||
}
|
||||
for k, v := range seen {
|
||||
copied[k] = v
|
||||
}
|
||||
for importPath, importPkg := range pkg.Imports {
|
||||
importPkgPath := PackagePath(importPath)
|
||||
importID := PackageID(importPkg.ID)
|
||||
|
|
@ -517,32 +532,13 @@ func (s *snapshot) setMetadataLocked(ctx context.Context, pkgPath PackagePath, p
|
|||
continue
|
||||
}
|
||||
if s.noValidMetadataForIDLocked(importID) {
|
||||
if _, err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, copied); err != nil {
|
||||
if err := s.setMetadataLocked(ctx, importPkgPath, importPkg, cfg, query, updates, append(path, id)); err != nil {
|
||||
event.Error(ctx, "error in dependency", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add the metadata to the cache.
|
||||
|
||||
// If we've already set the metadata for this snapshot, reuse it.
|
||||
if original, ok := s.meta.metadata[m.ID]; ok && original.Valid {
|
||||
// Since we've just reloaded, clear out shouldLoad.
|
||||
original.ShouldLoad = false
|
||||
m = original.Metadata
|
||||
} else {
|
||||
s.meta.metadata[m.ID] = &KnownMetadata{
|
||||
Metadata: m,
|
||||
Valid: true,
|
||||
}
|
||||
// Invalidate any packages we may have associated with this metadata.
|
||||
for _, mode := range []source.ParseMode{source.ParseHeader, source.ParseExported, source.ParseFull} {
|
||||
key := packageKey{mode, m.ID}
|
||||
delete(s.packages, key)
|
||||
}
|
||||
}
|
||||
|
||||
return m, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// computeWorkspacePackages computes workspace packages for the given metadata
|
||||
|
|
@ -588,13 +584,16 @@ func computeWorkspacePackages(meta *metadataGraph) map[PackageID]PackagePath {
|
|||
// allFilesHaveRealPackages reports whether all files referenced by m are
|
||||
// contained in a "real" package (not command-line-arguments).
|
||||
//
|
||||
// If m is valid but all "real" packages containing any file are invalid, this
|
||||
// function returns false.
|
||||
//
|
||||
// If m is not a command-line-arguments package, this is trivially true.
|
||||
func allFilesHaveRealPackages(g *metadataGraph, m *KnownMetadata) bool {
|
||||
n := len(m.CompiledGoFiles)
|
||||
checkURIs:
|
||||
for _, uri := range append(m.CompiledGoFiles[0:n:n], m.GoFiles...) {
|
||||
for _, id := range g.ids[uri] {
|
||||
if !source.IsCommandLineArguments(string(id)) {
|
||||
if !source.IsCommandLineArguments(string(id)) && (g.metadata[id].Valid || !m.Valid) {
|
||||
continue checkURIs
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -716,16 +716,6 @@ func (s *snapshot) getImportedByLocked(id PackageID) []PackageID {
|
|||
return s.meta.importedBy[id]
|
||||
}
|
||||
|
||||
func (s *snapshot) rebuildPackageData() {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Completely invalidate the original map.
|
||||
s.meta.importedBy = make(map[PackageID][]PackageID)
|
||||
s.rebuildImportGraph()
|
||||
s.workspacePackages = computeWorkspacePackages(s.meta)
|
||||
}
|
||||
|
||||
func (s *snapshot) rebuildImportGraph() {
|
||||
for id, m := range s.meta.metadata {
|
||||
for _, importID := range m.Deps {
|
||||
|
|
@ -1306,41 +1296,6 @@ func (s *snapshot) noValidMetadataForIDLocked(id PackageID) bool {
|
|||
return m == nil || !m.Valid
|
||||
}
|
||||
|
||||
// updateIDForURIsLocked adds the given ID to the set of known IDs for the given URI.
|
||||
// Any existing invalid IDs are removed from the set of known IDs. IDs that are
|
||||
// not "command-line-arguments" are preferred, so if a new ID comes in for a
|
||||
// URI that previously only had "command-line-arguments", the new ID will
|
||||
// replace the "command-line-arguments" ID.
|
||||
func (s *snapshot) updateIDForURIsLocked(id PackageID, uris map[span.URI]struct{}) {
|
||||
for uri := range uris {
|
||||
// Collect the new set of IDs, preserving any valid existing IDs.
|
||||
newIDs := []PackageID{id}
|
||||
for _, existingID := range s.meta.ids[uri] {
|
||||
// Don't set duplicates of the same ID.
|
||||
if existingID == id {
|
||||
continue
|
||||
}
|
||||
// If the package previously only had a command-line-arguments ID,
|
||||
// delete the command-line-arguments workspace package.
|
||||
if source.IsCommandLineArguments(string(existingID)) {
|
||||
delete(s.workspacePackages, existingID)
|
||||
continue
|
||||
}
|
||||
// If the metadata for an existing ID is invalid, and we are
|
||||
// setting metadata for a new, valid ID--don't preserve the old ID.
|
||||
if m, ok := s.meta.metadata[existingID]; !ok || !m.Valid {
|
||||
continue
|
||||
}
|
||||
newIDs = append(newIDs, existingID)
|
||||
}
|
||||
sort.Slice(newIDs, func(i, j int) bool {
|
||||
return newIDs[i] < newIDs[j]
|
||||
})
|
||||
s.meta.ids[uri] = newIDs
|
||||
}
|
||||
s.dumpWorkspace("updateIDs")
|
||||
}
|
||||
|
||||
func (s *snapshot) isWorkspacePackage(id PackageID) bool {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
|
@ -1984,7 +1939,7 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
|
|||
|
||||
// If the workspace mode has changed, we must delete all metadata, as it
|
||||
// is unusable and may produce confusing or incorrect diagnostics.
|
||||
// If a file has been deleted, we must delete metadata all packages
|
||||
// If a file has been deleted, we must delete metadata for all packages
|
||||
// containing that file.
|
||||
workspaceModeChanged := s.workspaceMode() != result.workspaceMode()
|
||||
skipID := map[PackageID]bool{}
|
||||
|
|
|
|||
Loading…
Reference in New Issue