gopls/internal/lsp: fix diagnostic suppression when folders change

Diagnostic suppression used the view-relative snapshot ID to avoid
publishing stale diagnostics. When the layout of views changes due to a
didChangeWorkspaceFolders notification, this suppression is broken as
snapshot IDs reset to 0. Fix this (hopefully temporarily) by introducing
a globally monotonic snapshot ID.

Fixes golang/go#56731

Change-Id: Ib108b1436e800cf5a45fbba298c9975a2cf1d735
Reviewed-on: https://go-review.googlesource.com/c/tools/+/450275
Reviewed-by: Alan Donovan <adonovan@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
gopls-CI: kokoro <noreply+kokoro@google.com>
Run-TryBot: Robert Findley <rfindley@google.com>
Auto-Submit: Robert Findley <rfindley@google.com>
This commit is contained in:
Robert Findley 2022-11-14 11:58:07 -05:00 committed by Gopher Robot
parent e3b3c0100d
commit 0c71b564b9
9 changed files with 128 additions and 66 deletions

View File

@ -107,9 +107,9 @@ func (s *snapshot) buildPackageHandle(ctx context.Context, id PackageID, mode so
// top-level is valid, all of its dependencies should be as well.
if err != nil || m.Valid && !depHandle.m.Valid {
if err != nil {
event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, source.SnapshotLabels(s)...)
} else {
event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), source.SnapshotLabels(s)...)
}
// This check ensures we break out of the slow

View File

@ -141,10 +141,11 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc
}
// This log message is sought for by TestReloadOnlyOnce.
labels := append(source.SnapshotLabels(s), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
if err != nil {
event.Error(ctx, eventName, err, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
event.Error(ctx, eventName, err, labels...)
} else {
event.Log(ctx, eventName, tag.Snapshot.Of(s.ID()), tag.Directory.Of(cfg.Dir), tag.Query.Of(query), tag.PackageCount.Of(len(pkgs)))
event.Log(ctx, eventName, labels...)
}
if len(pkgs) == 0 {
@ -174,11 +175,12 @@ func (s *snapshot) load(ctx context.Context, allowNetwork bool, scopes ...loadSc
}
if !containsDir || s.view.Options().VerboseOutput {
event.Log(ctx, eventName,
tag.Snapshot.Of(s.ID()),
event.Log(ctx, eventName, append(
source.SnapshotLabels(s),
tag.Package.Of(pkg.ID),
tag.Files.Of(pkg.CompiledGoFiles))
tag.Files.Of(pkg.CompiledGoFiles))...)
}
// Ignore packages with no sources, since we will never be able to
// correctly invalidate that metadata.
if len(pkg.GoFiles) == 0 && len(pkg.CompiledGoFiles) == 0 {

View File

@ -187,7 +187,7 @@ func (s *Session) NewView(ctx context.Context, name string, folder span.URI, opt
return view, snapshot, release, nil
}
func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, snapshotID uint64) (*View, *snapshot, func(), error) {
func (s *Session) createView(ctx context.Context, name string, folder span.URI, options *source.Options, seqID uint64) (*View, *snapshot, func(), error) {
index := atomic.AddInt64(&viewIndex, 1)
if s.cache.options != nil {
@ -264,7 +264,8 @@ func (s *Session) createView(ctx context.Context, name string, folder span.URI,
},
}
v.snapshot = &snapshot{
id: snapshotID,
sequenceID: seqID,
globalID: nextSnapshotID(),
view: v,
backgroundCtx: backgroundCtx,
cancel: cancel,
@ -401,7 +402,7 @@ func (s *Session) updateView(ctx context.Context, view *View, options *source.Op
view.snapshotMu.Unlock()
panic("updateView called after View was already shut down")
}
snapshotID := view.snapshot.id
seqID := view.snapshot.sequenceID // Preserve sequence IDs when updating a view in place.
view.snapshotMu.Unlock()
i, err := s.dropView(ctx, view)
@ -409,7 +410,7 @@ func (s *Session) updateView(ctx context.Context, view *View, options *source.Op
return nil, err
}
v, _, release, err := s.createView(ctx, view.name, view.folder, options, snapshotID)
v, _, release, err := s.createView(ctx, view.name, view.folder, options, seqID)
release()
if err != nil {

View File

@ -44,8 +44,9 @@ import (
)
type snapshot struct {
id uint64
view *View
sequenceID uint64
globalID source.GlobalSnapshotID
view *View
cancel func()
backgroundCtx context.Context
@ -156,6 +157,12 @@ type snapshot struct {
unprocessedSubdirChanges []*fileChange
}
// globalSnapshotID is the process-wide counter backing nextSnapshotID.
// It must only be accessed atomically.
var globalSnapshotID uint64

// nextSnapshotID returns the next globally unique snapshot ID. IDs are
// monotonically increasing across all views in this process, which allows
// ordering snapshots even when view-relative sequence IDs reset (e.g. after
// didChangeWorkspaceFolders recreates views).
func nextSnapshotID() source.GlobalSnapshotID {
return source.GlobalSnapshotID(atomic.AddUint64(&globalSnapshotID, 1))
}
var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
// Acquire prevents the snapshot from being destroyed until the returned function is called.
@ -170,7 +177,7 @@ var _ memoize.RefCounted = (*snapshot)(nil) // snapshots are reference-counted
func (s *snapshot) Acquire() func() {
type uP = unsafe.Pointer
if destroyedBy := atomic.LoadPointer((*uP)(uP(&s.destroyedBy))); destroyedBy != nil {
log.Panicf("%d: acquire() after Destroy(%q)", s.id, *(*string)(destroyedBy))
log.Panicf("%d: acquire() after Destroy(%q)", s.globalID, *(*string)(destroyedBy))
}
s.refcount.Add(1)
return s.refcount.Done
@ -209,7 +216,7 @@ func (s *snapshot) destroy(destroyedBy string) {
// Not foolproof: another thread could acquire() at this moment.
type uP = unsafe.Pointer // looking forward to generics...
if old := atomic.SwapPointer((*uP)(uP(&s.destroyedBy)), uP(&destroyedBy)); old != nil {
log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.id, destroyedBy, *(*string)(old))
log.Panicf("%d: Destroy(%q) after Destroy(%q)", s.globalID, destroyedBy, *(*string)(old))
}
s.packages.Destroy()
@ -232,8 +239,12 @@ func (s *snapshot) destroy(destroyedBy string) {
}
}
func (s *snapshot) ID() uint64 {
return s.id
// SequenceID returns this snapshot's sequence ID within its containing view.
// Sequence IDs are monotonic per view but reset to 0 for new views; use
// GlobalID for cross-view ordering.
func (s *snapshot) SequenceID() uint64 {
return s.sequenceID
}
// GlobalID returns this snapshot's globally unique, process-wide monotonic ID
// (assigned via nextSnapshotID at snapshot creation).
func (s *snapshot) GlobalID() source.GlobalSnapshotID {
return s.globalID
}
func (s *snapshot) View() source.View {
@ -1726,7 +1737,8 @@ func (s *snapshot) clone(ctx, bgCtx context.Context, changes map[span.URI]*fileC
bgCtx, cancel := context.WithCancel(bgCtx)
result := &snapshot{
id: s.id + 1,
sequenceID: s.sequenceID + 1,
globalID: nextSnapshotID(),
store: s.store,
view: s.view,
backgroundCtx: bgCtx,

View File

@ -43,8 +43,8 @@ const (
// A diagnosticReport holds results for a single diagnostic source.
type diagnosticReport struct {
snapshotID uint64 // snapshot ID on which the report was computed
publishedHash string // last published hash for this (URI, source)
snapshotID source.GlobalSnapshotID // global snapshot ID on which the report was computed
publishedHash string // last published hash for this (URI, source)
diags map[string]*source.Diagnostic
}
@ -68,7 +68,7 @@ type fileReports struct {
// yet published.
//
// This prevents gopls from publishing stale diagnostics.
publishedSnapshotID uint64
publishedSnapshotID source.GlobalSnapshotID
// publishedHash is a hash of the latest diagnostics published for the file.
publishedHash string
@ -141,7 +141,7 @@ func (s *Server) diagnoseSnapshots(snapshots map[source.Snapshot][]span.URI, onD
func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI, onDisk bool) {
ctx := snapshot.BackgroundContext()
ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "Server.diagnoseSnapshot", source.SnapshotLabels(snapshot)...)
defer done()
delay := snapshot.View().Options().DiagnosticsDelay
@ -155,7 +155,13 @@ func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.U
// delay.
s.diagnoseChangedFiles(ctx, snapshot, changedURIs, onDisk)
s.publishDiagnostics(ctx, false, snapshot)
if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.ID(), time.After(delay)); ok {
// We debounce diagnostics separately for each view, using the snapshot's
// view-local sequence ID as a logical ordering.
//
// TODO(rfindley): it would be cleaner to simply put the diagnostic
// debouncer on the view, and remove the "key" argument to debouncing.
if ok := <-s.diagDebouncer.debounce(snapshot.View().Name(), snapshot.SequenceID(), time.After(delay)); ok {
s.diagnose(ctx, snapshot, false)
s.publishDiagnostics(ctx, true, snapshot)
}
@ -168,7 +174,7 @@ func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.U
}
func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI, onDisk bool) {
ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles", source.SnapshotLabels(snapshot)...)
defer done()
packages := make(map[source.Package]struct{})
@ -215,7 +221,7 @@ func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snaps
// diagnose is a helper function for running diagnostics with a given context.
// Do not call it directly. forceAnalysis is only true for testing purposes.
func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAnalysis bool) {
ctx, done := event.Start(ctx, "Server.diagnose", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "Server.diagnose", source.SnapshotLabels(snapshot)...)
defer done()
// Wait for a free diagnostics slot.
@ -235,9 +241,7 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAn
// common code for dispatching diagnostics
store := func(dsource diagnosticSource, operation string, diagsByFileID map[source.VersionedFileIdentity][]*source.Diagnostic, err error) {
if err != nil {
event.Error(ctx, "warning: while "+operation, err,
tag.Directory.Of(snapshot.View().Folder().Filename()),
tag.Snapshot.Of(snapshot.ID()))
event.Error(ctx, "warning: while "+operation, err, source.SnapshotLabels(snapshot)...)
}
for id, diags := range diagsByFileID {
if id.URI == "" {
@ -346,7 +350,7 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, forceAn
}
func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg source.Package, alwaysAnalyze bool) {
ctx, done := event.Start(ctx, "Server.diagnosePkg", tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(string(pkg.ID())))
ctx, done := event.Start(ctx, "Server.diagnosePkg", append(source.SnapshotLabels(snapshot), tag.Package.Of(string(pkg.ID())))...)
defer done()
enableDiagnostics := false
includeAnalysis := alwaysAnalyze // only run analyses for packages with open files
@ -361,7 +365,7 @@ func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg
pkgDiagnostics, err := snapshot.DiagnosePackage(ctx, pkg)
if err != nil {
event.Error(ctx, "warning: diagnosing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(string(pkg.ID())))
event.Error(ctx, "warning: diagnosing package", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(pkg.ID())))...)
return
}
for _, cgf := range pkg.CompiledGoFiles() {
@ -374,7 +378,7 @@ func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg
if includeAnalysis && !pkg.HasListOrParseErrors() {
reports, err := source.Analyze(ctx, snapshot, pkg, false)
if err != nil {
event.Error(ctx, "warning: analyzing package", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(string(pkg.ID())))
event.Error(ctx, "warning: analyzing package", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(pkg.ID())))...)
return
}
for _, cgf := range pkg.CompiledGoFiles() {
@ -390,7 +394,7 @@ func (s *Server) diagnosePkg(ctx context.Context, snapshot source.Snapshot, pkg
if enableGCDetails {
gcReports, err := source.GCOptimizationDetails(ctx, snapshot, pkg)
if err != nil {
event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()), tag.Package.Of(string(pkg.ID())))
event.Error(ctx, "warning: gc details", err, append(source.SnapshotLabels(snapshot), tag.Package.Of(string(pkg.ID())))...)
}
s.gcOptimizationDetailsMu.Lock()
_, enableGCDetails := s.gcOptimizationDetails[pkg.ID()]
@ -452,13 +456,13 @@ func (s *Server) storeDiagnostics(snapshot source.Snapshot, uri span.URI, dsourc
}
report := s.diagnostics[uri].reports[dsource]
// Don't set obsolete diagnostics.
if report.snapshotID > snapshot.ID() {
if report.snapshotID > snapshot.GlobalID() {
return
}
if report.diags == nil || report.snapshotID != snapshot.ID() {
if report.diags == nil || report.snapshotID != snapshot.GlobalID() {
report.diags = map[string]*source.Diagnostic{}
}
report.snapshotID = snapshot.ID()
report.snapshotID = snapshot.GlobalID()
for _, d := range diags {
report.diags[hashDiagnostics(d)] = d
}
@ -488,7 +492,7 @@ func (s *Server) showCriticalErrorStatus(ctx context.Context, snapshot source.Sn
// status bar.
var errMsg string
if err != nil {
event.Error(ctx, "errors loading workspace", err.MainError, tag.Snapshot.Of(snapshot.ID()), tag.Directory.Of(snapshot.View().Folder()))
event.Error(ctx, "errors loading workspace", err.MainError, source.SnapshotLabels(snapshot)...)
for _, d := range err.Diagnostics {
s.storeDiagnostics(snapshot, d.URI, modSource, []*source.Diagnostic{d})
}
@ -567,31 +571,34 @@ Otherwise, see the troubleshooting guidelines for help investigating (https://gi
// publishDiagnostics collects and publishes any unpublished diagnostic reports.
func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot source.Snapshot) {
ctx, done := event.Start(ctx, "Server.publishDiagnostics", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "Server.publishDiagnostics", source.SnapshotLabels(snapshot)...)
defer done()
s.diagnosticsMu.Lock()
defer s.diagnosticsMu.Unlock()
// TODO(rfindley): remove this noisy (and not useful) logging.
published := 0
defer func() {
log.Trace.Logf(ctx, "published %d diagnostics", published)
}()
for uri, r := range s.diagnostics {
// Snapshot IDs are always increasing, so we use them instead of file
// versions to create the correct order for diagnostics.
// Global snapshot IDs are monotonic, so we use them to enforce an ordering
// for diagnostics.
//
// If we've already delivered diagnostics for a future snapshot for this
// file, do not deliver them.
if r.publishedSnapshotID > snapshot.ID() {
// file, do not deliver them. See golang/go#42837 for an example of why
// this is necessary.
//
// TODO(rfindley): even using a global snapshot ID, this mechanism is
// potentially racy: elsewhere in the code (e.g. invalidateContent) we
// allow multiple views to track a given file. In this case, we should
// either only report diagnostics for snapshots from the "best" view of a
// URI, or somehow merge diagnostics from multiple views.
if r.publishedSnapshotID > snapshot.GlobalID() {
continue
}
anyReportsChanged := false
reportHashes := map[diagnosticSource]string{}
var diags []*source.Diagnostic
for dsource, report := range r.reports {
if report.snapshotID != snapshot.ID() {
if report.snapshotID != snapshot.GlobalID() {
continue
}
var reportDiags []*source.Diagnostic
@ -611,12 +618,13 @@ func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot so
// new information.
continue
}
source.SortDiagnostics(diags)
hash := hashDiagnostics(diags...)
if hash == r.publishedHash && !r.mustPublish {
// Update snapshotID to be the latest snapshot for which this diagnostic
// hash is valid.
r.publishedSnapshotID = snapshot.ID()
r.publishedSnapshotID = snapshot.GlobalID()
continue
}
var version int32
@ -628,10 +636,9 @@ func (s *Server) publishDiagnostics(ctx context.Context, final bool, snapshot so
URI: protocol.URIFromSpanURI(uri),
Version: version,
}); err == nil {
published++
r.publishedHash = hash
r.mustPublish = false // diagnostics have been successfully published
r.publishedSnapshotID = snapshot.ID()
r.publishedSnapshotID = snapshot.GlobalID()
for dsource, hash := range reportHashes {
report := r.reports[dsource]
report.publishedHash = hash

View File

@ -19,7 +19,6 @@ import (
"golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/tag"
"golang.org/x/vuln/osv"
)
@ -27,7 +26,7 @@ import (
//
// It waits for completion of type-checking of all active packages.
func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
ctx, done := event.Start(ctx, "mod.Diagnostics", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "mod.Diagnostics", source.SnapshotLabels(snapshot)...)
defer done()
return collectDiagnostics(ctx, snapshot, ModDiagnostics)
@ -36,7 +35,7 @@ func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.Vers
// UpgradeDiagnostics returns upgrade diagnostics for the modules in the
// workspace with known upgrades.
func UpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
ctx, done := event.Start(ctx, "mod.UpgradeDiagnostics", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "mod.UpgradeDiagnostics", source.SnapshotLabels(snapshot)...)
defer done()
return collectDiagnostics(ctx, snapshot, ModUpgradeDiagnostics)
@ -45,7 +44,7 @@ func UpgradeDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[sour
// VulnerabilityDiagnostics returns vulnerability diagnostics for the active modules in the
// workspace with known vulnerabilities.
func VulnerabilityDiagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
ctx, done := event.Start(ctx, "mod.VulnerabilityDiagnostics", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "mod.VulnerabilityDiagnostics", source.SnapshotLabels(snapshot)...)
defer done()
return collectDiagnostics(ctx, snapshot, ModVulnerabilityDiagnostics)

View File

@ -24,13 +24,34 @@ import (
"golang.org/x/tools/gopls/internal/govulncheck"
"golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event/label"
"golang.org/x/tools/internal/event/tag"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/imports"
)
// A GlobalSnapshotID uniquely identifies a snapshot within this process and
// increases monotonically with snapshot creation time.
//
// We use a distinct integral type for global IDs to help enforce correct
// usage.
type GlobalSnapshotID uint64
// Snapshot represents the current state for the given view.
type Snapshot interface {
ID() uint64
// SequenceID is the sequence id of this snapshot within its containing
// view.
//
// Relative to their view, sequence IDs are monotonically increasing, but this
// does not hold globally: when new views are created their initial snapshot
// has sequence ID 0. For operations that span multiple views, use global
// IDs.
SequenceID() uint64
// GlobalID is a globally unique identifier for this snapshot. Global IDs are
// monotonic: subsequent snapshots will have higher global ID, though
// subsequent snapshots in a view may not have adjacent global IDs.
GlobalID() GlobalSnapshotID
// View returns the View associated with this snapshot.
View() View
@ -192,6 +213,12 @@ type Snapshot interface {
BuildGoplsMod(ctx context.Context) (*modfile.File, error)
}
// SnapshotLabels returns a new slice of labels that should be used for events
// related to a snapshot.
//
// The labels record the snapshot's view-relative sequence ID and the folder
// of its view, which together identify the snapshot in event logs.
func SnapshotLabels(snapshot Snapshot) []label.Label {
return []label.Label{tag.Snapshot.Of(snapshot.SequenceID()), tag.Directory.Of(snapshot.View().Folder())}
}
// PackageFilter sets how a package is filtered out from a set of packages
// containing a given file.
type PackageFilter int

View File

@ -15,11 +15,10 @@ import (
"golang.org/x/tools/gopls/internal/lsp/source"
"golang.org/x/tools/gopls/internal/span"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/event/tag"
)
func Diagnostics(ctx context.Context, snapshot source.Snapshot) (map[source.VersionedFileIdentity][]*source.Diagnostic, error) {
ctx, done := event.Start(ctx, "work.Diagnostics", tag.Snapshot.Of(snapshot.ID()))
ctx, done := event.Start(ctx, "work.Diagnostics", source.SnapshotLabels(snapshot)...)
defer done()
reports := map[source.VersionedFileIdentity][]*source.Diagnostic{}

View File

@ -207,21 +207,30 @@ package b
env.OpenFile("a/empty.go")
env.OpenFile("b/go.mod")
env.Await(
env.DiagnosticAtRegexp("a/a.go", "package a"),
env.DiagnosticAtRegexp("b/go.mod", "module b.com"),
OutstandingWork(lsp.WorkspaceLoadFailure, msg),
env.AfterChange(
env.DiagnosticAtRegexp("a/a.go", "package a"),
env.DiagnosticAtRegexp("b/go.mod", "module b.com"),
OutstandingWork(lsp.WorkspaceLoadFailure, msg),
),
)
// Changing the workspace folders to the valid modules should resolve
// the workspace error.
// the workspace errors and diagnostics.
//
// TODO(rfindley): verbose work tracking doesn't follow changing the
// workspace folder, therefore we can't invoke AfterChange here.
env.ChangeWorkspaceFolders("a", "b")
env.Await(NoOutstandingWork())
env.Await(
EmptyDiagnostics("a/a.go"),
EmptyDiagnostics("b/go.mod"),
NoOutstandingWork(),
)
env.ChangeWorkspaceFolders(".")
// TODO(rfindley): when GO111MODULE=auto, we need to open or change a
// file here in order to detect a critical error. This is because gopls
// has forgotten about a/a.go, and therefor doesn't hit the heuristic
// has forgotten about a/a.go, and therefore doesn't hit the heuristic
// "all packages are command-line-arguments".
//
// This is broken, and could be fixed by adjusting the heuristic to
@ -229,7 +238,13 @@ package b
// (better) trying to get workspace packages for each open file. See
// also golang/go#54261.
env.OpenFile("b/b.go")
env.Await(OutstandingWork(lsp.WorkspaceLoadFailure, msg))
env.Await(
// TODO(rfindley): fix these missing diagnostics.
// env.DiagnosticAtRegexp("a/a.go", "package a"),
// env.DiagnosticAtRegexp("b/go.mod", "module b.com"),
env.DiagnosticAtRegexp("b/b.go", "package b"),
OutstandingWork(lsp.WorkspaceLoadFailure, msg),
)
})
})
}