internal/lsp: add experimental support for multi-phase diagnostics

Add experimental support for running parsing and type-checking on
modified files immediately, while running analysis and diagnostics for
transitive dependencies only after a debounce delay. This feature is
disabled by default.

Also, some refactoring is done along the way:
 + Clean up diagnostic functions a bit using a report collection type.
 + Factor out duration parsing in options.go.

Change-Id: I2f14f9e30d79153cb4219207de3d9e77e1f8415b
Reviewed-on: https://go-review.googlesource.com/c/tools/+/255778
Run-TryBot: Robert Findley <rfindley@google.com>
gopls-CI: kokoro <noreply+kokoro@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Heschi Kreinick <heschi@google.com>
Trust: Robert Findley <rfindley@google.com>
commit 8445f4f065 (parent 0d28ed0cbe)
Author: Rob Findley, 2020-09-17 16:58:16 -04:00 (committed by Robert Findley)
11 changed files with 338 additions and 75 deletions


@@ -244,6 +244,16 @@ their expected values.
Default: `true`.
### **experimentalDiagnosticsDelay** *time.Duration*
experimentalDiagnosticsDelay controls the amount of time that gopls waits
after the most recent file modification before computing deep diagnostics.
Simple diagnostics (parsing and type-checking) are always run immediately
on recently modified packages.
This option must be set to a valid duration string, for example `"250ms"`.
Default: `0`.
<!-- END Experimental: DO NOT MANUALLY EDIT THIS SECTION -->
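The delay must be a string that Go's `time.ParseDuration` accepts. As a quick illustration of the accepted syntax (a standalone sketch; the specific values are arbitrary examples, not recommended settings):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Valid duration strings pair a number with a unit, e.g. "250ms".
	for _, s := range []string{"250ms", "1s", "1.5s", "2m30s"} {
		d, err := time.ParseDuration(s)
		fmt.Println(s, "->", d, err) // each of these parses successfully
	}
	// A bare number with no unit is rejected, so setting the option to
	// "250" would fail with an error such as:
	//   time: missing unit in duration "250"
	if _, err := time.ParseDuration("250"); err != nil {
		fmt.Println(err)
	}
}
```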
## Debugging


@@ -57,7 +57,12 @@ func (s *snapshot) load(ctx context.Context, scopes ...interface{}) error {
// go list and should already be GOPATH-vendorized when appropriate.
query = append(query, string(scope))
case fileURI:
query = append(query, fmt.Sprintf("file=%s", span.URI(scope).Filename()))
uri := span.URI(scope)
// Don't try to load a file that doesn't exist.
fh := s.FindFile(uri)
if fh != nil {
query = append(query, fmt.Sprintf("file=%s", uri.Filename()))
}
case moduleLoadScope:
query = append(query, fmt.Sprintf("%s/...", scope))
case viewLoadScope:
@@ -76,6 +81,9 @@ func (s *snapshot) load(ctx context.Context, scopes ...interface{}) error {
containsDir = true
}
}
if len(query) == 0 {
return nil
}
sort.Strings(query) // for determinism
ctx, done := event.Start(ctx, "cache.view.load", tag.Query.Of(query))


@@ -217,7 +217,7 @@ func (s *Server) runCommand(ctx context.Context, work *workDone, command *source
}
snapshot, release := sv.Snapshot(ctx)
defer release()
s.diagnoseSnapshot(snapshot)
s.diagnoseSnapshot(snapshot, nil)
case source.CommandGenerateGoplsMod:
var v source.View
if len(args) == 0 {

internal/lsp/debounce.go (new file, 81 lines)

@@ -0,0 +1,81 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lsp
import (
"sync"
"time"
)
type debounceFunc struct {
order uint64
done chan struct{}
}
type debouncer struct {
mu sync.Mutex
funcs map[string]*debounceFunc
}
func newDebouncer() *debouncer {
return &debouncer{
funcs: make(map[string]*debounceFunc),
}
}
// debounce waits timeout before running f, if no subsequent call is made with
// the same key in the intervening time. If a later call to debounce with the
// same key occurs while the original call is blocking, the original call will
// return immediately without running its f.
//
// If order is specified, it will be used to order calls logically, so calls
// with lesser order will not cancel calls with greater order.
func (d *debouncer) debounce(key string, order uint64, timeout time.Duration, f func()) {
if timeout == 0 {
// Degenerate case: no debouncing.
f()
return
}
// First, atomically acquire the current func, cancel it, and insert this
// call into d.funcs.
d.mu.Lock()
current, ok := d.funcs[key]
if ok && current.order > order {
// If we have a logical ordering of events (as is the case for snapshots),
// don't overwrite a later event with an earlier event.
d.mu.Unlock()
return
}
if ok {
close(current.done)
}
done := make(chan struct{})
next := &debounceFunc{
order: order,
done: done,
}
d.funcs[key] = next
d.mu.Unlock()
// Next, wait to be cancelled or for our wait to expire. There is a race here
// that we must handle: our timer could expire while another goroutine holds
// d.mu.
select {
case <-done:
case <-time.After(timeout):
d.mu.Lock()
if d.funcs[key] != next {
// We lost the race: another event has arrived for the key and started
// waiting. We could reasonably choose to run f at this point, but doing
// nothing is simpler.
d.mu.Unlock()
return
}
delete(d.funcs, key)
d.mu.Unlock()
f()
}
}
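
To make the debouncing semantics concrete, here is a small usage sketch (hypothetical, assumed to sit alongside the debouncer in package lsp since the type is unexported; the key and order mirror how diagnoseSnapshot keys by view name and orders by snapshot ID):

```go
package lsp

import (
	"fmt"
	"sync"
	"time"
)

// demoDebounce is an illustrative helper, not part of this change.
func demoDebounce() {
	d := newDebouncer()
	var wg sync.WaitGroup
	// Three "snapshots" arrive for the same view in quick succession.
	// Each later call cancels the earlier one, so (barring unlucky
	// scheduling) only the call with order 3 runs its func.
	for order := uint64(1); order <= 3; order++ {
		order := order
		wg.Add(1)
		go func() {
			defer wg.Done()
			d.debounce("view-1", order, 50*time.Millisecond, func() {
				fmt.Println("full diagnostics for snapshot", order)
			})
		}()
		time.Sleep(10 * time.Millisecond) // encourage in-order arrival
	}
	wg.Wait() // prints once: "full diagnostics for snapshot 3"
}
```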


@@ -0,0 +1,87 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package lsp
import (
"sync"
"testing"
"time"
)
func TestDebouncer(t *testing.T) {
t.Parallel()
type event struct {
key string
order uint64
fired bool
wantFired bool
}
tests := []struct {
label string
events []*event
}{
{
label: "overridden",
events: []*event{
{key: "a", order: 1, wantFired: false},
{key: "a", order: 2, wantFired: true},
},
},
{
label: "distinct labels",
events: []*event{
{key: "a", order: 1, wantFired: true},
{key: "b", order: 2, wantFired: true},
},
},
{
label: "reverse order",
events: []*event{
{key: "a", order: 2, wantFired: true},
{key: "a", order: 1, wantFired: false},
},
},
{
label: "multiple overrides",
events: []*event{
{key: "a", order: 1, wantFired: false},
{key: "a", order: 2, wantFired: false},
{key: "a", order: 3, wantFired: false},
{key: "a", order: 4, wantFired: false},
{key: "a", order: 5, wantFired: true},
},
},
}
for _, test := range tests {
test := test
t.Run(test.label, func(t *testing.T) {
t.Parallel()
d := newDebouncer()
var wg sync.WaitGroup
for i, e := range test.events {
wg.Add(1)
go func(e *event) {
d.debounce(e.key, e.order, 100*time.Millisecond, func() {
e.fired = true
})
wg.Done()
}(e)
// For a bit more fidelity, sleep to try to make things actually
// execute in order. This doesn't have to be perfect, but could be done
// properly using fake timers.
if i < len(test.events)-1 {
time.Sleep(10 * time.Millisecond)
}
}
wg.Wait()
for _, event := range test.events {
if event.fired != event.wantFired {
t.Errorf("(key: %q, order: %d): fired = %t, want %t",
event.key, event.order, event.fired, event.wantFired)
}
}
})
}
}


@@ -30,6 +30,47 @@ type idWithAnalysis struct {
withAnalysis bool
}
// A reportSet collects diagnostics for publication, sorting them by file and
// de-duplicating.
type reportSet struct {
mu sync.Mutex
// lazily allocated
reports map[idWithAnalysis]map[string]*source.Diagnostic
}
func (s *reportSet) add(id source.VersionedFileIdentity, withAnalysis bool, diags ...*source.Diagnostic) {
s.mu.Lock()
defer s.mu.Unlock()
if s.reports == nil {
s.reports = make(map[idWithAnalysis]map[string]*source.Diagnostic)
}
key := idWithAnalysis{
id: id,
withAnalysis: withAnalysis,
}
if _, ok := s.reports[key]; !ok {
s.reports[key] = map[string]*source.Diagnostic{}
}
for _, d := range diags {
s.reports[key][diagnosticKey(d)] = d
}
}
// diagnosticKey creates a unique identifier for a given diagnostic, since we
// cannot use source.Diagnostics as map keys. This is used to de-duplicate
// diagnostics.
func diagnosticKey(d *source.Diagnostic) string {
var tags, related string
for _, t := range d.Tags {
tags += fmt.Sprintf("%s", t)
}
for _, r := range d.Related {
related += fmt.Sprintf("%s%s%s", r.URI, r.Message, r.Range)
}
key := fmt.Sprintf("%s%s%s%s%s%s", d.Message, d.Range, d.Severity, d.Source, tags, related)
return fmt.Sprintf("%x", sha256.Sum256([]byte(key)))
}
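The de-duplication can be exercised with a short test sketch (hypothetical, assuming it lives in package lsp next to reportSet; the zero value is usable because the inner map is allocated lazily):

```go
func TestReportSetDeduplicates(t *testing.T) {
	var rs reportSet // zero value is ready to use
	var id source.VersionedFileIdentity
	d := &source.Diagnostic{Message: "example diagnostic"}
	// Adding the same diagnostic twice stores it only once, keyed by
	// diagnosticKey.
	rs.add(id, false, d, d)
	key := idWithAnalysis{id: id, withAnalysis: false}
	if got := len(rs.reports[key]); got != 1 {
		t.Errorf("got %d diagnostics, want 1 after de-duplication", got)
	}
}
```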
func (s *Server) diagnoseDetached(snapshot source.Snapshot) {
ctx := snapshot.View().BackgroundContext()
ctx = xcontext.Detach(ctx)
@@ -41,18 +82,69 @@ func (s *Server) diagnoseDetached(snapshot source.Snapshot) {
s.publishReports(ctx, snapshot, reports)
}
func (s *Server) diagnoseSnapshot(snapshot source.Snapshot) {
func (s *Server) diagnoseSnapshot(snapshot source.Snapshot, changedURIs []span.URI) {
ctx := snapshot.View().BackgroundContext()
delay := snapshot.View().Options().ExperimentalDiagnosticsDelay
if delay > 0 {
// Experimental 2-phase diagnostics.
//
// The first phase just parses and checks packages that have been affected
// by file modifications (no analysis).
//
// The second phase does everything, and is debounced by the configured delay.
reports, err := s.diagnoseChangedFiles(ctx, snapshot, changedURIs)
if err != nil {
if !errors.Is(err, context.Canceled) {
event.Error(ctx, "diagnosing changed files", err)
}
}
s.publishReports(ctx, snapshot, reports)
s.debouncer.debounce(snapshot.View().Name(), snapshot.ID(), delay, func() {
reports, _ := s.diagnose(ctx, snapshot, false)
s.publishReports(ctx, snapshot, reports)
})
return
}
// Ignore possible workspace configuration warnings in the normal flow.
reports, _ := s.diagnose(ctx, snapshot, false)
s.publishReports(ctx, snapshot, reports)
}
func (s *Server) diagnoseChangedFiles(ctx context.Context, snapshot source.Snapshot, uris []span.URI) (*reportSet, error) {
ctx, done := event.Start(ctx, "Server.diagnoseChangedFiles")
defer done()
packages := make(map[source.Package]struct{})
for _, uri := range uris {
pkgs, err := snapshot.PackagesForFile(ctx, uri, source.TypecheckWorkspace)
if err != nil {
// TODO (rFindley): we should probably do something with the error here,
// but as of now this can fail repeatedly if load fails, so can be too
// noisy to log (and we'll handle things later in the slow pass).
continue
}
for _, pkg := range pkgs {
packages[pkg] = struct{}{}
}
}
reports := new(reportSet)
for pkg := range packages {
pkgReports, _, err := source.Diagnostics(ctx, snapshot, pkg, false)
if err != nil {
return nil, err
}
for id, diags := range pkgReports {
reports.add(id, false, diags...)
}
}
return reports, nil
}
// diagnose is a helper function for running diagnostics with a given context.
// Do not call it directly.
func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysAnalyze bool) (map[idWithAnalysis]map[string]*source.Diagnostic, *protocol.ShowMessageParams) {
ctx, done := event.Start(ctx, "lsp:background-worker")
func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysAnalyze bool) (diagReports *reportSet, _ *protocol.ShowMessageParams) {
ctx, done := event.Start(ctx, "Server.diagnose")
defer done()
// Wait for a free diagnostics slot.
@@ -61,24 +153,11 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysA
return nil, nil
case s.diagnosticsSema <- struct{}{}:
}
defer func() { <-s.diagnosticsSema }()
defer func() {
<-s.diagnosticsSema
}()
var reportsMu sync.Mutex
reports := map[idWithAnalysis]map[string]*source.Diagnostic{}
addReport := func(id source.VersionedFileIdentity, withAnalysis bool, diags []*source.Diagnostic) {
reportsMu.Lock()
defer reportsMu.Unlock()
key := idWithAnalysis{
id: id,
withAnalysis: withAnalysis,
}
if _, ok := reports[key]; !ok {
reports[key] = map[string]*source.Diagnostic{}
}
for _, d := range diags {
reports[key][diagnosticKey(d)] = d
}
}
reports := new(reportSet)
// First, diagnose the go.mod file.
modReports, modErr := mod.Diagnostics(ctx, snapshot)
@@ -93,7 +172,7 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysA
event.Error(ctx, "missing URI for module diagnostics", fmt.Errorf("empty URI"), tag.Directory.Of(snapshot.View().Folder().Filename()))
continue
}
addReport(id, true, diags) // treat go.mod diagnostics like analyses
reports.add(id, true, diags...) // treat go.mod diagnostics like analyses
}
// Diagnose all of the packages in the workspace.
@@ -104,10 +183,7 @@ func (s *Server) diagnose(ctx context.Context, snapshot source.Snapshot, alwaysA
}
// Some error messages can be displayed as diagnostics.
if errList := (*source.ErrorList)(nil); errors.As(err, &errList) {
if r, err := errorsToDiagnostic(ctx, snapshot, *errList); err == nil {
for k, v := range r {
reports[k] = v
}
if err := errorsToDiagnostic(ctx, snapshot, *errList, reports); err == nil {
return reports, nil
}
}
@@ -172,7 +248,7 @@ If you believe this is a mistake, please file an issue: https://github.com/golan
// Add all reports to the global map, checking for duplicates.
for id, diags := range pkgReports {
addReport(id, withAnalysis, diags)
reports.add(id, withAnalysis, diags...)
}
// If gc optimization details are available, add them to the
// diagnostic reports.
@@ -182,7 +258,7 @@ If you believe this is a mistake, please file an issue: https://github.com/golan
event.Error(ctx, "warning: gc details", err, tag.Snapshot.Of(snapshot.ID()))
}
for id, diags := range gcReports {
addReport(id, withAnalysis, diags)
reports.add(id, withAnalysis, diags...)
}
}
}(pkg)
@@ -196,7 +272,7 @@ If you believe this is a mistake, please file an issue: https://github.com/golan
// meaning that we have already seen its package.
var seen bool
for _, withAnalysis := range []bool{true, false} {
_, ok := reports[idWithAnalysis{
_, ok := reports.reports[idWithAnalysis{
id: o.VersionedFileIdentity(),
withAnalysis: withAnalysis,
}]
@@ -209,7 +285,7 @@ If you believe this is a mistake, please file an issue: https://github.com/golan
if diagnostic == nil {
continue
}
addReport(o.VersionedFileIdentity(), true, []*source.Diagnostic{diagnostic})
reports.add(o.VersionedFileIdentity(), true, diagnostic)
}
}
return reports, showMsg
@@ -252,23 +328,7 @@ Otherwise, see the troubleshooting guidelines for help investigating (https://gi
}
}
// diagnosticKey creates a unique identifier for a given diagnostic, since we
// cannot use source.Diagnostics as map keys. This is used to de-duplicate
// diagnostics.
func diagnosticKey(d *source.Diagnostic) string {
var tags, related string
for _, t := range d.Tags {
tags += fmt.Sprintf("%s", t)
}
for _, r := range d.Related {
related += fmt.Sprintf("%s%s%s", r.URI, r.Message, r.Range)
}
key := fmt.Sprintf("%s%s%s%s%s%s", d.Message, d.Range, d.Severity, d.Source, tags, related)
return fmt.Sprintf("%x", sha256.Sum256([]byte(key)))
}
func errorsToDiagnostic(ctx context.Context, snapshot source.Snapshot, errors []*source.Error) (map[idWithAnalysis]map[string]*source.Diagnostic, error) {
reports := make(map[idWithAnalysis]map[string]*source.Diagnostic)
func errorsToDiagnostic(ctx context.Context, snapshot source.Snapshot, errors []*source.Error, reports *reportSet) error {
for _, e := range errors {
diagnostic := &source.Diagnostic{
Range: e.Range,
@@ -279,30 +339,23 @@ func errorsToDiagnostic(ctx context.Context, snapshot source.Snapshot, errors []
}
fh, err := snapshot.GetFile(ctx, e.URI)
if err != nil {
return nil, err
return err
}
id := idWithAnalysis{
id: fh.VersionedFileIdentity(),
withAnalysis: false,
}
if _, ok := reports[id]; !ok {
reports[id] = make(map[string]*source.Diagnostic)
}
reports[id][diagnosticKey(diagnostic)] = diagnostic
reports.add(fh.VersionedFileIdentity(), false, diagnostic)
}
return reports, nil
return nil
}
func (s *Server) publishReports(ctx context.Context, snapshot source.Snapshot, reports map[idWithAnalysis]map[string]*source.Diagnostic) {
func (s *Server) publishReports(ctx context.Context, snapshot source.Snapshot, reports *reportSet) {
// Check for context cancellation before publishing diagnostics.
if ctx.Err() != nil {
if ctx.Err() != nil || reports == nil {
return
}
s.deliveredMu.Lock()
defer s.deliveredMu.Unlock()
for key, diagnosticsMap := range reports {
for key, diagnosticsMap := range reports.reports {
// Don't deliver diagnostics if the context has already been canceled.
if ctx.Err() != nil {
break
@@ -469,6 +522,7 @@ See https://github.com/golang/go/issues/39164 for more detail on this issue.`,
if errors.Is(loadErr, source.PackagesLoadError) {
// TODO(rstambler): Construct the diagnostics in internal/lsp/cache
// so that we can avoid this here.
reports := new(reportSet)
for _, uri := range snapshot.ModFiles() {
fh, err := snapshot.GetFile(ctx, uri)
if err != nil {
@@ -478,9 +532,8 @@ See https://github.com/golang/go/issues/39164 for more detail on this issue.`,
if err != nil {
return false
}
s.publishReports(ctx, snapshot, map[idWithAnalysis]map[string]*source.Diagnostic{
{id: fh.VersionedFileIdentity()}: {diagnosticKey(diag): diag},
})
reports.add(fh.VersionedFileIdentity(), false, diag)
s.publishReports(ctx, snapshot, reports)
return true
}
}


@@ -204,6 +204,11 @@ func (e *Editor) configuration() map[string]interface{} {
if !e.Config.WithoutExperimentalWorkspaceModule {
config["experimentalWorkspaceModule"] = true
}
// TODO(rFindley): uncomment this if/when diagnostics delay is on by
// default... and probably change to the new settings name.
// config["experimentalDiagnosticsDelay"] = "10ms"
return config
}


@@ -30,6 +30,7 @@ func NewServer(session source.Session, client protocol.Client) *Server {
client: client,
diagnosticsSema: make(chan struct{}, concurrentAnalyses),
progress: newProgressTracker(client),
debouncer: newDebouncer(),
}
}
@@ -94,6 +95,9 @@ type Server struct {
diagnosticsSema chan struct{}
progress *progressTracker
// debouncer is used for debouncing diagnostics.
debouncer *debouncer
}
// sentDiagnostics is used to cache diagnostics that have been sent for a given file.

[File diff suppressed because one or more lines are too long]


@@ -330,6 +330,14 @@ type ExperimentalOptions struct {
// "&someStruct{}" are offered. Tests disable this flag to simplify
// their expected values.
LiteralCompletions bool
// ExperimentalDiagnosticsDelay controls the amount of time that gopls waits
// after the most recent file modification before computing deep diagnostics.
// Simple diagnostics (parsing and type-checking) are always run immediately
// on recently modified packages.
//
// This option must be set to a valid duration string, for example `"250ms"`.
ExperimentalDiagnosticsDelay time.Duration
}
// DebuggingOptions should not affect the logical execution of Gopls, but may
@@ -548,15 +556,7 @@ func (o *Options) set(name string, value interface{}) OptionResult {
case "completeUnimported":
result.setBool(&o.CompleteUnimported)
case "completionBudget":
if v, ok := result.asString(); ok {
d, err := time.ParseDuration(v)
if err != nil {
result.errorf("failed to parse duration %q: %v", v, err)
break
}
o.CompletionBudget = d
}
result.setDuration(&o.CompletionBudget)
case "matcher":
matcher, ok := result.asString()
if !ok {
@@ -693,6 +693,9 @@ func (o *Options) set(name string, value interface{}) OptionResult {
case "experimentalWorkspaceModule":
result.setBool(&o.ExperimentalWorkspaceModule)
case "experimentalDiagnosticsDelay":
result.setDuration(&o.ExperimentalDiagnosticsDelay)
// Replaced settings.
case "experimentalDisabledAnalyses":
result.State = OptionDeprecated
@@ -760,6 +763,17 @@ func (r *OptionResult) setBool(b *bool) {
}
}
func (r *OptionResult) setDuration(d *time.Duration) {
if v, ok := r.asString(); ok {
parsed, err := time.ParseDuration(v)
if err != nil {
r.errorf("failed to parse duration %q: %v", v, err)
return
}
*d = parsed
}
}
func (r *OptionResult) setBoolMap(bm *map[string]bool) {
all, ok := r.Value.(map[string]interface{})
if !ok {


@@ -191,6 +191,7 @@ func (s *Server) didModifyFiles(ctx context.Context, modifications []source.File
return err
}
// Clear out diagnostics for deleted files.
for _, uri := range deletions {
if err := s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
URI: protocol.URIFromSpanURI(uri),
@@ -254,7 +255,7 @@ func (s *Server) didModifyFiles(ctx context.Context, modifications []source.File
diagnosticWG.Add(1)
go func(snapshot source.Snapshot, uris []span.URI) {
defer diagnosticWG.Done()
s.diagnoseSnapshot(snapshot)
s.diagnoseSnapshot(snapshot, uris)
}(snapshot, uris)
}