internal/lsp: wait for ongoing work to complete during server shutdown

Add a new WaitGroup to the View that allows waiting for all snapshot
destroy operations to complete. This helps ensure that the server
properly cleans up resources when shutting down, and lets us remove
work-arounds in the gopls regtests intended to avoid races during
shutdown.

Also:
 - re-enable postfix completion tests that had to be disabled due to
   being too racy
 - rename the inlayHints regtest package to follow lower-cased naming
   conventions
 - add several TODOs

Fixes golang/go#50707
Fixes golang/go#53735

Change-Id: If216763fb7a32f487f6116459e3dc45f4c903b8a
Reviewed-on: https://go-review.googlesource.com/c/tools/+/416594
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Reviewed-by: Alan Donovan <adonovan@google.com>
This commit is contained in:
Robert Findley 2022-07-08 15:06:14 -04:00
parent a5adb0f2c2
commit 3db2cdc060
8 changed files with 66 additions and 48 deletions

View File

@ -13,8 +13,6 @@ import (
)
func TestPostfixSnippetCompletion(t *testing.T) {
t.Skipf("skipping test due to suspected synchronization bug; see https://go.dev/issue/50707")
const mod = `
-- go.mod --
module mod.com
@ -400,7 +398,7 @@ func _() {
before: `
package foo
func foo() []string {
func foo() []string {
x := "test"
return x.split
}`,
@ -409,7 +407,7 @@ package foo
import "strings"
func foo() []string {
func foo() []string {
x := "test"
return strings.Split(x, "$0")
}`,

View File

@ -1,7 +1,7 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package inlayHint
package inlayhint
import (
"testing"
@ -17,6 +17,7 @@ func TestMain(m *testing.M) {
bug.PanicOnBugs = true
Main(m, hooks.Options)
}
func TestEnablingInlayHints(t *testing.T) {
testenv.NeedsGo1Point(t, 14) // Test fails on 1.13.
const workspace = `

View File

@ -309,8 +309,13 @@ func runTest(t *testing.T, workspaceData, proxyData string, test func(context.Co
if err != nil {
t.Fatal(err)
}
defer release()
defer view.Shutdown(ctx)
defer func() {
// The snapshot must be released before calling view.Shutdown, to avoid a
// deadlock.
release()
view.Shutdown(ctx)
}()
test(ctx, snapshot)
}

View File

@ -301,19 +301,6 @@ func (s *Session) viewOf(uri span.URI) (*View, error) {
return s.viewMap[uri], nil
}
// viewsOf returns the subset of the session's views whose workspace
// folder contains the given URI.
func (s *Session) viewsOf(uri span.URI) []*View {
	s.viewMu.RLock()
	defer s.viewMu.RUnlock()

	var matching []*View
	for _, v := range s.views {
		if source.InDir(v.folder.Filename(), uri.Filename()) {
			matching = append(matching, v)
		}
	}
	return matching
}
func (s *Session) Views() []source.View {
s.viewMu.RLock()
defer s.viewMu.RUnlock()

View File

@ -187,11 +187,25 @@ type actionKey struct {
// destroy waits for all leases on the snapshot to expire then releases
// any resources (reference counts and files) associated with it.
// Snapshots being destroyed can be awaited using v.snapshotWG.
//
// TODO(adonovan): move this logic into the release function returned
// by Acquire when the reference count becomes zero. (This would cost
// us the destroyedBy debug info, unless we add it to the signature of
// memoize.RefCounted.Acquire.)
//
// The destroyedBy argument is used for debugging.
//
// v.snapshotMu must be held while calling this function, in order to preserve
// the invariants described by the docstring for v.snapshot.
func (v *View) destroy(s *snapshot, destroyedBy string) {
	// Increment before spawning the goroutine, so that a concurrent
	// v.snapshotWG.Wait() is guaranteed to observe this pending destroy.
	v.snapshotWG.Add(1)
	go func() {
		defer v.snapshotWG.Done()
		s.destroy(destroyedBy)
	}()
}
func (s *snapshot) destroy(destroyedBy string) {
// Wait for all leases to end before commencing destruction.
s.refcount.Wait()
@ -1678,7 +1692,7 @@ func (s *snapshot) orphanedFiles() []source.VersionedFileHandle {
}
// If the URI doesn't belong to this view, then it's not in a workspace
// package and should not be reloaded directly.
if !contains(s.view.session.viewsOf(uri), s.view) {
if !source.InDir(s.view.folder.Filename(), uri.Filename()) {
return
}
// If the file is not open and is in a vendor directory, don't treat it

View File

@ -67,9 +67,18 @@ type View struct {
// attempt at initialization.
initCancelFirstAttempt context.CancelFunc
// Track the latest snapshot via the snapshot field, guarded by snapshotMu.
//
// Invariant: whenever the snapshot field is overwritten, destroy(snapshot)
// is called on the previous (overwritten) snapshot while snapshotMu is held,
// incrementing snapshotWG. During shutdown the final snapshot is
// overwritten with nil and destroyed, guaranteeing that all observed
// snapshots have been destroyed via the destroy method, and snapshotWG may
// be waited upon to let these destroy operations complete.
snapshotMu sync.Mutex
snapshot *snapshot // nil after shutdown has been called
releaseSnapshot func() // called when snapshot is no longer needed
snapshot *snapshot // latest snapshot
releaseSnapshot func() // called when snapshot is no longer needed
snapshotWG sync.WaitGroup // refcount for pending destroy operations
// initialWorkspaceLoad is closed when the first workspace initialization has
// completed. If we failed to load, we only retry if the go.mod file changes,
@ -125,6 +134,11 @@ type environmentVariables struct {
gocache, gopath, goroot, goprivate, gomodcache, go111module string
}
// workspaceMode holds various flags defining how the gopls workspace should
// behave. They may be derived from the environment, user configuration, or
// depend on the Go version.
//
// TODO(rfindley): remove workspace mode, in favor of explicit checks.
type workspaceMode int
const (
@ -521,6 +535,9 @@ func (v *View) Shutdown(ctx context.Context) {
v.session.removeView(ctx, v)
}
// shutdown releases resources associated with the view, and waits for ongoing
// work to complete.
//
// TODO(rfindley): probably some of this should also be done in View.Shutdown
// above?
func (v *View) shutdown(ctx context.Context) {
@ -530,13 +547,14 @@ func (v *View) shutdown(ctx context.Context) {
v.snapshotMu.Lock()
if v.snapshot != nil {
v.releaseSnapshot()
go v.snapshot.destroy("View.shutdown")
v.destroy(v.snapshot, "View.shutdown")
v.snapshot = nil
v.releaseSnapshot = nil
}
v.snapshotMu.Unlock()
v.importsState.destroy()
v.snapshotWG.Wait()
}
func (v *View) Session() *Session {
@ -732,7 +750,7 @@ func (v *View) invalidateContent(ctx context.Context, changes map[span.URI]*file
v.snapshot, v.releaseSnapshot = prevSnapshot.clone(ctx, v.baseCtx, changes, forceReloadMetadata)
prevReleaseSnapshot()
go prevSnapshot.destroy("View.invalidateContent")
v.destroy(prevSnapshot, "View.invalidateContent")
// Return a second lease to the caller.
return v.snapshot, v.snapshot.Acquire()

View File

@ -487,6 +487,8 @@ func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI
return snapshot, fh, true, release, nil
}
// shutdown implements the 'shutdown' LSP handler. It releases resources
// associated with the server and waits for all ongoing work to complete.
func (s *Server) shutdown(ctx context.Context) error {
s.stateMu.Lock()
defer s.stateMu.Unlock()

View File

@ -66,9 +66,6 @@ type Runner struct {
mu sync.Mutex
ts *servertest.TCPServer
socketDir string
// closers is a queue of clean-up functions to run at the end of the entire
// test suite.
closers []io.Closer
}
type runConfig struct {
@ -228,6 +225,8 @@ type TestFunc func(t *testing.T, env *Env)
// modes. For each a test run, a new workspace is created containing the
// un-txtared files specified by filedata.
func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOption) {
// TODO(rfindley): this function has gotten overly complicated, and warrants
// refactoring.
t.Helper()
checkBuilder(t)
@ -259,6 +258,10 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio
}
t.Run(tc.name, func(t *testing.T) {
// TODO(rfindley): once jsonrpc2 shutdown is fixed, we should not leak
// goroutines in this test function.
// stacktest.NoLeak(t)
ctx := context.Background()
if r.Timeout != 0 && !config.noDefaultTimeout {
var cancel context.CancelFunc
@ -282,6 +285,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio
if err := os.MkdirAll(rootDir, 0755); err != nil {
t.Fatal(err)
}
files := fake.UnpackTxt(files)
if config.editor.WindowsLineEndings {
for name, data := range files {
@ -294,13 +298,14 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio
if err != nil {
t.Fatal(err)
}
// Deferring the closure of ws until the end of the entire test suite
// has, in testing, given the LSP server time to properly shutdown and
// release any file locks held in workspace, which is a problem on
// Windows. This may still be flaky however, and in the future we need a
// better solution to ensure that all Go processes started by gopls have
// exited before we clean up.
r.AddCloser(sandbox)
defer func() {
if !r.SkipCleanup {
if err := sandbox.Close(); err != nil {
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
t.Errorf("closing the sandbox: %v", err)
}
}
}()
ss := tc.getServer(t, config.optionsHook)
framer := jsonrpc2.NewRawStream
ls := &loggingFramer{}
@ -322,6 +327,7 @@ func (r *Runner) Run(t *testing.T, files string, test TestFunc, opts ...RunOptio
closeCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Second)
defer cancel()
if err := env.Editor.Close(closeCtx); err != nil {
pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
t.Errorf("closing editor: %v", err)
}
}()
@ -493,14 +499,6 @@ func (r *Runner) getRemoteSocket(t *testing.T) string {
return socket
}
// AddCloser schedules a closer to be closed at the end of the test run. This
// is useful for Windows in particular, as deferring cleanup to the end of the
// test suite gives the LSP server time to shut down and release any file
// locks held in the workspace before the sandbox directory is removed.
func (r *Runner) AddCloser(closer io.Closer) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.closers = append(r.closers, closer)
}
// Close cleans up resource that have been allocated to this workspace.
func (r *Runner) Close() error {
r.mu.Lock()
@ -518,11 +516,6 @@ func (r *Runner) Close() error {
}
}
if !r.SkipCleanup {
for _, closer := range r.closers {
if err := closer.Close(); err != nil {
errmsgs = append(errmsgs, err.Error())
}
}
if err := os.RemoveAll(r.TempDir); err != nil {
errmsgs = append(errmsgs, err.Error())
}