Merge branch 'golang:master' into master

Commit e0ce9653db by Lokesh, 2025-06-05 21:20:48 +02:00, committed by GitHub.
62 changed files with 1327 additions and 351 deletions


@@ -101,6 +101,7 @@ pkg testing, method (*F) Output() io.Writer #59928
pkg testing, method (*T) Attr(string, string) #43936
pkg testing, method (*T) Output() io.Writer #59928
pkg testing, type TB interface, Attr(string, string) #43936
+pkg testing, type TB interface, Output() io.Writer #59928
pkg testing/fstest, method (MapFS) Lstat(string) (fs.FileInfo, error) #49580
pkg testing/fstest, method (MapFS) ReadLink(string) (string, error) #49580
pkg testing/synctest, func Test(*testing.T, func(*testing.T)) #67434


@@ -785,7 +785,7 @@ func inlineCallCheck(callerfn *ir.Func, call *ir.CallExpr) (bool, bool) {
	if call.Op() != ir.OCALLFUNC {
		return false, false
	}
-	if call.GoDefer {
+	if call.GoDefer || call.NoInline {
		return false, false
	}


@@ -279,7 +279,12 @@ func (s *inlClosureState) mark(n ir.Node) ir.Node {
	ok := match(n)

+	// can't wrap TailCall's child into ParenExpr
+	if t, ok := n.(*ir.TailCallStmt); ok {
+		ir.EditChildren(t.Call, s.mark)
+	} else {
		ir.EditChildren(n, s.mark)
+	}

	if ok {
		if p == nil {
@@ -317,23 +322,6 @@ func (s *inlClosureState) unparenthesize() {
			n = paren.X
		}
		ir.EditChildren(n, unparen)
-
-		// special case for tail calls: if the tail call was inlined, transform
-		// the tail call to a return stmt if the inlined function was not void,
-		// otherwise replace it with the inlined expression followed by a return.
-		if tail, ok := n.(*ir.TailCallStmt); ok {
-			if inl, done := tail.Call.(*ir.InlinedCallExpr); done {
-				if len(inl.ReturnVars) != 0 {
-					ret := ir.NewReturnStmt(tail.Pos(), []ir.Node{inl})
-					if len(inl.ReturnVars) > 1 {
-						typecheck.RewriteMultiValueCall(ret, inl)
-					}
-					n = ret
-				} else {
-					ret := ir.NewReturnStmt(tail.Pos(), nil)
-					n = ir.NewBlockStmt(tail.Pos(), []ir.Node{inl, ret})
-				}
-			}
-		}
		return n
	}
	ir.EditChildren(s.fn, unparen)
@@ -370,9 +358,11 @@ func (s *inlClosureState) fixpoint() bool {
}

func match(n ir.Node) bool {
-	switch n.(type) {
+	switch n := n.(type) {
	case *ir.CallExpr:
		return true
+	case *ir.TailCallStmt:
+		n.Call.NoInline = true // can't inline yet
	}
	return false
}


@@ -191,6 +191,7 @@ type CallExpr struct {
	KeepAlive []*Name // vars to be kept alive until call returns
	IsDDD     bool
	GoDefer   bool // whether this call is part of a go or defer statement
+	NoInline  bool // whether this call must not be inlined
}

func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {


@@ -2202,13 +2202,13 @@ func (n *TailCallStmt) doChildrenWithHidden(do func(Node) bool) bool {
func (n *TailCallStmt) editChildren(edit func(Node) Node) {
	editNodes(n.init, edit)
	if n.Call != nil {
-		n.Call = edit(n.Call)
+		n.Call = edit(n.Call).(*CallExpr)
	}
}

func (n *TailCallStmt) editChildrenWithHidden(edit func(Node) Node) {
	editNodes(n.init, edit)
	if n.Call != nil {
-		n.Call = edit(n.Call)
+		n.Call = edit(n.Call).(*CallExpr)
	}
}


@@ -479,7 +479,7 @@ func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
// code generation to jump directly to another function entirely.
type TailCallStmt struct {
	miniStmt
-	Call Node // the underlying call
+	Call *CallExpr // the underlying call
}

func NewTailCallStmt(pos src.XPos, call *CallExpr) *TailCallStmt {


@@ -25,9 +25,9 @@ determines its index in the series.
	SectionMeta
	SectionPosBase
	SectionPkg
-	SectionName    // TODO(markfreeman) Define.
-	SectionType    // TODO(markfreeman) Define.
-	SectionObj     // TODO(markfreeman) Define.
+	SectionName
+	SectionType
+	SectionObj
	SectionObjExt  // TODO(markfreeman) Define.
	SectionObjDict // TODO(markfreeman) Define.
	SectionBody    // TODO(markfreeman) Define.
@@ -35,9 +35,11 @@ determines its index in the series.
# Sections
A section is a series of elements of a type determined by the section's
-kind. Go constructs are mapped onto (potentially multiple) elements.
-Elements are accessed using an index relative to the start of the
-section.
+kind. Go constructs are mapped onto one or more elements with possibly
+different types; in that case, the elements are in different sections.
+
+Elements are accessed using an element index relative to the start of
+the section.

    RelElemIdx = Uint64 .
@@ -47,6 +49,9 @@ outside the string section access string values by reference.

    SectionString = { String } .

+Note that despite being an element, a string does not begin with a
+reference table.
+
## Meta Section
The meta section provides fundamental information for a package. It
contains exactly two elements a public root and a private root.
@@ -135,6 +140,97 @@ Note, a PkgRef is *not* equivalent to Ref[Pkg] due to an extra marker.
           Ref[Pkg]
         .

+## Type Section
+The type section is a series of type definition elements.
+
+    SectionType = { TypeDef } .
+
+A type definition can be in one of several formats, which are identified
+by their TypeSpec code.
+
+    TypeDef = RefTable
+              [ Sync ]
+              [ Sync ]
+              Uint64   // denotes which TypeSpec to use
+              TypeSpec
+              .
+
+    TypeSpec = TypeSpecBasic     // TODO(markfreeman): Define.
+             | TypeSpecNamed     // TODO(markfreeman): Define.
+             | TypeSpecPointer   // TODO(markfreeman): Define.
+             | TypeSpecSlice     // TODO(markfreeman): Define.
+             | TypeSpecArray     // TODO(markfreeman): Define.
+             | TypeSpecChan      // TODO(markfreeman): Define.
+             | TypeSpecMap       // TODO(markfreeman): Define.
+             | TypeSpecSignature // TODO(markfreeman): Define.
+             | TypeSpecStruct    // TODO(markfreeman): Define.
+             | TypeSpecInterface // TODO(markfreeman): Define.
+             | TypeSpecUnion     // TODO(markfreeman): Define.
+             | TypeSpecTypeParam // TODO(markfreeman): Define.
+             .
+
+// TODO(markfreeman): Document the reader dictionary once we understand it more.
+To use a type elsewhere, a TypeUse is encoded.
+
+    TypeUse = [ Sync ]
+              Bool             // whether it is a derived type
+              [ Uint64 ]       // if derived, an index into the reader dictionary
+              [ Ref[TypeDef] ] // else, a reference to the type
+              .
+
+## Object Sections
+Information about an object (e.g. variable, function, type name, etc.)
+is split into multiple elements in different sections. Those elements
+have the same section-relative element index.
+
+### Name Section
+The name section holds a series of names.
+
+    SectionName = { Name } .
+
+Names are elements holding qualified identifiers and type information
+for objects.
+
+    Name = RefTable
+           [ Sync ]
+           [ Sync ]
+           PkgRef    // the object's package
+           StringRef // the object's package-local name
+           [ Sync ]
+           Uint64    // the object's type (e.g. Var, Func, etc.)
+           .
+
+### Definition Section
+The definition section holds definitions for objects defined by the target
+package; it does not contain definitions for imported objects.
+
+    SectionObj = { ObjectDef } .
+
+Object definitions can be in one of several formats. To determine the correct
+format, the name section must be referenced; it contains a code indicating
+the object's type.
+
+    ObjectDef = RefTable
+                [ Sync ]
+                ObjectSpec
+                .
+
+    ObjectSpec = ObjectSpecConst     // TODO(markfreeman) Define.
+               | ObjectSpecFunc      // TODO(markfreeman) Define.
+               | ObjectSpecAlias     // TODO(markfreeman) Define.
+               | ObjectSpecNamedType // TODO(markfreeman) Define.
+               | ObjectSpecVar       // TODO(markfreeman) Define.
+               .
+
+To use an object definition elsewhere, an ObjectUse is encoded.
+
+    ObjectUse = [ Sync ]
+                [ Bool ]
+                Ref[ObjectDef]
+                Uint64      // the number of type arguments
+                { TypeUse } // references to the type arguments
+                .
+
# References
A reference table precedes every element. Each entry in the table
contains a (section, index) pair denoting the location of the
@@ -152,9 +248,14 @@ referenced element.
Elements encode references to other elements as an index in the
reference table not the location of the referenced element directly.

-// TODO(markfreeman): Rename to RefUse.
-    UseReloc = [ Sync ]
-               RelElemIdx
-             .
+    RefTableIdx = Uint64 .
+
+To do this, the Ref[T] primitive is used as below; note that this is
+the same shape as provided by package pkgbits, just with new
+interpretation applied.
+
+    Ref[T] = [ Sync ]
+             RefTableIdx // the Uint64
           .

# Primitives


@@ -458,7 +458,7 @@ func Renameinit() *types.Sym {
func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
	switch {
	case !haveEmbed:
-		return errors.New("go:embed only allowed in Go files that import \"embed\"")
+		return errors.New("go:embed requires import \"embed\" (or import _ \"embed\", if package is not used)")
	case len(decl.NameList) > 1:
		return errors.New("go:embed cannot apply to multiple vars")
	case decl.Values != nil:


@@ -3996,11 +3996,12 @@ func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
	if recv.Type() != nil && recv.Type().IsPtr() && method.Type.Recv().Type.IsPtr() &&
		method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) &&
+		!unifiedHaveInlineBody(ir.MethodExprName(dot).Func) &&
		!(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) {
		if base.Debug.TailCall != 0 {
			base.WarnfAt(fn.Nname.Type().Recv().Type.Elem().Pos(), "tail call emitted for the method %v wrapper", method.Nname)
		}
-		// Prefer OTAILCALL to reduce code size (the called method can be inlined).
+		// Prefer OTAILCALL to reduce code size (except the case when the called method can be inlined).
		fn.Body.Append(ir.NewTailCallStmt(pos, call))
		return
	}


@@ -1921,7 +1921,7 @@ func (s *state) stmt(n ir.Node) {
	case ir.OTAILCALL:
		n := n.(*ir.TailCallStmt)
-		s.callResult(n.Call.(*ir.CallExpr), callTail)
+		s.callResult(n.Call, callTail)
		call := s.mem()
		b := s.endBlock()
		b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.


@@ -137,7 +137,7 @@ assignOK:
	if cr > len(rhs) {
		stmt := stmt.(*ir.AssignListStmt)
		stmt.SetOp(ir.OAS2FUNC)
-		r := rhs[0]
+		r := rhs[0].(*ir.CallExpr)
		rtyp := r.Type()

		mismatched := false


@@ -333,6 +333,7 @@ func TestStdFixed(t *testing.T) {
		"issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
		"issue52697.go", // types2 does not have constraints on stack size
		"issue73309.go", // this test requires GODEBUG=gotypesalias=1
+		"issue73309b.go", // this test requires GODEBUG=gotypesalias=1

		// These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
		// However, types2 does not know about build constraints.


@@ -139,8 +139,7 @@ func walkStmt(n ir.Node) ir.Node {
		n := n.(*ir.TailCallStmt)

		var init ir.Nodes
-		call := n.Call.(*ir.CallExpr)
-		call.Fun = walkExpr(call.Fun, &init)
+		n.Call.Fun = walkExpr(n.Call.Fun, &init)
		if len(init) > 0 {
			init.Append(n)

src/cmd/dist/build.go

@@ -1516,7 +1516,7 @@ func cmdbootstrap() {
	}

	// To recap, so far we have built the new toolchain
-	// (cmd/asm, cmd/cgo, cmd/compile, cmd/link)
+	// (cmd/asm, cmd/cgo, cmd/compile, cmd/link, cmd/preprofile)
	// using the Go bootstrap toolchain and go command.
	// Then we built the new go command (as go_bootstrap)
	// using the new toolchain and our own build logic (above).
@@ -1589,6 +1589,18 @@ func cmdbootstrap() {
		os.Setenv("GOCACHE", oldgocache)
	}

+	// Keep in sync with binExes in cmd/distpack/pack.go.
+	binExesIncludedInDistpack := []string{"cmd/go", "cmd/gofmt"}
+	// Keep in sync with the filter in cmd/distpack/pack.go.
+	toolsIncludedInDistpack := []string{"cmd/asm", "cmd/cgo", "cmd/compile", "cmd/cover", "cmd/link", "cmd/preprofile", "cmd/vet"}
+	// We could install all tools in "cmd", but is unnecessary because we will
+	// remove them in distpack, so instead install the tools that will actually
+	// be included in distpack, which is a superset of toolchain. Not installing
+	// the tools will help us test what happens when the tools aren't present.
+	toolsToInstall := slices.Concat(binExesIncludedInDistpack, toolsIncludedInDistpack)
+
	if goos == oldgoos && goarch == oldgoarch {
		// Common case - not setting up for cross-compilation.
		timelog("build", "toolchain")
@@ -1605,9 +1617,9 @@ func cmdbootstrap() {
			xprintf("\n")
		}
		xprintf("Building commands for host, %s/%s.\n", goos, goarch)
-		goInstall(toolenv(), goBootstrap, "cmd")
-		checkNotStale(toolenv(), goBootstrap, "cmd")
-		checkNotStale(toolenv(), gorootBinGo, "cmd")
+		goInstall(toolenv(), goBootstrap, toolsToInstall...)
+		checkNotStale(toolenv(), goBootstrap, toolsToInstall...)
+		checkNotStale(toolenv(), gorootBinGo, toolsToInstall...)
		timelog("build", "target toolchain")
		if vflag > 0 {
@@ -1621,12 +1633,12 @@ func cmdbootstrap() {
		xprintf("Building packages and commands for target, %s/%s.\n", goos, goarch)
	}
	goInstall(nil, goBootstrap, "std")
-	goInstall(toolenv(), goBootstrap, "cmd")
+	goInstall(toolenv(), goBootstrap, toolsToInstall...)
	checkNotStale(toolenv(), goBootstrap, toolchain...)
	checkNotStale(nil, goBootstrap, "std")
-	checkNotStale(toolenv(), goBootstrap, "cmd")
+	checkNotStale(toolenv(), goBootstrap, toolsToInstall...)
	checkNotStale(nil, gorootBinGo, "std")
-	checkNotStale(toolenv(), gorootBinGo, "cmd")
+	checkNotStale(toolenv(), gorootBinGo, toolsToInstall...)
	if debug {
		run("", ShowOutput|CheckExit, pathf("%s/compile", tooldir), "-V=full")
		checkNotStale(toolenv(), goBootstrap, toolchain...)
@@ -1677,7 +1689,7 @@ func cmdbootstrap() {
	if distpack {
		xprintf("Packaging archives for %s/%s.\n", goos, goarch)
-		run("", ShowOutput|CheckExit, pathf("%s/distpack", tooldir))
+		run("", ShowOutput|CheckExit, gorootBinGo, "tool", "distpack")
	}

	// Print trailing banner unless instructed otherwise.


@@ -171,6 +171,7 @@ func main() {
		switch strings.TrimSuffix(path.Base(name), ".exe") {
		default:
			return false
+		// Keep in sync with toolsIncludedInDistpack in cmd/dist/build.go.
		case "asm", "cgo", "compile", "cover", "link", "preprofile", "vet":
		}
	}
@@ -179,6 +180,7 @@ func main() {

	// Add go and gofmt to bin, using cross-compiled binaries
	// if this is a cross-compiled distribution.
+	// Keep in sync with binExesIncludedInDistpack in cmd/dist/build.go.
	binExes := []string{
		"go",
		"gofmt",

src/cmd/doc/doc.go (new file)

@@ -0,0 +1,55 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Doc (usually run as go doc) accepts zero, one or two arguments.
//
// Zero arguments:
//
// go doc
//
// Show the documentation for the package in the current directory.
//
// One argument:
//
// go doc <pkg>
// go doc <sym>[.<methodOrField>]
// go doc [<pkg>.]<sym>[.<methodOrField>]
// go doc [<pkg>.][<sym>.]<methodOrField>
//
// The first item in this list that succeeds is the one whose documentation
// is printed. If there is a symbol but no package, the package in the current
// directory is chosen. However, if the argument begins with a capital
// letter it is always assumed to be a symbol in the current directory.
//
// Two arguments:
//
// go doc <pkg> <sym>[.<methodOrField>]
//
// Show the documentation for the package, symbol, and method or field. The
// first argument must be a full package path. This is similar to the
// command-line usage for the godoc command.
//
// For commands, unless the -cmd flag is present "go doc command"
// shows only the package-level docs for the package.
//
// The -src flag causes doc to print the full source code for the symbol, such
// as the body of a struct, function or method.
//
// The -all flag causes doc to print all documentation for the package and
// all its visible symbols. The argument must identify a package.
//
// For complete documentation, run "go help doc".
package main
import (
"cmd/internal/doc"
"cmd/internal/telemetry/counter"
"os"
)
func main() {
counter.Open()
counter.Inc("doc/invocations")
doc.Main(os.Args[1:])
}


@@ -1093,10 +1093,10 @@ func TestGoListTest(t *testing.T) {
	tg.grepStdoutNot(`^testing \[bytes.test\]$`, "unexpected test copy of testing")
	tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")

-	tg.run("list", "-test", "cmd/buildid", "cmd/doc")
+	tg.run("list", "-test", "cmd/buildid", "cmd/gofmt")
	tg.grepStdout(`^cmd/buildid$`, "missing cmd/buildid")
-	tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
-	tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
+	tg.grepStdout(`^cmd/gofmt$`, "missing cmd/gofmt")
+	tg.grepStdout(`^cmd/gofmt\.test$`, "missing cmd/gofmt test")
	tg.grepStdoutNot(`^cmd/buildid\.test$`, "unexpected cmd/buildid test")
	tg.grepStdoutNot(`^testing`, "unexpected testing")


@@ -227,6 +227,8 @@ func ForceHost() {
	// Recompute the build context using Goos and Goarch to
	// set the correct value for ctx.CgoEnabled.
	BuildContext = defaultContext()
+	// Call SetGOROOT to properly set the GOROOT on the new context.
+	SetGOROOT(Getenv("GOROOT"), false)
	// Recompute experiments: the settings determined depend on GOOS and GOARCH.
	// This will also update the BuildContext's tool tags to include the new
	// experiment tags.


@@ -2,17 +2,15 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

+//go:build !cmd_go_bootstrap
+
// Package doc implements the “go doc” command.
package doc

import (
	"cmd/go/internal/base"
-	"cmd/go/internal/cfg"
+	"cmd/internal/doc"
	"context"
-	"errors"
-	"os"
-	"os/exec"
-	"path/filepath"
)

var CmdDoc = &base.Command{
@@ -134,13 +132,5 @@ Flags:
}

func runDoc(ctx context.Context, cmd *base.Command, args []string) {
-	base.StartSigHandlers()
-	err := base.RunErr(cfg.BuildToolexec, filepath.Join(cfg.GOROOTbin, "go"), "tool", "doc", args)
-	if err != nil {
-		var ee *exec.ExitError
-		if errors.As(err, &ee) {
-			os.Exit(ee.ExitCode())
-		}
-		base.Error(err)
-	}
+	doc.Main(args)
}


@@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build cmd_go_bootstrap
// Don't build cmd/doc into go_bootstrap because it depends on net.
package doc
import "cmd/go/internal/base"
var CmdDoc = &base.Command{}


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package main
+package doc

import (
	"bytes"


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package main
+package doc

import (
	"bytes"
@@ -90,7 +90,7 @@ type test struct {
	no  []string // Regular expressions that should not match.
}

-const p = "cmd/doc/testdata"
+const p = "cmd/internal/doc/testdata"

var tests = []test{
	// Sanity check.
@@ -105,7 +105,7 @@ var tests = []test{
	{
		"package clause",
		[]string{p},
-		[]string{`package pkg.*cmd/doc/testdata`},
+		[]string{`package pkg.*cmd/internal/doc/testdata`},
		nil,
	},


@@ -2,45 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-// Doc (usually run as go doc) accepts zero, one or two arguments.
-//
-// Zero arguments:
-//
-//	go doc
-//
-// Show the documentation for the package in the current directory.
-//
-// One argument:
-//
-//	go doc <pkg>
-//	go doc <sym>[.<methodOrField>]
-//	go doc [<pkg>.]<sym>[.<methodOrField>]
-//	go doc [<pkg>.][<sym>.]<methodOrField>
-//
-// The first item in this list that succeeds is the one whose documentation
-// is printed. If there is a symbol but no package, the package in the current
-// directory is chosen. However, if the argument begins with a capital
-// letter it is always assumed to be a symbol in the current directory.
-//
-// Two arguments:
-//
-//	go doc <pkg> <sym>[.<methodOrField>]
-//
-// Show the documentation for the package, symbol, and method or field. The
-// first argument must be a full package path. This is similar to the
-// command-line usage for the godoc command.
-//
-// For commands, unless the -cmd flag is present "go doc command"
-// shows only the package-level docs for the package.
-//
-// The -src flag causes doc to print the full source code for the symbol, such
-// as the body of a struct, function or method.
-//
-// The -all flag causes doc to print all documentation for the package and
-// all its visible symbols. The argument must identify a package.
-//
-// For complete documentation, run "go help doc".
-package main
+// Package doc provides the implementation of the "go doc" subcommand and cmd/doc.
+package doc

import (
	"bytes"
@@ -74,7 +37,7 @@ var (
)

// usage is a replacement usage function for the flags package.
-func usage() {
+func usage(flagSet *flag.FlagSet) {
	fmt.Fprintf(os.Stderr, "Usage of [go] doc:\n")
	fmt.Fprintf(os.Stderr, "\tgo doc\n")
	fmt.Fprintf(os.Stderr, "\tgo doc <pkg>\n")
@@ -85,16 +48,17 @@ func usage() {
	fmt.Fprintf(os.Stderr, "For more information run\n")
	fmt.Fprintf(os.Stderr, "\tgo help doc\n\n")
	fmt.Fprintf(os.Stderr, "Flags:\n")
-	flag.PrintDefaults()
+	flagSet.PrintDefaults()
	os.Exit(2)
}

-func main() {
+// Main is the entry point, invoked both by go doc and cmd/doc.
+func Main(args []string) {
	log.SetFlags(0)
	log.SetPrefix("doc: ")
-	counter.Open()
	dirsInit()
-	err := do(os.Stdout, flag.CommandLine, os.Args[1:])
+	var flagSet flag.FlagSet
+	err := do(os.Stdout, &flagSet, args)
	if err != nil {
		log.Fatal(err)
	}
@@ -102,7 +66,7 @@ func main() {

// do is the workhorse, broken out of main to make testing easier.
func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
-	flagSet.Usage = usage
+	flagSet.Usage = func() { usage(flagSet) }
	unexported = false
	matchCase = false
	flagSet.StringVar(&chdir, "C", "", "change to `dir` before running command")
@@ -114,7 +78,6 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
	flagSet.BoolVar(&short, "short", false, "one-line representation for each symbol")
	flagSet.BoolVar(&serveHTTP, "http", false, "serve HTML docs over HTTP")
	flagSet.Parse(args)
-	counter.Inc("doc/invocations")
	counter.CountFlags("doc/flag:", *flag.CommandLine)
	if chdir != "" {
		if err := os.Chdir(chdir); err != nil {
@@ -151,7 +114,7 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
	// Loop until something is printed.
	dirs.Reset()
	for i := 0; ; i++ {
-		buildPackage, userPath, sym, more := parseArgs(flagSet.Args())
+		buildPackage, userPath, sym, more := parseArgs(flagSet, flagSet.Args())
		if i > 0 && !more { // Ignore the "more" bit on the first iteration.
			return failMessage(paths, symbol, method)
		}
@@ -165,7 +128,7 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
			unexported = true
		}

-		symbol, method = parseSymbol(sym)
+		symbol, method = parseSymbol(flagSet, sym)

		pkg := parsePackage(writer, buildPackage, userPath)
		paths = append(paths, pkg.prettyPath())
@@ -338,7 +301,7 @@ func failMessage(paths []string, symbol, method string) error {
// and there may be more matches. For example, if the argument
// is rand.Float64, we must scan both crypto/rand and math/rand
// to find the symbol, and the first call will return crypto/rand, true.
-func parseArgs(args []string) (pkg *build.Package, path, symbol string, more bool) {
+func parseArgs(flagSet *flag.FlagSet, args []string) (pkg *build.Package, path, symbol string, more bool) {
	wd, err := os.Getwd()
	if err != nil {
		log.Fatal(err)
@@ -356,7 +319,7 @@ func parseArgs(args []string) (pkg *build.Package, path, symbol string, more boo
	}
	switch len(args) {
	default:
-		usage()
+		usage(flagSet)
	case 1:
		// Done below.
	case 2:
@@ -499,7 +462,7 @@ func importDir(dir string) *build.Package {
// parseSymbol breaks str apart into a symbol and method.
// Both may be missing or the method may be missing.
// If present, each must be a valid Go identifier.
-func parseSymbol(str string) (symbol, method string) {
+func parseSymbol(flagSet *flag.FlagSet, str string) (symbol, method string) {
	if str == "" {
		return
	}
@@ -510,7 +473,7 @@ func parseSymbol(str string) (symbol, method string) {
		method = elem[1]
	default:
		log.Printf("too many periods in symbol specification")
-		usage()
+		usage(flagSet)
	}
	symbol = elem[0]
	return


@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-package main
+package doc

import (
	"bufio"


@@ -4,7 +4,7 @@

//go:build plan9 || windows

-package main
+package doc

import (
	"os"


@@ -4,7 +4,7 @@

//go:build unix || js || wasip1

-package main
+package doc

import (
	"os"


@@ -215,12 +215,12 @@ func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *trace.Event
// to trace.ResourceNone (the global scope).
type globalRangeGenerator struct {
	ranges   map[string]activeRange
-	seenSync bool
+	seenSync int
}

// Sync notifies the generator of an EventSync event.
func (g *globalRangeGenerator) Sync() {
-	g.seenSync = true
+	g.seenSync++
}

// GlobalRange implements a handler for EventRange* events whose Scope.Kind is ResourceNone.
@@ -234,8 +234,9 @@ func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *trace.Event) {
	case trace.EventRangeBegin:
		g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()}
	case trace.EventRangeActive:
-		// If we've seen a Sync event, then Active events are always redundant.
-		if !g.seenSync {
+		// If we've seen at least 2 Sync events (indicating that we're in at least the second
+		// generation), then Active events are always redundant.
+		if g.seenSync < 2 {
			// Otherwise, they extend back to the start of the trace.
			g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()}
		}
@@ -294,12 +295,12 @@ func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *trace.Event)
// ResourceProc.
type procRangeGenerator struct {
	ranges   map[trace.Range]activeRange
-	seenSync bool
+	seenSync int
}

// Sync notifies the generator of an EventSync event.
func (g *procRangeGenerator) Sync() {
-	g.seenSync = true
+	g.seenSync++
}

// ProcRange implements a handler for EventRange* events whose Scope.Kind is ResourceProc.
@@ -313,8 +314,9 @@ func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *trace.Event) {
	case trace.EventRangeBegin:
		g.ranges[r] = activeRange{ev.Time(), ev.Stack()}
	case trace.EventRangeActive:
-		// If we've seen a Sync event, then Active events are always redundant.
-		if !g.seenSync {
+		// If we've seen at least 2 Sync events (indicating that we're in at least the second
+		// generation), then Active events are always redundant.
+		if g.seenSync < 2 {
			// Otherwise, they extend back to the start of the trace.
			g.ranges[r] = activeRange{ctx.startTime, ev.Stack()}
		}


@@ -4,6 +4,10 @@

// Package ast declares the types used to represent syntax trees for Go
// packages.
+//
+// Syntax trees may be constructed directly, but they are typically
+// produced from Go source code by the parser; see the ParseFile
+// function in package [go/parser].
package ast

import (
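As an editorial aside (not part of this commit), here is a minimal sketch of the workflow the new doc comment points to: producing a go/ast tree with the parser and walking it with ast.Inspect. The expression string is invented for illustration.

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
	)

	func main() {
		// Parse a single expression; ParseExpr is the smallest entry point.
		expr, err := parser.ParseExpr(`a + b*f(c)`)
		if err != nil {
			panic(err)
		}
		// Walk the syntax tree and print every identifier encountered.
		ast.Inspect(expr, func(n ast.Node) bool {
			if id, ok := n.(*ast.Ident); ok {
				fmt.Println(id.Name)
			}
			return true
		})
	}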


@@ -2,10 +2,14 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

-// Package parser implements a parser for Go source files. Input may be
-// provided in a variety of forms (see the various Parse* functions); the
-// output is an abstract syntax tree (AST) representing the Go source. The
-// parser is invoked through one of the Parse* functions.
+// Package parser implements a parser for Go source files.
+//
+// The [ParseFile] function reads file input from a string, []byte, or
+// io.Reader, and produces an [ast.File] representing the complete
+// abstract syntax tree of the file.
+//
+// The [ParseExprFrom] function reads a single source-level expression and
+// produces an [ast.Expr], the syntax tree of the expression.
//
// The parser accepts a larger language than is syntactically permitted by
// the Go spec, for simplicity, and for improved robustness in the presence
@@ -13,6 +17,11 @@
// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) field is not restricted to one entry.
+//
+// Applications that need to parse one or more complete packages of Go
+// source code may find it more convenient not to interact directly
+// with the parser but instead to use the Load function in package
+// [golang.org/x/tools/go/packages].
package parser

import (
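Again as an editorial aside rather than part of the change, a small sketch of the [ParseFile] entry point described in the new doc comment; the file name "demo.go" and the source text are made up for illustration.

	package main

	import (
		"fmt"
		"go/ast"
		"go/parser"
		"go/token"
	)

	func main() {
		src := "package demo\n\n// Answer is a demo constant.\nconst Answer = 42\n"

		fset := token.NewFileSet()
		// src may be a string, []byte, or io.Reader; ParseComments keeps
		// the comments in the resulting *ast.File.
		f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
		if err != nil {
			panic(err)
		}
		for _, decl := range f.Decls {
			if g, ok := decl.(*ast.GenDecl); ok {
				fmt.Println("declaration at", fset.Position(g.Pos()))
			}
		}
	}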


@@ -429,7 +429,7 @@ func (f *File) Position(p Pos) (pos Position) {
type FileSet struct {
	mutex sync.RWMutex         // protects the file set
	base  int                  // base offset for the next file
-	files []*File              // list of files in the order added to the set
+	tree  tree                 // tree of files in ascending base order
	last  atomic.Pointer[File] // cache of last file looked up
}

@@ -487,7 +487,7 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
	}
	// add the file to the file set
	s.base = base
-	s.files = append(s.files, f)
+	s.tree.add(f)
	s.last.Store(f)
	return f
}
@@ -518,40 +518,9 @@ func (s *FileSet) AddExistingFiles(files ...*File) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

-	// Merge and sort.
-	newFiles := append(s.files, files...)
-	slices.SortFunc(newFiles, func(x, y *File) int {
-		return cmp.Compare(x.Base(), y.Base())
-	})
-
-	// Reject overlapping files.
-	// Discard adjacent identical files.
-	out := newFiles[:0]
-	for i, file := range newFiles {
-		if i > 0 {
-			prev := newFiles[i-1]
-			if file == prev {
-				continue
-			}
-			if prev.Base()+prev.Size()+1 > file.Base() {
-				panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
-					prev.Name(), prev.Base(), prev.Base()+prev.Size(),
-					file.Name(), file.Base(), file.Base()+file.Size()))
-			}
-		}
-		out = append(out, file)
-	}
-	newFiles = out
-
-	s.files = newFiles
-
-	// Advance base.
-	if len(newFiles) > 0 {
-		last := newFiles[len(newFiles)-1]
-		newBase := last.Base() + last.Size() + 1
-		if s.base < newBase {
-			s.base = newBase
-		}
+	for _, f := range files {
+		s.tree.add(f)
+		s.base = max(s.base, f.Base()+f.Size()+1)
	}
}
@@ -567,39 +536,26 @@ func (s *FileSet) RemoveFile(file *File) {
	s.mutex.Lock()
	defer s.mutex.Unlock()

-	if i := searchFiles(s.files, file.base); i >= 0 && s.files[i] == file {
-		last := &s.files[len(s.files)-1]
-		s.files = slices.Delete(s.files, i, i+1)
-		*last = nil // don't prolong lifetime when popping last element
+	pn, _ := s.tree.locate(file.key())
+	if *pn != nil && (*pn).file == file {
+		s.tree.delete(pn)
	}
}

-// Iterate calls f for the files in the file set in the order they were added
-// until f returns false.
-func (s *FileSet) Iterate(f func(*File) bool) {
-	for i := 0; ; i++ {
-		var file *File
-		s.mutex.RLock()
-		if i < len(s.files) {
-			file = s.files[i]
-		}
-		s.mutex.RUnlock()
-		if file == nil || !f(file) {
-			break
-		}
-	}
-}
-
-func searchFiles(a []*File, x int) int {
-	i, found := slices.BinarySearchFunc(a, x, func(a *File, x int) int {
-		return cmp.Compare(a.base, x)
-	})
-	if !found {
-		// We want the File containing x, but if we didn't
-		// find x then i is the next one.
-		i--
-	}
-	return i
-}
+// Iterate calls yield for the files in the file set in ascending Base
+// order until yield returns false.
+func (s *FileSet) Iterate(yield func(*File) bool) {
+	s.mutex.RLock()
+	defer s.mutex.RUnlock()
+
+	// Unlock around user code.
+	// The iterator is robust to modification by yield.
+	// Avoid range here, so we can use defer.
+	s.tree.all()(func(f *File) bool {
+		s.mutex.RUnlock()
+		defer s.mutex.RLock()
+		return yield(f)
+	})
+}

func (s *FileSet) file(p Pos) *File {
@@ -611,16 +567,12 @@ func (s *FileSet) file(p Pos) *File {
	s.mutex.RLock()
	defer s.mutex.RUnlock()

-	// p is not in last file - search all files
-	if i := searchFiles(s.files, int(p)); i >= 0 {
-		f := s.files[i]
-		// f.base <= int(p) by definition of searchFiles
-		if int(p) <= f.base+f.size {
-			// Update cache of last file. A race is ok,
-			// but an exclusive lock causes heavy contention.
-			s.last.Store(f)
-			return f
-		}
+	pn, _ := s.tree.locate(key{int(p), int(p)})
+	if n := *pn; n != nil {
+		// Update cache of last file. A race is ok,
+		// but an exclusive lock causes heavy contention.
+		s.last.Store(n.file)
+		return n.file
	}
	return nil
}


@@ -84,15 +84,15 @@ func BenchmarkFileSet_Position(b *testing.B) {
}

func BenchmarkFileSet_AddExistingFiles(b *testing.B) {
+	rng := rand.New(rand.NewPCG(rand.Uint64(), rand.Uint64()))
+
	// Create the "universe" of files.
	fset := token.NewFileSet()
	var files []*token.File
	for range 25000 {
		files = append(files, fset.AddFile("", -1, 10000))
	}
-	rand.Shuffle(len(files), func(i, j int) {
-		files[i], files[j] = files[j], files[i]
-	})
+	token.Shuffle(rng, files)

	// choose returns n random files.
	choose := func(n int) []*token.File {


@@ -4,6 +4,8 @@

package token

+import "slices"
+
type serializedFile struct {
	// fields correspond 1:1 to fields with same (lower-case) name in File
	Name string
@@ -27,18 +29,15 @@ func (s *FileSet) Read(decode func(any) error) error {
	s.mutex.Lock()
	s.base = ss.Base
-	files := make([]*File, len(ss.Files))
-	for i := 0; i < len(ss.Files); i++ {
-		f := &ss.Files[i]
-		files[i] = &File{
+	for _, f := range ss.Files {
+		s.tree.add(&File{
			name:  f.Name,
			base:  f.Base,
			size:  f.Size,
			lines: f.Lines,
			infos: f.Infos,
-		}
+		})
	}
-	s.files = files
	s.last.Store(nil)
	s.mutex.Unlock()
@@ -51,16 +50,16 @@ func (s *FileSet) Write(encode func(any) error) error {
	s.mutex.Lock()
	ss.Base = s.base
-	files := make([]serializedFile, len(s.files))
-	for i, f := range s.files {
+	var files []serializedFile
+	for f := range s.tree.all() {
		f.mutex.Lock()
-		files[i] = serializedFile{
+		files = append(files, serializedFile{
			Name:  f.name,
			Base:  f.base,
			Size:  f.size,
-			Lines: append([]int(nil), f.lines...),
-			Infos: append([]lineInfo(nil), f.infos...),
-		}
+			Lines: slices.Clone(f.lines),
+			Infos: slices.Clone(f.infos),
+		})
		f.mutex.Unlock()
	}
	ss.Files = files


@@ -8,6 +8,7 @@ import (
	"bytes"
	"encoding/gob"
	"fmt"
+	"slices"
	"testing"
)

@@ -29,12 +30,14 @@ func equal(p, q *FileSet) error {
		return fmt.Errorf("different bases: %d != %d", p.base, q.base)
	}

-	if len(p.files) != len(q.files) {
-		return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
+	pfiles := slices.Collect(p.tree.all())
+	qfiles := slices.Collect(q.tree.all())
+	if len(pfiles) != len(qfiles) {
+		return fmt.Errorf("different number of files: %d != %d", len(pfiles), len(qfiles))
	}

-	for i, f := range p.files {
-		g := q.files[i]
+	for i, f := range pfiles {
+		g := qfiles[i]
		if f.name != g.name {
			return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
		}
@@ -88,7 +91,7 @@ func TestSerialization(t *testing.T) {
	p := NewFileSet()
	checkSerialize(t, p)
	// add some files
-	for i := 0; i < 10; i++ {
+	for i := range 10 {
		f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
		checkSerialize(t, p)
		// add some lines and alternative file infos

src/go/token/tree.go (new file)

@@ -0,0 +1,410 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
// tree is a self-balancing AVL tree; see
// Lewis & Denenberg, Data Structures and Their Algorithms.
//
// An AVL tree is a binary tree in which the difference between the
// heights of a node's two subtrees--the node's "balance factor"--is
// at most one. It is more strictly balanced than a red/black tree,
// and thus favors lookups at the expense of updates, which is the
// appropriate trade-off for FileSet.
//
// Insertion at a node may cause its ancestors' balance factors to
// temporarily reach ±2, requiring rebalancing of each such ancestor
// by a rotation.
//
// Each key is the pos-end range of a single File.
// All Files in the tree must have disjoint ranges.
//
// The implementation is simplified from Russ Cox's github.com/rsc/omap.
import (
"fmt"
"iter"
)
// A tree is a tree-based ordered map:
// each value is a *File, keyed by its Pos range.
// All map entries cover disjoint ranges.
//
// The zero value of tree is an empty map ready to use.
type tree struct {
root *node
}
type node struct {
// We use the notation (parent left right) in many comments.
parent *node
left *node
right *node
file *File
key key // = file.key(), but improves locality (25% faster)
balance int32 // at most ±2
height int32
}
// A key represents the Pos range of a File.
type key struct{ start, end int }
func (f *File) key() key {
return key{f.base, f.base + f.size}
}
// compareKey reports whether x is before y (-1),
// after y (+1), or overlapping y (0).
// This is a total order so long as all
// files in the tree have disjoint ranges.
//
// All files are separated by at least one unit.
// This allows us to use strict < comparisons.
// Use key{p, p} to search for a zero-width position
// even at the start or end of a file.
func compareKey(x, y key) int {
switch {
case x.end < y.start:
return -1
case y.end < x.start:
return +1
}
return 0
}
// check asserts that each node's height, subtree, and parent link is
// correct.
func (n *node) check(parent *node) {
const debugging = false
if debugging {
if n == nil {
return
}
if n.parent != parent {
panic("bad parent")
}
n.left.check(n)
n.right.check(n)
n.checkBalance()
}
}
func (n *node) checkBalance() {
lheight, rheight := n.left.safeHeight(), n.right.safeHeight()
balance := rheight - lheight
if balance != n.balance {
panic("bad node.balance")
}
if !(-2 <= balance && balance <= +2) {
panic(fmt.Sprintf("node.balance out of range: %d", balance))
}
h := 1 + max(lheight, rheight)
if h != n.height {
panic("bad node.height")
}
}
// locate returns a pointer to the variable that holds the node
// identified by k, along with its parent, if any. If the key is not
// present, it returns a pointer to the node where the key should be
// inserted by a subsequent call to [tree.set].
func (t *tree) locate(k key) (pos **node, parent *node) {
pos, x := &t.root, t.root
for x != nil {
sign := compareKey(k, x.key)
if sign < 0 {
pos, x, parent = &x.left, x.left, x
} else if sign > 0 {
pos, x, parent = &x.right, x.right, x
} else {
break
}
}
return pos, parent
}
// all returns an iterator over the tree t.
// If t is modified during the iteration,
// some files may not be visited.
// No file will be visited multiple times.
func (t *tree) all() iter.Seq[*File] {
return func(yield func(*File) bool) {
if t == nil {
return
}
x := t.root
if x != nil {
for x.left != nil {
x = x.left
}
}
for x != nil && yield(x.file) {
if x.height >= 0 {
// still in tree
x = x.next()
} else {
// deleted
x = t.nextAfter(t.locate(x.key))
}
}
}
}
// nextAfter returns the node in the key sequence following
// (pos, parent), a result pair from [tree.locate].
func (t *tree) nextAfter(pos **node, parent *node) *node {
switch {
case *pos != nil:
return (*pos).next()
case parent == nil:
return nil
case pos == &parent.left:
return parent
default:
return parent.next()
}
}
func (x *node) next() *node {
if x.right == nil {
for x.parent != nil && x.parent.right == x {
x = x.parent
}
return x.parent
}
x = x.right
for x.left != nil {
x = x.left
}
return x
}
func (t *tree) setRoot(x *node) {
t.root = x
if x != nil {
x.parent = nil
}
}
func (x *node) setLeft(y *node) {
x.left = y
if y != nil {
y.parent = x
}
}
func (x *node) setRight(y *node) {
x.right = y
if y != nil {
y.parent = x
}
}
func (n *node) safeHeight() int32 {
if n == nil {
return -1
}
return n.height
}
func (n *node) update() {
lheight, rheight := n.left.safeHeight(), n.right.safeHeight()
n.height = max(lheight, rheight) + 1
n.balance = rheight - lheight
}
func (t *tree) replaceChild(parent, old, new *node) {
switch {
case parent == nil:
if t.root != old {
panic("corrupt tree")
}
t.setRoot(new)
case parent.left == old:
parent.setLeft(new)
case parent.right == old:
parent.setRight(new)
default:
panic("corrupt tree")
}
}
// rebalanceUp visits each excessively unbalanced ancestor
// of x, restoring balance by rotating it.
//
// x is a node that has just been mutated, and so the height and
// balance of x and its ancestors may be stale, but the children of x
// must be in a valid state.
func (t *tree) rebalanceUp(x *node) {
for x != nil {
h := x.height
x.update()
switch x.balance {
case -2:
if x.left.balance == 1 {
t.rotateLeft(x.left)
}
x = t.rotateRight(x)
case +2:
if x.right.balance == -1 {
t.rotateRight(x.right)
}
x = t.rotateLeft(x)
}
if x.height == h {
// x's height has not changed, so the height
// and balance of its ancestors have not changed;
// no further rebalancing is required.
return
}
x = x.parent
}
}
// rotateRight rotates the subtree rooted at node y.
// turning (y (x a b) c) into (x a (y b c)).
func (t *tree) rotateRight(y *node) *node {
// p -> (y (x a b) c)
p := y.parent
x := y.left
b := x.right
x.checkBalance()
y.checkBalance()
x.setRight(y)
y.setLeft(b)
t.replaceChild(p, y, x)
y.update()
x.update()
return x
}
// rotateLeft rotates the subtree rooted at node x.
// turning (x a (y b c)) into (y (x a b) c).
func (t *tree) rotateLeft(x *node) *node {
// p -> (x a (y b c))
p := x.parent
y := x.right
b := y.left
x.checkBalance()
y.checkBalance()
y.setLeft(x)
x.setRight(b)
t.replaceChild(p, x, y)
x.update()
y.update()
return y
}
// add inserts file into the tree, if not present.
// It panics if file overlaps with another.
func (t *tree) add(file *File) {
pos, parent := t.locate(file.key())
if *pos == nil {
t.set(file, pos, parent) // missing; insert
return
}
if prev := (*pos).file; prev != file {
panic(fmt.Sprintf("file %s (%d-%d) overlaps with file %s (%d-%d)",
prev.Name(), prev.Base(), prev.Base()+prev.Size(),
file.Name(), file.Base(), file.Base()+file.Size()))
}
}
// set updates the existing node at (pos, parent) if present, or
// inserts a new node if not, so that it refers to file.
func (t *tree) set(file *File, pos **node, parent *node) {
if x := *pos; x != nil {
// This code path isn't currently needed
// because FileSet never updates an existing entry.
// Remove this assertion if things change.
if true {
panic("unreachable according to current FileSet requirements")
}
x.file = file
return
}
x := &node{file: file, key: file.key(), parent: parent, height: -1}
*pos = x
t.rebalanceUp(x)
}
// delete deletes the node at pos.
func (t *tree) delete(pos **node) {
t.root.check(nil)
x := *pos
switch {
case x == nil:
// This code path isn't currently needed because FileSet
// only calls delete after a positive locate.
// Remove this assertion if things change.
if true {
panic("unreachable according to current FileSet requirements")
}
return
case x.left == nil:
if *pos = x.right; *pos != nil {
(*pos).parent = x.parent
}
t.rebalanceUp(x.parent)
case x.right == nil:
*pos = x.left
x.left.parent = x.parent
t.rebalanceUp(x.parent)
default:
t.deleteSwap(pos)
}
x.balance = -100
x.parent = nil
x.left = nil
x.right = nil
x.height = -1
t.root.check(nil)
}
// deleteSwap deletes a node that has two children by replacing
// it by its in-order successor, then triggers a rebalance.
func (t *tree) deleteSwap(pos **node) {
x := *pos
z := t.deleteMin(&x.right)
*pos = z
unbalanced := z.parent // lowest potentially unbalanced node
if unbalanced == x {
unbalanced = z // (x a (z nil b)) -> (z a b)
}
z.parent = x.parent
z.height = x.height
z.balance = x.balance
z.setLeft(x.left)
z.setRight(x.right)
t.rebalanceUp(unbalanced)
}
// deleteMin updates the subtree rooted at *zpos to delete its minimum
// (leftmost) element, which may be *zpos itself. It returns the
// deleted node.
func (t *tree) deleteMin(zpos **node) (z *node) {
for (*zpos).left != nil {
zpos = &(*zpos).left
}
z = *zpos
*zpos = z.right
if *zpos != nil {
(*zpos).parent = z.parent
}
return z
}

src/go/token/tree_test.go (new file)

@@ -0,0 +1,86 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package token
import (
"math/rand/v2"
"slices"
"testing"
)
// TestTree provides basic coverage of the AVL tree operations.
func TestTree(t *testing.T) {
// Use a reproducible PRNG.
seed1, seed2 := rand.Uint64(), rand.Uint64()
t.Logf("random seeds: %d, %d", seed1, seed2)
rng := rand.New(rand.NewPCG(seed1, seed2))
// Create a number of Files of arbitrary size.
files := make([]*File, 500)
var base int
for i := range files {
base++
size := 1000
files[i] = &File{base: base, size: size}
base += size
}
// Add them all to the tree in random order.
var tr tree
{
files2 := slices.Clone(files)
Shuffle(rng, files2)
for _, f := range files2 {
tr.add(f)
}
}
// Randomly delete a subset of them.
for range 100 {
i := rng.IntN(len(files))
file := files[i]
if file == nil {
continue // already deleted
}
files[i] = nil
pn, _ := tr.locate(file.key())
if (*pn).file != file {
t.Fatalf("locate returned wrong file")
}
tr.delete(pn)
}
// Check some position lookups within each file.
for _, file := range files {
if file == nil {
continue // deleted
}
for _, pos := range []int{
file.base, // start
file.base + file.size/2, // midpoint
file.base + file.size, // end
} {
pn, _ := tr.locate(key{pos, pos})
if (*pn).file != file {
t.Fatalf("lookup %s@%d returned wrong file %s",
file.name, pos,
(*pn).file.name)
}
}
}
// Check that the sequence is the same.
files = slices.DeleteFunc(files, func(f *File) bool { return f == nil })
if !slices.Equal(slices.Collect(tr.all()), files) {
t.Fatalf("incorrect tree.all sequence")
}
}
func Shuffle[T any](rng *rand.Rand, slice []*T) {
rng.Shuffle(len(slice), func(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
})
}


@@ -26,6 +26,11 @@
// specification. Use the Types field of [Info] for the results of
// type deduction.
//
+// Applications that need to type-check one or more complete packages
+// of Go source code may find it more convenient not to invoke the
+// type checker directly but instead to use the Load function in
+// package [golang.org/x/tools/go/packages].
+//
// For a tutorial, see https://go.dev/s/types-tutorial.
package types
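As a hedged editorial illustration of the suggestion above (not part of this commit), a sketch that loads a type-checked package via golang.org/x/tools/go/packages; the pattern "fmt" and the particular Mode bits are assumptions chosen for the example.

	package main

	import (
		"fmt"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// Ask the loader for enough information to get type-checked packages back.
		cfg := &packages.Config{
			Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax,
		}
		pkgs, err := packages.Load(cfg, "fmt")
		if err != nil {
			panic(err)
		}
		for _, pkg := range pkgs {
			// pkg.Types is the *types.Package built by the type checker.
			fmt.Println(pkg.PkgPath, "has", pkg.Types.Scope().Len(), "package-level objects")
		}
	}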


@@ -335,6 +335,7 @@ func TestStdFixed(t *testing.T) {
		"issue56103.go", // anonymous interface cycles; will be a type checker error in 1.22
		"issue52697.go", // go/types does not have constraints on stack size
		"issue73309.go", // this test requires GODEBUG=gotypesalias=1
+		"issue73309b.go", // this test requires GODEBUG=gotypesalias=1

		// These tests requires runtime/cgo.Incomplete, which is only available on some platforms.
		// However, go/types does not know about build constraints.

View File

@ -16,6 +16,7 @@ import (
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
"sync/atomic"
"testing" "testing"
"time" "time"
"weak" "weak"
@ -218,6 +219,116 @@ func TestTimerFromOutsideBubble(t *testing.T) {
} }
} }
// TestTimerNondeterminism verifies that timers firing at the same instant
// don't always fire in exactly the same order.
func TestTimerNondeterminism(t *testing.T) {
synctest.Run(func() {
const iterations = 1000
var seen1, seen2 bool
for range iterations {
tm1 := time.NewTimer(1)
tm2 := time.NewTimer(1)
select {
case <-tm1.C:
seen1 = true
case <-tm2.C:
seen2 = true
}
if seen1 && seen2 {
return
}
synctest.Wait()
}
t.Errorf("after %v iterations, seen timer1:%v, timer2:%v; want both", iterations, seen1, seen2)
})
}
// TestSleepNondeterminism verifies that goroutines sleeping to the same instant
// don't always schedule in exactly the same order.
func TestSleepNondeterminism(t *testing.T) {
synctest.Run(func() {
const iterations = 1000
var seen1, seen2 bool
for range iterations {
var first atomic.Int32
go func() {
time.Sleep(1)
first.CompareAndSwap(0, 1)
}()
go func() {
time.Sleep(1)
first.CompareAndSwap(0, 2)
}()
time.Sleep(1)
synctest.Wait()
switch v := first.Load(); v {
case 1:
seen1 = true
case 2:
seen2 = true
default:
t.Fatalf("first = %v, want 1 or 2", v)
}
if seen1 && seen2 {
return
}
synctest.Wait()
}
t.Errorf("after %v iterations, seen goroutine 1:%v, 2:%v; want both", iterations, seen1, seen2)
})
}
// TestTimerRunsImmediately verifies that a 0-duration timer sends on its channel
// without waiting for the bubble to block.
func TestTimerRunsImmediately(t *testing.T) {
synctest.Run(func() {
start := time.Now()
tm := time.NewTimer(0)
select {
case got := <-tm.C:
if !got.Equal(start) {
t.Errorf("<-tm.C = %v, want %v", got, start)
}
default:
t.Errorf("0-duration timer channel is not readable; want it to be")
}
})
}
// TestTimerRanInPast verifies that reading from a timer's channel receives the
// time the timer fired, even when that time is in the past.
func TestTimerRanInPast(t *testing.T) {
synctest.Run(func() {
delay := 1 * time.Second
want := time.Now().Add(delay)
tm := time.NewTimer(delay)
time.Sleep(2 * delay)
select {
case got := <-tm.C:
if !got.Equal(want) {
t.Errorf("<-tm.C = %v, want %v", got, want)
}
default:
t.Errorf("expired timer channel is not readable; want it to be")
}
})
}
// TestAfterFuncRunsImmediately verifies that a 0-duration AfterFunc is scheduled
// without waiting for the bubble to block.
func TestAfterFuncRunsImmediately(t *testing.T) {
synctest.Run(func() {
var b atomic.Bool
time.AfterFunc(0, func() {
b.Store(true)
})
for !b.Load() {
runtime.Gosched()
}
})
}
func TestChannelFromOutsideBubble(t *testing.T) { func TestChannelFromOutsideBubble(t *testing.T) {
choutside := make(chan struct{}) choutside := make(chan struct{})
for _, test := range []struct { for _, test := range []struct {

View File

@ -806,7 +806,8 @@ func (c *Client) makeHeadersCopier(ireq *Request) func(req *Request, stripSensit
for k, vv := range ireqhdr { for k, vv := range ireqhdr {
sensitive := false sensitive := false
switch CanonicalHeaderKey(k) { switch CanonicalHeaderKey(k) {
case "Authorization", "Www-Authenticate", "Cookie", "Cookie2": case "Authorization", "Www-Authenticate", "Cookie", "Cookie2",
"Proxy-Authorization", "Proxy-Authenticate":
sensitive = true sensitive = true
} }
if !(sensitive && stripSensitiveHeaders) { if !(sensitive && stripSensitiveHeaders) {

View File

@ -1550,6 +1550,8 @@ func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) {
if r.Host+r.URL.Path != "a.example.com/" { if r.Host+r.URL.Path != "a.example.com/" {
if h := r.Header.Get("Authorization"); h != "" { if h := r.Header.Get("Authorization"); h != "" {
t.Errorf("on request to %v%v, Authorization=%q, want no header", r.Host, r.URL.Path, h) t.Errorf("on request to %v%v, Authorization=%q, want no header", r.Host, r.URL.Path, h)
} else if h := r.Header.Get("Proxy-Authorization"); h != "" {
t.Errorf("on request to %v%v, Proxy-Authorization=%q, want no header", r.Host, r.URL.Path, h)
} }
} }
// Follow a chain of redirects from a to b and back to a. // Follow a chain of redirects from a to b and back to a.
@ -1578,6 +1580,7 @@ func testClientStripHeadersOnRepeatedRedirect(t *testing.T, mode testMode) {
req, _ := NewRequest("GET", proto+"://a.example.com/", nil) req, _ := NewRequest("GET", proto+"://a.example.com/", nil)
req.Header.Add("Cookie", "foo=bar") req.Header.Add("Cookie", "foo=bar")
req.Header.Add("Authorization", "secretpassword") req.Header.Add("Authorization", "secretpassword")
req.Header.Add("Proxy-Authorization", "secretpassword")
res, err := c.Do(req) res, err := c.Do(req)
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)

View File

@ -497,7 +497,7 @@ func empty(c *hchan) bool {
// c.timer is also immutable (it is set after make(chan) but before any channel operations). // c.timer is also immutable (it is set after make(chan) but before any channel operations).
// All timer channels have dataqsiz > 0. // All timer channels have dataqsiz > 0.
if c.timer != nil { if c.timer != nil {
c.timer.maybeRunChan() c.timer.maybeRunChan(c)
} }
return atomic.Loaduint(&c.qcount) == 0 return atomic.Loaduint(&c.qcount) == 0
} }
@ -542,7 +542,7 @@ func chanrecv(c *hchan, ep unsafe.Pointer, block bool) (selected, received bool)
} }
if c.timer != nil { if c.timer != nil {
c.timer.maybeRunChan() c.timer.maybeRunChan(c)
} }
// Fast path: check for failed non-blocking operation without acquiring the lock. // Fast path: check for failed non-blocking operation without acquiring the lock.
@ -821,7 +821,7 @@ func chanlen(c *hchan) int {
} }
async := debug.asynctimerchan.Load() != 0 async := debug.asynctimerchan.Load() != 0
if c.timer != nil && async { if c.timer != nil && async {
c.timer.maybeRunChan() c.timer.maybeRunChan(c)
} }
if c.timer != nil && !async { if c.timer != nil && !async {
// timer channels have a buffered implementation // timer channels have a buffered implementation

View File

@ -1123,3 +1123,102 @@ func BenchmarkMemclrKnownSize512KiB(b *testing.B) {
memclrSink = x[:] memclrSink = x[:]
} }
func BenchmarkMemmoveKnownSize112(b *testing.B) {
type T struct {
x [112]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize128(b *testing.B) {
type T struct {
x [128]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize192(b *testing.B) {
type T struct {
x [192]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize248(b *testing.B) {
type T struct {
x [248]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize256(b *testing.B) {
type T struct {
x [256]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize512(b *testing.B) {
type T struct {
x [512]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}
func BenchmarkMemmoveKnownSize1024(b *testing.B) {
type T struct {
x [1024]int8
}
p := &T{}
q := &T{}
b.SetBytes(int64(unsafe.Sizeof(T{})))
for i := 0; i < b.N; i++ {
*p = *q
}
memclrSink = p.x[:]
}

View File

@ -131,6 +131,7 @@ package runtime
import ( import (
"internal/cpu" "internal/cpu"
"internal/goarch" "internal/goarch"
"internal/goexperiment"
"internal/runtime/atomic" "internal/runtime/atomic"
"internal/runtime/gc" "internal/runtime/gc"
"unsafe" "unsafe"
@ -717,7 +718,7 @@ func gcStart(trigger gcTrigger) {
throw("p mcache not flushed") throw("p mcache not flushed")
} }
// Initialize ptrBuf if necessary. // Initialize ptrBuf if necessary.
if p.gcw.ptrBuf == nil { if goexperiment.GreenTeaGC && p.gcw.ptrBuf == nil {
p.gcw.ptrBuf = (*[gc.PageSize / goarch.PtrSize]uintptr)(persistentalloc(gc.PageSize, goarch.PtrSize, &memstats.gcMiscSys)) p.gcw.ptrBuf = (*[gc.PageSize / goarch.PtrSize]uintptr)(persistentalloc(gc.PageSize, goarch.PtrSize, &memstats.gcMiscSys))
} }
} }
@ -1233,14 +1234,7 @@ func gcMarkTermination(stw worldStop) {
}) })
} }
if debug.gctrace > 1 { if debug.gctrace > 1 {
for i := range pp.gcw.stats { pp.gcw.flushScanStats(&memstats.lastScanStats)
memstats.lastScanStats[i].spansDenseScanned += pp.gcw.stats[i].spansDenseScanned
memstats.lastScanStats[i].spanObjsDenseScanned += pp.gcw.stats[i].spanObjsDenseScanned
memstats.lastScanStats[i].spansSparseScanned += pp.gcw.stats[i].spansSparseScanned
memstats.lastScanStats[i].spanObjsSparseScanned += pp.gcw.stats[i].spanObjsSparseScanned
memstats.lastScanStats[i].sparseObjsScanned += pp.gcw.stats[i].sparseObjsScanned
}
clear(pp.gcw.stats[:])
} }
pp.pinnerCache = nil pp.pinnerCache = nil
}) })
@ -1301,38 +1295,7 @@ func gcMarkTermination(stw worldStop) {
print("\n") print("\n")
if debug.gctrace > 1 { if debug.gctrace > 1 {
var ( dumpScanStats()
spansDenseScanned uint64
spanObjsDenseScanned uint64
spansSparseScanned uint64
spanObjsSparseScanned uint64
sparseObjsScanned uint64
)
for _, stats := range memstats.lastScanStats {
spansDenseScanned += stats.spansDenseScanned
spanObjsDenseScanned += stats.spanObjsDenseScanned
spansSparseScanned += stats.spansSparseScanned
spanObjsSparseScanned += stats.spanObjsSparseScanned
sparseObjsScanned += stats.sparseObjsScanned
}
totalObjs := sparseObjsScanned + spanObjsSparseScanned + spanObjsDenseScanned
totalSpans := spansSparseScanned + spansDenseScanned
print("scan: total ", sparseObjsScanned, "+", spanObjsSparseScanned, "+", spanObjsDenseScanned, "=", totalObjs, " objs")
print(", ", spansSparseScanned, "+", spansDenseScanned, "=", totalSpans, " spans\n")
for i, stats := range memstats.lastScanStats {
if stats == (sizeClassScanStats{}) {
continue
}
totalObjs := stats.sparseObjsScanned + stats.spanObjsSparseScanned + stats.spanObjsDenseScanned
totalSpans := stats.spansSparseScanned + stats.spansDenseScanned
if i == 0 {
print("scan: class L ")
} else {
print("scan: class ", gc.SizeClassToSize[i], "B ")
}
print(stats.sparseObjsScanned, "+", stats.spanObjsSparseScanned, "+", stats.spanObjsDenseScanned, "=", totalObjs, " objs")
print(", ", stats.spansSparseScanned, "+", stats.spansDenseScanned, "=", totalSpans, " spans\n")
}
} }
printunlock() printunlock()
} }

View File

@ -763,3 +763,57 @@ func heapBitsSmallForAddrInline(spanBase, addr, elemsize uintptr) uintptr {
} }
return read return read
} }
type sizeClassScanStats struct {
spansDenseScanned uint64
spanObjsDenseScanned uint64
spansSparseScanned uint64
spanObjsSparseScanned uint64
sparseObjsScanned uint64
}
func dumpScanStats() {
var (
spansDenseScanned uint64
spanObjsDenseScanned uint64
spansSparseScanned uint64
spanObjsSparseScanned uint64
sparseObjsScanned uint64
)
for _, stats := range memstats.lastScanStats {
spansDenseScanned += stats.spansDenseScanned
spanObjsDenseScanned += stats.spanObjsDenseScanned
spansSparseScanned += stats.spansSparseScanned
spanObjsSparseScanned += stats.spanObjsSparseScanned
sparseObjsScanned += stats.sparseObjsScanned
}
totalObjs := sparseObjsScanned + spanObjsSparseScanned + spanObjsDenseScanned
totalSpans := spansSparseScanned + spansDenseScanned
print("scan: total ", sparseObjsScanned, "+", spanObjsSparseScanned, "+", spanObjsDenseScanned, "=", totalObjs, " objs")
print(", ", spansSparseScanned, "+", spansDenseScanned, "=", totalSpans, " spans\n")
for i, stats := range memstats.lastScanStats {
if stats == (sizeClassScanStats{}) {
continue
}
totalObjs := stats.sparseObjsScanned + stats.spanObjsSparseScanned + stats.spanObjsDenseScanned
totalSpans := stats.spansSparseScanned + stats.spansDenseScanned
if i == 0 {
print("scan: class L ")
} else {
print("scan: class ", gc.SizeClassToSize[i], "B ")
}
print(stats.sparseObjsScanned, "+", stats.spanObjsSparseScanned, "+", stats.spanObjsDenseScanned, "=", totalObjs, " objs")
print(", ", stats.spansSparseScanned, "+", stats.spansDenseScanned, "=", totalSpans, " spans\n")
}
}
func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
for i := range w.stats {
dst[i].spansDenseScanned += w.stats[i].spansDenseScanned
dst[i].spanObjsDenseScanned += w.stats[i].spanObjsDenseScanned
dst[i].spansSparseScanned += w.stats[i].spansSparseScanned
dst[i].spanObjsSparseScanned += w.stats[i].spanObjsSparseScanned
dst[i].sparseObjsScanned += w.stats[i].sparseObjsScanned
}
clear(w.stats[:])
}

View File

@ -6,6 +6,8 @@
package runtime package runtime
import "internal/runtime/gc"
func (s *mspan) markBitsForIndex(objIndex uintptr) markBits { func (s *mspan) markBitsForIndex(objIndex uintptr) markBits {
bytep, mask := s.gcmarkBits.bitp(objIndex) bytep, mask := s.gcmarkBits.bitp(objIndex)
return markBits{bytep, mask, objIndex} return markBits{bytep, mask, objIndex}
@ -78,3 +80,33 @@ func (w *gcWork) tryGetSpan(steal bool) objptr {
func scanSpan(p objptr, gcw *gcWork) { func scanSpan(p objptr, gcw *gcWork) {
throw("unimplemented") throw("unimplemented")
} }
type sizeClassScanStats struct {
sparseObjsScanned uint64
}
func dumpScanStats() {
var sparseObjsScanned uint64
for _, stats := range memstats.lastScanStats {
sparseObjsScanned += stats.sparseObjsScanned
}
print("scan: total ", sparseObjsScanned, " objs\n")
for i, stats := range memstats.lastScanStats {
if stats == (sizeClassScanStats{}) {
continue
}
if i == 0 {
print("scan: class L ")
} else {
print("scan: class ", gc.SizeClassToSize[i], "B ")
}
print(stats.sparseObjsScanned, " objs\n")
}
}
func (w *gcWork) flushScanStats(dst *[gc.NumSizeClasses]sizeClassScanStats) {
for i := range w.stats {
dst[i].sparseObjsScanned += w.stats[i].sparseObjsScanned
}
clear(w.stats[:])
}

View File

@ -49,14 +49,6 @@ type mstats struct {
enablegc bool enablegc bool
} }
type sizeClassScanStats struct {
spansDenseScanned uint64
spanObjsDenseScanned uint64
spansSparseScanned uint64
spanObjsSparseScanned uint64
sparseObjsScanned uint64
}
var memstats mstats var memstats mstats
// A MemStats records statistics about the memory allocator. // A MemStats records statistics about the memory allocator.

View File

@ -3341,7 +3341,7 @@ top:
// which may steal timers. It's important that between now // which may steal timers. It's important that between now
// and then, nothing blocks, so these numbers remain mostly // and then, nothing blocks, so these numbers remain mostly
// relevant. // relevant.
now, pollUntil, _ := pp.timers.check(0) now, pollUntil, _ := pp.timers.check(0, nil)
// Try to schedule the trace reader. // Try to schedule the trace reader.
if traceEnabled() || traceShuttingDown() { if traceEnabled() || traceShuttingDown() {
@ -3780,7 +3780,7 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWo
// timerpMask tells us whether the P may have timers at all. If it // timerpMask tells us whether the P may have timers at all. If it
// can't, no need to check at all. // can't, no need to check at all.
if stealTimersOrRunNextG && timerpMask.read(enum.position()) { if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
tnow, w, ran := p2.timers.check(now) tnow, w, ran := p2.timers.check(now, nil)
now = tnow now = tnow
if w != 0 && (pollUntil == 0 || w < pollUntil) { if w != 0 && (pollUntil == 0 || w < pollUntil) {
pollUntil = w pollUntil = w

View File

@ -185,7 +185,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo
} }
if cas.c.timer != nil { if cas.c.timer != nil {
cas.c.timer.maybeRunChan() cas.c.timer.maybeRunChan(cas.c)
} }
j := cheaprandn(uint32(norder + 1)) j := cheaprandn(uint32(norder + 1))

View File

@ -185,7 +185,6 @@ func synctestRun(f func()) {
} }
const synctestBaseTime = 946684800000000000 // midnight UTC 2000-01-01 const synctestBaseTime = 946684800000000000 // midnight UTC 2000-01-01
bubble.now = synctestBaseTime bubble.now = synctestBaseTime
bubble.timers.bubble = bubble
lockInit(&bubble.mu, lockRankSynctest) lockInit(&bubble.mu, lockRankSynctest)
lockInit(&bubble.timers.mu, lockRankTimers) lockInit(&bubble.timers.mu, lockRankTimers)
@ -213,7 +212,7 @@ func synctestRun(f func()) {
// so timer goroutines inherit their child race context from g0. // so timer goroutines inherit their child race context from g0.
curg := gp.m.curg curg := gp.m.curg
gp.m.curg = nil gp.m.curg = nil
gp.bubble.timers.check(gp.bubble.now) gp.bubble.timers.check(bubble.now, bubble)
gp.m.curg = curg gp.m.curg = curg
}) })
gopark(synctestidle_c, nil, waitReasonSynctestRun, traceBlockSynctest, 0) gopark(synctestidle_c, nil, waitReasonSynctestRun, traceBlockSynctest, 0)

View File

@ -62,6 +62,7 @@ type timer struct {
isFake bool // timer is using fake time; immutable; can be read without lock isFake bool // timer is using fake time; immutable; can be read without lock
blocked uint32 // number of goroutines blocked on timer's channel blocked uint32 // number of goroutines blocked on timer's channel
rand uint32 // randomizes order of timers at same instant; only set when isFake
// Timer wakes up at when, and then at when+period, ... (period > 0 only) // Timer wakes up at when, and then at when+period, ... (period > 0 only)
// each time calling f(arg, seq, delay) in the timer goroutine, so f must be // each time calling f(arg, seq, delay) in the timer goroutine, so f must be
@ -156,8 +157,6 @@ type timers struct {
// heap[i].when over timers with the timerModified bit set. // heap[i].when over timers with the timerModified bit set.
// If minWhenModified = 0, it means there are no timerModified timers in the heap. // If minWhenModified = 0, it means there are no timerModified timers in the heap.
minWhenModified atomic.Int64 minWhenModified atomic.Int64
bubble *synctestBubble
} }
type timerWhen struct { type timerWhen struct {
@ -165,6 +164,21 @@ type timerWhen struct {
when int64 when int64
} }
// less reports whether tw is less than other.
func (tw timerWhen) less(other timerWhen) bool {
switch {
case tw.when < other.when:
return true
case tw.when > other.when:
return false
default:
// When timers wake at the same time, use a per-timer random value to order them.
// We only set the random value for timers using fake time, since there's
// no practical way to schedule real-time timers for the same instant.
return tw.timer.rand < other.timer.rand
}
}
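As a self-contained illustration of the tie-break described above (this sketch uses container/heap and an invented entry type, not the runtime's timer heap), two entries with the same deadline pop in an order decided by a per-entry random value assigned when they are pushed:

package main

import (
	"container/heap"
	"fmt"
	"math/rand/v2"
)

type entry struct {
	when int64
	rand uint32 // assigned at insertion; only consulted when deadlines collide
	name string
}

type byWhen []entry

func (h byWhen) Len() int      { return len(h) }
func (h byWhen) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
func (h byWhen) Less(i, j int) bool {
	if h[i].when != h[j].when {
		return h[i].when < h[j].when
	}
	return h[i].rand < h[j].rand
}
func (h *byWhen) Push(x any) { *h = append(*h, x.(entry)) }
func (h *byWhen) Pop() any {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	h := &byWhen{}
	for _, name := range []string{"t1", "t2"} {
		heap.Push(h, entry{when: 1, rand: rand.Uint32(), name: name})
	}
	// Either t1 or t2 may come out first, varying from run to run.
	fmt.Println(heap.Pop(h).(entry).name)
}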
func (ts *timers) lock() { func (ts *timers) lock() {
lock(&ts.mu) lock(&ts.mu)
} }
@ -387,7 +401,7 @@ func newTimer(when, period int64, f func(arg any, seq uintptr, delay int64), arg
throw("invalid timer channel: no capacity") throw("invalid timer channel: no capacity")
} }
} }
if gr := getg().bubble; gr != nil { if bubble := getg().bubble; bubble != nil {
t.isFake = true t.isFake = true
} }
t.modify(when, period, f, arg, 0) t.modify(when, period, f, arg, 0)
@ -469,7 +483,7 @@ func (t *timer) maybeRunAsync() {
// timer ourselves now is fine.) // timer ourselves now is fine.)
if now := nanotime(); t.when <= now { if now := nanotime(); t.when <= now {
systemstack(func() { systemstack(func() {
t.unlockAndRun(now) // resets t.when t.unlockAndRun(now, nil) // resets t.when
}) })
t.lock() t.lock()
} }
@ -605,6 +619,29 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
add := t.needsAdd() add := t.needsAdd()
if add && t.isFake {
// If this is a bubbled timer scheduled to fire immediately,
// run it now rather than waiting for the bubble's timer scheduler.
// This avoids deferring timer execution until after the bubble
// becomes durably blocked.
//
// Don't do this for non-bubbled timers: It isn't necessary,
// and there may be cases where the runtime executes timers with
// the expectation the timer func will not run in the current goroutine.
// Bubbled timers are always created by the time package, and are
// safe to run in the current goroutine.
bubble := getg().bubble
if bubble == nil {
throw("fake timer executing with no bubble")
}
if t.state&timerHeaped == 0 && when <= bubble.now {
systemstack(func() {
t.unlockAndRun(bubble.now, bubble)
})
return pending
}
}
if !async && t.isChan { if !async && t.isChan {
// Stop any future sends with stale values. // Stop any future sends with stale values.
// See timer.unlockAndRun. // See timer.unlockAndRun.
@ -641,7 +678,7 @@ func (t *timer) modify(when, period int64, f func(arg any, seq uintptr, delay in
// t must be locked. // t must be locked.
func (t *timer) needsAdd() bool { func (t *timer) needsAdd() bool {
assertLockHeld(&t.mu) assertLockHeld(&t.mu)
need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.isFake || t.blocked > 0) need := t.state&timerHeaped == 0 && t.when > 0 && (!t.isChan || t.blocked > 0)
if need { if need {
t.trace("needsAdd+") t.trace("needsAdd+")
} else { } else {
@ -696,6 +733,12 @@ func (t *timer) maybeAdd() {
when := int64(0) when := int64(0)
wake := false wake := false
if t.needsAdd() { if t.needsAdd() {
if t.isFake {
// Re-randomize timer order.
// We could do this for all timers, but unbubbled timers are highly
// unlikely to have the same when.
t.rand = cheaprand()
}
t.state |= timerHeaped t.state |= timerHeaped
when = t.when when = t.when
wakeTime := ts.wakeTime() wakeTime := ts.wakeTime()
@ -960,7 +1003,7 @@ func (ts *timers) wakeTime() int64 {
// We pass now in and out to avoid extra calls of nanotime. // We pass now in and out to avoid extra calls of nanotime.
// //
//go:yeswritebarrierrec //go:yeswritebarrierrec
func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) { func (ts *timers) check(now int64, bubble *synctestBubble) (rnow, pollUntil int64, ran bool) {
ts.trace("check") ts.trace("check")
// If it's not yet time for the first timer, or the first adjusted // If it's not yet time for the first timer, or the first adjusted
// timer, then there is nothing to do. // timer, then there is nothing to do.
@ -993,7 +1036,7 @@ func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
ts.adjust(now, false) ts.adjust(now, false)
for len(ts.heap) > 0 { for len(ts.heap) > 0 {
// Note that runtimer may temporarily unlock ts. // Note that runtimer may temporarily unlock ts.
if tw := ts.run(now); tw != 0 { if tw := ts.run(now, bubble); tw != 0 {
if tw > 0 { if tw > 0 {
pollUntil = tw pollUntil = tw
} }
@ -1025,7 +1068,7 @@ func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) {
// If a timer is run, this will temporarily unlock ts. // If a timer is run, this will temporarily unlock ts.
// //
//go:systemstack //go:systemstack
func (ts *timers) run(now int64) int64 { func (ts *timers) run(now int64, bubble *synctestBubble) int64 {
ts.trace("run") ts.trace("run")
assertLockHeld(&ts.mu) assertLockHeld(&ts.mu)
Redo: Redo:
@ -1059,7 +1102,7 @@ Redo:
return t.when return t.when
} }
t.unlockAndRun(now) t.unlockAndRun(now, bubble)
assertLockHeld(&ts.mu) // t is unlocked now, but not ts assertLockHeld(&ts.mu) // t is unlocked now, but not ts
return 0 return 0
} }
@ -1070,7 +1113,7 @@ Redo:
// unlockAndRun returns with t unlocked and t.ts (re-)locked. // unlockAndRun returns with t unlocked and t.ts (re-)locked.
// //
//go:systemstack //go:systemstack
func (t *timer) unlockAndRun(now int64) { func (t *timer) unlockAndRun(now int64, bubble *synctestBubble) {
t.trace("unlockAndRun") t.trace("unlockAndRun")
assertLockHeld(&t.mu) assertLockHeld(&t.mu)
if t.ts != nil { if t.ts != nil {
@ -1082,10 +1125,10 @@ func (t *timer) unlockAndRun(now int64) {
// out from under us while this function executes. // out from under us while this function executes.
gp := getg() gp := getg()
var tsLocal *timers var tsLocal *timers
if t.ts == nil || t.ts.bubble == nil { if bubble == nil {
tsLocal = &gp.m.p.ptr().timers tsLocal = &gp.m.p.ptr().timers
} else { } else {
tsLocal = &t.ts.bubble.timers tsLocal = &bubble.timers
} }
if tsLocal.raceCtx == 0 { if tsLocal.raceCtx == 0 {
tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum) tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum)
@ -1138,10 +1181,10 @@ func (t *timer) unlockAndRun(now int64) {
if gp.racectx != 0 { if gp.racectx != 0 {
throw("unexpected racectx") throw("unexpected racectx")
} }
if ts == nil || ts.bubble == nil { if bubble == nil {
gp.racectx = gp.m.p.ptr().timers.raceCtx gp.racectx = gp.m.p.ptr().timers.raceCtx
} else { } else {
gp.racectx = ts.bubble.timers.raceCtx gp.racectx = bubble.timers.raceCtx
} }
} }
@ -1149,14 +1192,14 @@ func (t *timer) unlockAndRun(now int64) {
ts.unlock() ts.unlock()
} }
if ts != nil && ts.bubble != nil { if bubble != nil {
// Temporarily use the timer's synctest group for the G running this timer. // Temporarily use the timer's synctest group for the G running this timer.
gp := getg() gp := getg()
if gp.bubble != nil { if gp.bubble != nil {
throw("unexpected syncgroup set") throw("unexpected syncgroup set")
} }
gp.bubble = ts.bubble gp.bubble = bubble
ts.bubble.changegstatus(gp, _Gdead, _Grunning) bubble.changegstatus(gp, _Gdead, _Grunning)
} }
if !async && t.isChan { if !async && t.isChan {
@ -1200,13 +1243,13 @@ func (t *timer) unlockAndRun(now int64) {
unlock(&t.sendLock) unlock(&t.sendLock)
} }
if ts != nil && ts.bubble != nil { if bubble != nil {
gp := getg() gp := getg()
ts.bubble.changegstatus(gp, _Grunning, _Gdead) bubble.changegstatus(gp, _Grunning, _Gdead)
if raceenabled { if raceenabled {
// Establish a happens-before between this timer event and // Establish a happens-before between this timer event and
// the next synctest.Wait call. // the next synctest.Wait call.
racereleasemergeg(gp, ts.bubble.raceaddr()) racereleasemergeg(gp, bubble.raceaddr())
} }
gp.bubble = nil gp.bubble = nil
} }
@ -1234,7 +1277,7 @@ func (ts *timers) verify() {
// The heap is timerHeapN-ary. See siftupTimer and siftdownTimer. // The heap is timerHeapN-ary. See siftupTimer and siftdownTimer.
p := int(uint(i-1) / timerHeapN) p := int(uint(i-1) / timerHeapN)
if tw.when < ts.heap[p].when { if tw.less(ts.heap[p]) {
print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n") print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", tw.when, "\n")
throw("bad timer heap") throw("bad timer heap")
} }
@ -1312,13 +1355,12 @@ func (ts *timers) siftUp(i int) {
badTimer() badTimer()
} }
tw := heap[i] tw := heap[i]
when := tw.when if tw.when <= 0 {
if when <= 0 {
badTimer() badTimer()
} }
for i > 0 { for i > 0 {
p := int(uint(i-1) / timerHeapN) // parent p := int(uint(i-1) / timerHeapN) // parent
if when >= heap[p].when { if !tw.less(heap[p]) {
break break
} }
heap[i] = heap[p] heap[i] = heap[p]
@ -1341,8 +1383,7 @@ func (ts *timers) siftDown(i int) {
return return
} }
tw := heap[i] tw := heap[i]
when := tw.when if tw.when <= 0 {
if when <= 0 {
badTimer() badTimer()
} }
for { for {
@ -1350,11 +1391,11 @@ func (ts *timers) siftDown(i int) {
if leftChild >= n { if leftChild >= n {
break break
} }
w := when w := tw
c := -1 c := -1
for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] { for j, tw := range heap[leftChild:min(leftChild+timerHeapN, n)] {
if tw.when < w { if tw.less(w) {
w = tw.when w = tw
c = leftChild + j c = leftChild + j
} }
} }
@ -1395,24 +1436,10 @@ func badTimer() {
// maybeRunChan checks whether the timer needs to run // maybeRunChan checks whether the timer needs to run
// to send a value to its associated channel. If so, it does. // to send a value to its associated channel. If so, it does.
// The timer must not be locked. // The timer must not be locked.
func (t *timer) maybeRunChan() { func (t *timer) maybeRunChan(c *hchan) {
if t.isFake { if t.isFake && getg().bubble != c.bubble {
t.lock() // This should have been checked by the caller, but check just in case.
var timerBubble *synctestBubble fatal("synctest timer accessed from outside bubble")
if t.ts != nil {
timerBubble = t.ts.bubble
}
t.unlock()
bubble := getg().bubble
if bubble == nil {
panic(plainError("synctest timer accessed from outside bubble"))
}
if timerBubble != nil && bubble != timerBubble {
panic(plainError("timer moved between synctest bubbles"))
}
// No need to do anything here.
// synctest.Run will run the timer when it advances its fake clock.
return
} }
if t.astate.Load()&timerHeaped != 0 { if t.astate.Load()&timerHeaped != 0 {
// If the timer is in the heap, the ordinary timer code // If the timer is in the heap, the ordinary timer code
@ -1422,6 +1449,9 @@ func (t *timer) maybeRunChan() {
t.lock() t.lock()
now := nanotime() now := nanotime()
if t.isFake {
now = getg().bubble.now
}
if t.state&timerHeaped != 0 || t.when == 0 || t.when > now { if t.state&timerHeaped != 0 || t.when == 0 || t.when > now {
t.trace("maybeRunChan-") t.trace("maybeRunChan-")
// Timer in the heap, or not running at all, or not triggered. // Timer in the heap, or not running at all, or not triggered.
@ -1430,7 +1460,7 @@ func (t *timer) maybeRunChan() {
} }
t.trace("maybeRunChan+") t.trace("maybeRunChan+")
systemstack(func() { systemstack(func() {
t.unlockAndRun(now) t.unlockAndRun(now, c.bubble)
}) })
} }
@ -1440,9 +1470,11 @@ func (t *timer) maybeRunChan() {
// adding it if needed. // adding it if needed.
func blockTimerChan(c *hchan) { func blockTimerChan(c *hchan) {
t := c.timer t := c.timer
if t.isFake { if t.isFake && c.bubble != getg().bubble {
return // This should have been checked by the caller, but check just in case.
fatal("synctest timer accessed from outside bubble")
} }
t.lock() t.lock()
t.trace("blockTimerChan") t.trace("blockTimerChan")
if !t.isChan { if !t.isChan {
@ -1480,9 +1512,6 @@ func blockTimerChan(c *hchan) {
// blocked on it anymore. // blocked on it anymore.
func unblockTimerChan(c *hchan) { func unblockTimerChan(c *hchan) {
t := c.timer t := c.timer
if t.isFake {
return
}
t.lock() t.lock()
t.trace("unblockTimerChan") t.trace("unblockTimerChan")
if !t.isChan || t.blocked == 0 { if !t.isChan || t.blocked == 0 {

View File

@ -23,7 +23,7 @@ func BenchmarkBinarySearchFloats(b *testing.B) {
needle := (floats[midpoint] + floats[midpoint+1]) / 2 needle := (floats[midpoint] + floats[midpoint+1]) / 2
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
slices.BinarySearch(floats, needle) _, _ = slices.BinarySearch(floats, needle)
} }
}) })
} }
@ -46,7 +46,7 @@ func BenchmarkBinarySearchFuncStruct(b *testing.B) {
cmpFunc := func(a, b *myStruct) int { return a.n - b.n } cmpFunc := func(a, b *myStruct) int { return a.n - b.n }
b.ResetTimer() b.ResetTimer()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
slices.BinarySearchFunc(structs, needle, cmpFunc) _, _ = slices.BinarySearchFunc(structs, needle, cmpFunc)
} }
}) })
} }

View File

@ -264,19 +264,19 @@ func TestMinMaxPanics(t *testing.T) {
intCmp := func(a, b int) int { return a - b } intCmp := func(a, b int) int { return a - b }
emptySlice := []int{} emptySlice := []int{}
if !panics(func() { Min(emptySlice) }) { if !panics(func() { _ = Min(emptySlice) }) {
t.Errorf("Min([]): got no panic, want panic") t.Errorf("Min([]): got no panic, want panic")
} }
if !panics(func() { Max(emptySlice) }) { if !panics(func() { _ = Max(emptySlice) }) {
t.Errorf("Max([]): got no panic, want panic") t.Errorf("Max([]): got no panic, want panic")
} }
if !panics(func() { MinFunc(emptySlice, intCmp) }) { if !panics(func() { _ = MinFunc(emptySlice, intCmp) }) {
t.Errorf("MinFunc([]): got no panic, want panic") t.Errorf("MinFunc([]): got no panic, want panic")
} }
if !panics(func() { MaxFunc(emptySlice, intCmp) }) { if !panics(func() { _ = MaxFunc(emptySlice, intCmp) }) {
t.Errorf("MaxFunc([]): got no panic, want panic") t.Errorf("MaxFunc([]): got no panic, want panic")
} }
} }

View File

@ -85,7 +85,7 @@ func BenchmarkSlicesIsSorted(b *testing.B) {
b.StopTimer() b.StopTimer()
ints := makeSortedInts(N) ints := makeSortedInts(N)
b.StartTimer() b.StartTimer()
slices.IsSorted(ints) _ = slices.IsSorted(ints)
} }
} }

View File

@ -900,6 +900,7 @@ type TB interface {
Skipped() bool Skipped() bool
TempDir() string TempDir() string
Context() context.Context Context() context.Context
Output() io.Writer
// A private method to prevent users implementing the // A private method to prevent users implementing the
// interface and so future additions to it will not // interface and so future additions to it will not
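Assuming the Output method added to TB above behaves like the concrete *T and *F methods, returning an io.Writer that feeds the test's log, a standard logger can be pointed at it. This is a hedged usage sketch; the package and test names are invented:

package worker_test

import (
	"log"
	"testing"
)

func TestWorker(t *testing.T) {
	// t.Output satisfies io.Writer, so log output lands in the test's
	// own log with the usual test attribution.
	logger := log.New(t.Output(), "worker: ", log.Lmsgprefix)
	logger.Println("starting")
}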

View File

@ -0,0 +1,88 @@
// compile
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
type Unsigned interface {
~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
}
// a Validator instance
type Validator []Validable
type Numeric interface {
~int | ~int8 | ~int16 | ~int32 | ~int64 | ~float32 | ~float64
}
func (v Validator) Valid() bool {
for _, field := range v {
if !field.Validate() {
return false
}
}
return true
}
type Validable interface {
Validate() bool
}
type FieldDef[T any] struct {
value T
rules []Rule[T]
}
func (f FieldDef[T]) Validate() bool {
for _, rule := range f.rules {
if !rule(f) {
return false
}
}
return true
}
type Rule[T any] = func(FieldDef[T]) bool
func Field[T any](value T, rules ...Rule[T]) *FieldDef[T] {
return &FieldDef[T]{value: value, rules: rules}
}
type StringRule = Rule[string]
type NumericRule[T Numeric] = Rule[T]
type UnsignedRule[T Unsigned] = Rule[T]
func MinS(n int) StringRule {
return func(fd FieldDef[string]) bool {
return len(fd.value) < n
}
}
func MinD[T Numeric](n T) NumericRule[T] {
return func(fd FieldDef[T]) bool {
return fd.value < n
}
}
func MinU[T Unsigned](n T) UnsignedRule[T] {
return func(fd FieldDef[T]) bool {
return fd.value < n
}
}
func main() {
v := Validator{
Field("test", MinS(5)),
}
if !v.Valid() {
println("invalid")
return
}
println("valid")
}

View File

@ -7,14 +7,16 @@
package p package p
// Test that when generating wrappers for methods, we generate a tail call to the pointer version of // Test that when generating wrappers for methods, we generate a tail call to the pointer version of
// the method. // the method, if that method is not inlineable. We use go:noinline here to force the non-inlineability
// condition.
//go:noinline
func (f *Foo) Get2Vals() [2]int { return [2]int{f.Val, f.Val + 1} } func (f *Foo) Get2Vals() [2]int { return [2]int{f.Val, f.Val + 1} }
func (f *Foo) Get3Vals() (int, int, int) { return f.Val, f.Val + 1, f.Val + 2 } func (f *Foo) Get3Vals() [3]int { return [3]int{f.Val, f.Val + 1, f.Val + 2} }
type Foo struct{ Val int } type Foo struct{ Val int }
type Bar struct { // ERROR "tail call emitted for the method \(\*Foo\).Get2Vals wrapper" "tail call emitted for the method \(\*Foo\).Get3Vals wrapper" type Bar struct { // ERROR "tail call emitted for the method \(\*Foo\).Get2Vals wrapper"
int64 int64
*Foo // needs a method wrapper *Foo // needs a method wrapper
string string