diff --git a/.github/ISSUE_TEMPLATE b/.github/ISSUE_TEMPLATE/00-bug.md
similarity index 80%
rename from .github/ISSUE_TEMPLATE
rename to .github/ISSUE_TEMPLATE/00-bug.md
index 5cbfc09fe7..f056dab7dd 100644
--- a/.github/ISSUE_TEMPLATE
+++ b/.github/ISSUE_TEMPLATE/00-bug.md
@@ -1,6 +1,11 @@
+---
+name: Bugs
+about: The go command, standard library, or anything else
+title: "affected/package: "
+---
+
### What version of Go are you using (`go version`)?
@@ -26,7 +31,7 @@ $ go env
@@ -36,3 +41,5 @@ A link on play.golang.org is best.
### What did you see instead?
+
+
diff --git a/.github/ISSUE_TEMPLATE/01-pkgsite.md b/.github/ISSUE_TEMPLATE/01-pkgsite.md
new file mode 100644
index 0000000000..fee00f5b27
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/01-pkgsite.md
@@ -0,0 +1,47 @@
+---
+name: Pkg.go.dev bugs or feature requests
+about: Issues or feature requests for the documentation site
+title: "x/pkgsite: "
+labels: pkgsite
+---
+
+
+
+### What is the URL of the page with the issue?
+
+
+
+### What is your user agent?
+
+
+
+
+
+### Screenshot
+
+
+
+
+
+### What did you do?
+
+
+
+
+
+### What did you expect to see?
+
+
+
+### What did you see instead?
+
+
diff --git a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md
new file mode 100644
index 0000000000..5c1eb94be6
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md
@@ -0,0 +1,39 @@
+---
+name: Pkg.go.dev package removal request
+about: Request a package be removed from the documentation site (pkg.go.dev)
+title: "x/pkgsite: package removal request for [type path here]"
+labels: pkgsite
+---
+
+
+
+### What is the path of the package that you would like to have removed?
+
+
+
+
+
+### Are you the owner of this package?
+
+
+
+
+
+### What is the reason that you could not retract this package instead?
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/03-gopls.md b/.github/ISSUE_TEMPLATE/03-gopls.md
new file mode 100644
index 0000000000..c4934c3898
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/03-gopls.md
@@ -0,0 +1,61 @@
+---
+name: Gopls bugs or feature requests
+about: Issues or feature requests for the Go language server (gopls)
+title: "x/tools/gopls: "
+labels: gopls Tools
+---
+
+
+
+### gopls version
+
+
+
+
+
+### go env
+
+
+
+
+### What did you do?
+
+
+
+
+
+### What did you expect to see?
+
+
+
+### What did you see instead?
+
+
+
+### Editor and settings
+
+
+
+
+
+### Logs
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/10-proposal.md b/.github/ISSUE_TEMPLATE/10-proposal.md
new file mode 100644
index 0000000000..ab30ddf417
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/10-proposal.md
@@ -0,0 +1,13 @@
+---
+name: Proposals
+about: New external API or other notable changes
+title: "proposal: affected/package: "
+labels: Proposal
+---
+
+
+
+
diff --git a/.github/ISSUE_TEMPLATE/11-language-change.md b/.github/ISSUE_TEMPLATE/11-language-change.md
new file mode 100644
index 0000000000..2032301327
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/11-language-change.md
@@ -0,0 +1,52 @@
+---
+name: Language Change Proposals
+about: Changes to the language
+title: "proposal: Go 2: "
+labels: Proposal Go2 LanguageChange
+---
+
+
+
+### Author background
+
+- **Would you consider yourself a novice, intermediate, or experienced Go programmer?**
+- **What other languages do you have experience with?**
+
+### Related proposals
+
+- **Has this idea, or one like it, been proposed before?**
+ - **If so, how does this proposal differ?**
+- **Does this affect error handling?**
+ - **If so, how does this differ from previous error handling proposals?**
+- **Is this about generics?**
+ - **If so, how does this relate to the accepted design and other generics proposals?**
+
+### Proposal
+
+- **What is the proposed change?**
+- **Who does this proposal help, and why?**
+- **Please describe as precisely as possible the change to the language.**
+- **What would change in the language spec?**
+- **Please also describe the change informally, as in a class teaching Go.**
+- **Is this change backward compatible?**
+ - Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit.
+ Show example code before and after the change.
+ - **Before**
+ - **After**
+- **Orthogonality: how does this change interact or overlap with existing features?**
+- **Is the goal of this change a performance improvement?**
+ - **If so, what quantifiable improvement should we expect?**
+ - **How would we measure it?**
+
+### Costs
+
+- **Would this change make Go easier or harder to learn, and why?**
+- **What is the cost of this proposal? (Every language change has a cost).**
+- **How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected?**
+- **What is the compile time cost?**
+- **What is the run time cost?**
+- **Can you describe a possible implementation?**
+- **Do you have a prototype? (This is not required.)**
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000000..c07f1e4d1c
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+ - name: Questions
+ about: Please use one of the forums for questions or general discussions
+ url: https://go.dev/wiki/Questions
diff --git a/AUTHORS b/AUTHORS
index 8d7e196732..1a4a57bae7 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -209,6 +209,7 @@ Benjamin Hsieh
Benny Siegert
Benoit Sigoure
Berengar Lehr
+Bharath Kumar Uppala
Bill Zissimopoulos
Billie Harold Cleek
Bjorn Tillenius
@@ -262,6 +263,7 @@ Casey Callendrello
Casey Marshall
Cezar Sá Espinola
ChaiShushan
+Chaoqun Han
Charles Fenwick Elliott
Charles L. Dorian
Charles Lee
@@ -1479,6 +1481,7 @@ Zemanta d.o.o.
Zev Goldstein
Zheng Dayu
Zhongtao Chen
+Zhou Guangyuan
Zhou Peng
Ziad Hatahet
Zizhao Zhang
diff --git a/CONTRIBUTORS b/CONTRIBUTORS
index a548cb0e2f..e2e102f610 100644
--- a/CONTRIBUTORS
+++ b/CONTRIBUTORS
@@ -368,6 +368,7 @@ Benny Siegert
Benoit Sigoure
Berengar Lehr
Berkant Ipek <41230766+0xbkt@users.noreply.github.com>
+Bharath Kumar Uppala
Bharath Thiruveedula
Bhavin Gandhi
Bill Neubauer
@@ -475,6 +476,7 @@ ChaiShushan
Changkun Ou
Channing Kimble-Brown
Chao Xu
+Chaoqun Han
Charles Fenwick Elliott
Charles Kenney
Charles L. Dorian
@@ -2746,6 +2748,7 @@ Zhengyu He
Zhongpeng Lin
Zhongtao Chen
Zhongwei Yao
+Zhou Guangyuan
Zhou Peng
Ziad Hatahet
Ziheng Liu
diff --git a/api/go1.18.txt b/api/go1.18.txt
new file mode 100644
index 0000000000..2d05c3f41c
--- /dev/null
+++ b/api/go1.18.txt
@@ -0,0 +1,239 @@
+pkg bufio, method (*Writer) AvailableBuffer() []uint8
+pkg bufio, method (ReadWriter) AvailableBuffer() []uint8
+pkg bytes, func Cut([]uint8, []uint8) ([]uint8, []uint8, bool)
+pkg constraints, type Complex interface {}
+pkg constraints, type Float interface {}
+pkg constraints, type Integer interface {}
+pkg constraints, type Ordered interface {}
+pkg constraints, type Signed interface {}
+pkg constraints, type Unsigned interface {}
+pkg crypto/tls, method (*Conn) NetConn() net.Conn
+pkg debug/buildinfo, func Read(io.ReaderAt) (*debug.BuildInfo, error)
+pkg debug/buildinfo, func ReadFile(string) (*debug.BuildInfo, error)
+pkg debug/buildinfo, type BuildInfo = debug.BuildInfo
+pkg debug/elf, const R_PPC64_RELATIVE = 22
+pkg debug/elf, const R_PPC64_RELATIVE R_PPC64
+pkg debug/dwarf, type BasicType struct, DataBitOffset int64
+pkg debug/dwarf, type StructField struct, DataBitOffset int64
+pkg debug/plan9obj, var ErrNoSymbols error
+pkg go/ast, method (*IndexListExpr) End() token.Pos
+pkg go/ast, method (*IndexListExpr) Pos() token.Pos
+pkg go/ast, type FuncType struct, TypeParams *FieldList
+pkg go/ast, type IndexListExpr struct
+pkg go/ast, type IndexListExpr struct, Indices []Expr
+pkg go/ast, type IndexListExpr struct, Lbrack token.Pos
+pkg go/ast, type IndexListExpr struct, Rbrack token.Pos
+pkg go/ast, type IndexListExpr struct, X Expr
+pkg go/ast, type TypeSpec struct, TypeParams *FieldList
+pkg go/constant, method (Kind) String() string
+pkg go/token, const TILDE = 88
+pkg go/token, const TILDE Token
+pkg go/types, func Instantiate(*Context, Type, []Type, bool) (Type, error)
+pkg go/types, func NewContext() *Context
+pkg go/types, func NewSignatureType(*Var, []*TypeParam, []*TypeParam, *Tuple, *Tuple, bool) *Signature
+pkg go/types, func NewTerm(bool, Type) *Term
+pkg go/types, func NewTypeParam(*TypeName, Type) *TypeParam
+pkg go/types, func NewUnion([]*Term) *Union
+pkg go/types, method (*ArgumentError) Error() string
+pkg go/types, method (*ArgumentError) Unwrap() error
+pkg go/types, method (*Interface) IsComparable() bool
+pkg go/types, method (*Interface) IsImplicit() bool
+pkg go/types, method (*Interface) IsMethodSet() bool
+pkg go/types, method (*Interface) MarkImplicit()
+pkg go/types, method (*Named) Origin() *Named
+pkg go/types, method (*Named) SetTypeParams([]*TypeParam)
+pkg go/types, method (*Named) TypeArgs() *TypeList
+pkg go/types, method (*Named) TypeParams() *TypeParamList
+pkg go/types, method (*Signature) RecvTypeParams() *TypeParamList
+pkg go/types, method (*Signature) TypeParams() *TypeParamList
+pkg go/types, method (*Term) String() string
+pkg go/types, method (*Term) Tilde() bool
+pkg go/types, method (*Term) Type() Type
+pkg go/types, method (*TypeList) At(int) Type
+pkg go/types, method (*TypeList) Len() int
+pkg go/types, method (*TypeParam) Constraint() Type
+pkg go/types, method (*TypeParam) Index() int
+pkg go/types, method (*TypeParam) Obj() *TypeName
+pkg go/types, method (*TypeParam) SetConstraint(Type)
+pkg go/types, method (*TypeParam) String() string
+pkg go/types, method (*TypeParam) Underlying() Type
+pkg go/types, method (*TypeParamList) At(int) *TypeParam
+pkg go/types, method (*TypeParamList) Len() int
+pkg go/types, method (*Union) Len() int
+pkg go/types, method (*Union) String() string
+pkg go/types, method (*Union) Term(int) *Term
+pkg go/types, method (*Union) Underlying() Type
+pkg go/types, type ArgumentError struct
+pkg go/types, type ArgumentError struct, Err error
+pkg go/types, type ArgumentError struct, Index int
+pkg go/types, type Config struct, Context *Context
+pkg go/types, type Config struct, GoVersion string
+pkg go/types, type Context struct
+pkg go/types, type Info struct, Instances map[*ast.Ident]Instance
+pkg go/types, type Instance struct
+pkg go/types, type Instance struct, Type Type
+pkg go/types, type Instance struct, TypeArgs *TypeList
+pkg go/types, type Term struct
+pkg go/types, type TypeList struct
+pkg go/types, type TypeParam struct
+pkg go/types, type TypeParamList struct
+pkg go/types, type Union struct
+pkg net, func TCPAddrFromAddrPort(netip.AddrPort) *TCPAddr
+pkg net, func UDPAddrFromAddrPort(netip.AddrPort) *UDPAddr
+pkg net, method (*Resolver) LookupNetIP(context.Context, string, string) ([]netip.Addr, error)
+pkg net, method (*TCPAddr) AddrPort() netip.AddrPort
+pkg net, method (*UDPAddr) AddrPort() netip.AddrPort
+pkg net, method (*UDPConn) ReadFromUDPAddrPort([]uint8) (int, netip.AddrPort, error)
+pkg net, method (*UDPConn) ReadMsgUDPAddrPort([]uint8, []uint8) (int, int, int, netip.AddrPort, error)
+pkg net, method (*UDPConn) WriteMsgUDPAddrPort([]uint8, []uint8, netip.AddrPort) (int, int, error)
+pkg net, method (*UDPConn) WriteToUDPAddrPort([]uint8, netip.AddrPort) (int, error)
+pkg net/http, func MaxBytesHandler(Handler, int64) Handler
+pkg net/http, method (*Cookie) Valid() error
+pkg net/netip, func AddrFrom16([16]uint8) Addr
+pkg net/netip, func AddrFrom4([4]uint8) Addr
+pkg net/netip, func AddrFromSlice([]uint8) (Addr, bool)
+pkg net/netip, func AddrPortFrom(Addr, uint16) AddrPort
+pkg net/netip, func IPv4Unspecified() Addr
+pkg net/netip, func IPv6LinkLocalAllNodes() Addr
+pkg net/netip, func IPv6Unspecified() Addr
+pkg net/netip, func MustParseAddr(string) Addr
+pkg net/netip, func MustParseAddrPort(string) AddrPort
+pkg net/netip, func MustParsePrefix(string) Prefix
+pkg net/netip, func ParseAddr(string) (Addr, error)
+pkg net/netip, func ParseAddrPort(string) (AddrPort, error)
+pkg net/netip, func ParsePrefix(string) (Prefix, error)
+pkg net/netip, func PrefixFrom(Addr, int) Prefix
+pkg net/netip, method (*Addr) UnmarshalBinary([]uint8) error
+pkg net/netip, method (*Addr) UnmarshalText([]uint8) error
+pkg net/netip, method (*AddrPort) UnmarshalBinary([]uint8) error
+pkg net/netip, method (*AddrPort) UnmarshalText([]uint8) error
+pkg net/netip, method (*Prefix) UnmarshalBinary([]uint8) error
+pkg net/netip, method (*Prefix) UnmarshalText([]uint8) error
+pkg net/netip, method (Addr) AppendTo([]uint8) []uint8
+pkg net/netip, method (Addr) As16() [16]uint8
+pkg net/netip, method (Addr) As4() [4]uint8
+pkg net/netip, method (Addr) AsSlice() []uint8
+pkg net/netip, method (Addr) BitLen() int
+pkg net/netip, method (Addr) Compare(Addr) int
+pkg net/netip, method (Addr) Is4() bool
+pkg net/netip, method (Addr) Is4In6() bool
+pkg net/netip, method (Addr) Is6() bool
+pkg net/netip, method (Addr) IsGlobalUnicast() bool
+pkg net/netip, method (Addr) IsInterfaceLocalMulticast() bool
+pkg net/netip, method (Addr) IsLinkLocalMulticast() bool
+pkg net/netip, method (Addr) IsLinkLocalUnicast() bool
+pkg net/netip, method (Addr) IsLoopback() bool
+pkg net/netip, method (Addr) IsMulticast() bool
+pkg net/netip, method (Addr) IsPrivate() bool
+pkg net/netip, method (Addr) IsUnspecified() bool
+pkg net/netip, method (Addr) IsValid() bool
+pkg net/netip, method (Addr) Less(Addr) bool
+pkg net/netip, method (Addr) MarshalBinary() ([]uint8, error)
+pkg net/netip, method (Addr) MarshalText() ([]uint8, error)
+pkg net/netip, method (Addr) Next() Addr
+pkg net/netip, method (Addr) Prefix(int) (Prefix, error)
+pkg net/netip, method (Addr) Prev() Addr
+pkg net/netip, method (Addr) String() string
+pkg net/netip, method (Addr) StringExpanded() string
+pkg net/netip, method (Addr) Unmap() Addr
+pkg net/netip, method (Addr) WithZone(string) Addr
+pkg net/netip, method (Addr) Zone() string
+pkg net/netip, method (AddrPort) Addr() Addr
+pkg net/netip, method (AddrPort) AppendTo([]uint8) []uint8
+pkg net/netip, method (AddrPort) IsValid() bool
+pkg net/netip, method (AddrPort) MarshalBinary() ([]uint8, error)
+pkg net/netip, method (AddrPort) MarshalText() ([]uint8, error)
+pkg net/netip, method (AddrPort) Port() uint16
+pkg net/netip, method (AddrPort) String() string
+pkg net/netip, method (Prefix) Addr() Addr
+pkg net/netip, method (Prefix) AppendTo([]uint8) []uint8
+pkg net/netip, method (Prefix) Bits() int
+pkg net/netip, method (Prefix) Contains(Addr) bool
+pkg net/netip, method (Prefix) IsSingleIP() bool
+pkg net/netip, method (Prefix) IsValid() bool
+pkg net/netip, method (Prefix) MarshalBinary() ([]uint8, error)
+pkg net/netip, method (Prefix) MarshalText() ([]uint8, error)
+pkg net/netip, method (Prefix) Masked() Prefix
+pkg net/netip, method (Prefix) Overlaps(Prefix) bool
+pkg net/netip, method (Prefix) String() string
+pkg net/netip, type Addr struct
+pkg net/netip, type AddrPort struct
+pkg net/netip, type Prefix struct
+pkg reflect, const Pointer = 22
+pkg reflect, const Pointer Kind
+pkg reflect, func PointerTo(Type) Type
+pkg reflect, method (*MapIter) Reset(Value)
+pkg reflect, method (Value) CanComplex() bool
+pkg reflect, method (Value) CanFloat() bool
+pkg reflect, method (Value) CanInt() bool
+pkg reflect, method (Value) CanUint() bool
+pkg reflect, method (Value) FieldByIndexErr([]int) (Value, error)
+pkg reflect, method (Value) SetIterKey(*MapIter)
+pkg reflect, method (Value) SetIterValue(*MapIter)
+pkg reflect, method (Value) UnsafePointer() unsafe.Pointer
+pkg runtime/debug, method (*BuildInfo) MarshalText() ([]uint8, error)
+pkg runtime/debug, method (*BuildInfo) UnmarshalText([]uint8) error
+pkg runtime/debug, type BuildInfo struct, GoVersion string
+pkg runtime/debug, type BuildInfo struct, Settings []BuildSetting
+pkg runtime/debug, type BuildSetting struct
+pkg runtime/debug, type BuildSetting struct, Key string
+pkg runtime/debug, type BuildSetting struct, Value string
+pkg strings, func Clone(string) string
+pkg strings, func Cut(string, string) (string, string, bool)
+pkg sync, method (*Mutex) TryLock() bool
+pkg sync, method (*RWMutex) TryLock() bool
+pkg sync, method (*RWMutex) TryRLock() bool
+pkg syscall (freebsd-386), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (freebsd-386-cgo), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (freebsd-amd64), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (freebsd-amd64-cgo), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (freebsd-arm), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (freebsd-arm-cgo), type SysProcAttr struct, Pdeathsig Signal
+pkg syscall (windows-386), func SyscallN(uintptr, ...uintptr) (uintptr, uintptr, Errno)
+pkg syscall (windows-amd64), func SyscallN(uintptr, ...uintptr) (uintptr, uintptr, Errno)
+pkg testing, func MainStart(testDeps, []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) *M
+pkg testing, method (*F) Add(...interface{})
+pkg testing, method (*F) Cleanup(func())
+pkg testing, method (*F) Error(...interface{})
+pkg testing, method (*F) Errorf(string, ...interface{})
+pkg testing, method (*F) Fail()
+pkg testing, method (*F) FailNow()
+pkg testing, method (*F) Failed() bool
+pkg testing, method (*F) Fatal(...interface{})
+pkg testing, method (*F) Fatalf(string, ...interface{})
+pkg testing, method (*F) Fuzz(interface{})
+pkg testing, method (*F) Helper()
+pkg testing, method (*F) Log(...interface{})
+pkg testing, method (*F) Logf(string, ...interface{})
+pkg testing, method (*F) Name() string
+pkg testing, method (*F) Setenv(string, string)
+pkg testing, method (*F) Skip(...interface{})
+pkg testing, method (*F) SkipNow()
+pkg testing, method (*F) Skipf(string, ...interface{})
+pkg testing, method (*F) Skipped() bool
+pkg testing, method (*F) TempDir() string
+pkg testing, type F struct
+pkg testing, type InternalFuzzTarget struct
+pkg testing, type InternalFuzzTarget struct, Fn func(*F)
+pkg testing, type InternalFuzzTarget struct, Name string
+pkg text/template/parse, const NodeBreak = 21
+pkg text/template/parse, const NodeBreak NodeType
+pkg text/template/parse, const NodeContinue = 22
+pkg text/template/parse, const NodeContinue NodeType
+pkg text/template/parse, method (*BreakNode) Copy() Node
+pkg text/template/parse, method (*BreakNode) String() string
+pkg text/template/parse, method (*ContinueNode) Copy() Node
+pkg text/template/parse, method (*ContinueNode) String() string
+pkg text/template/parse, method (BreakNode) Position() Pos
+pkg text/template/parse, method (BreakNode) Type() NodeType
+pkg text/template/parse, method (ContinueNode) Position() Pos
+pkg text/template/parse, method (ContinueNode) Type() NodeType
+pkg text/template/parse, type BreakNode struct
+pkg text/template/parse, type BreakNode struct, Line int
+pkg text/template/parse, type BreakNode struct, embedded NodeType
+pkg text/template/parse, type BreakNode struct, embedded Pos
+pkg text/template/parse, type ContinueNode struct
+pkg text/template/parse, type ContinueNode struct, Line int
+pkg text/template/parse, type ContinueNode struct, embedded NodeType
+pkg text/template/parse, type ContinueNode struct, embedded Pos
+pkg unicode/utf8, func AppendRune([]uint8, int32) []uint8
diff --git a/api/next.txt b/api/next.txt
index cc4120b7ab..e69de29bb2 100644
--- a/api/next.txt
+++ b/api/next.txt
@@ -1,47 +0,0 @@
-pkg debug/buildinfo, func Read(io.ReaderAt) (*debug.BuildInfo, error)
-pkg debug/buildinfo, func ReadFile(string) (*debug.BuildInfo, error)
-pkg debug/buildinfo, type BuildInfo = debug.BuildInfo
-pkg runtime/debug, method (*BuildInfo) MarshalText() ([]byte, error)
-pkg runtime/debug, method (*BuildInfo) UnmarshalText() ([]byte, error)
-pkg runtime/debug, type BuildInfo struct, GoVersion string
-pkg runtime/debug, type BuildInfo struct, Settings []BuildSetting
-pkg runtime/debug, type BuildSetting struct
-pkg runtime/debug, type BuildSetting struct, Key string
-pkg runtime/debug, type BuildSetting struct, Value string
-pkg testing, func Fuzz(func(*F)) FuzzResult
-pkg testing, func MainStart(testDeps, []InternalTest, []InternalBenchmark, []InternalFuzzTarget, []InternalExample) *M
-pkg testing, func RunFuzzTargets(func(string, string) (bool, error), []InternalFuzzTarget) bool
-pkg testing, func RunFuzzing(func(string, string) (bool, error), []InternalFuzzTarget) bool
-pkg testing, method (*B) Setenv(string, string)
-pkg testing, method (*F) Add(...interface{})
-pkg testing, method (*F) Cleanup(func())
-pkg testing, method (*F) Error(...interface{})
-pkg testing, method (*F) Errorf(string, ...interface{})
-pkg testing, method (*F) Fail()
-pkg testing, method (*F) FailNow()
-pkg testing, method (*F) Failed() bool
-pkg testing, method (*F) Fatal(...interface{})
-pkg testing, method (*F) Fatalf(string, ...interface{})
-pkg testing, method (*F) Fuzz(interface{})
-pkg testing, method (*F) Helper()
-pkg testing, method (*F) Log(...interface{})
-pkg testing, method (*F) Logf(string, ...interface{})
-pkg testing, method (*F) Name() string
-pkg testing, method (*F) Setenv(string, string)
-pkg testing, method (*F) Skip(...interface{})
-pkg testing, method (*F) SkipNow()
-pkg testing, method (*F) Skipf(string, ...interface{})
-pkg testing, method (*F) Skipped() bool
-pkg testing, method (*F) TempDir() string
-pkg testing, method (*T) Setenv(string, string)
-pkg testing, method (FuzzResult) String() string
-pkg testing, type F struct
-pkg testing, type FuzzResult struct
-pkg testing, type FuzzResult struct, Crasher entry
-pkg testing, type FuzzResult struct, Error error
-pkg testing, type FuzzResult struct, N int
-pkg testing, type FuzzResult struct, T time.Duration
-pkg testing, type InternalFuzzTarget struct
-pkg testing, type InternalFuzzTarget struct, Fn func(*F)
-pkg testing, type InternalFuzzTarget struct, Name string
-pkg net/http, method (*Cookie) Valid() error
diff --git a/doc/go1.17_spec.html b/doc/go1.17_spec.html
new file mode 100644
index 0000000000..0b374e7bfb
--- /dev/null
+++ b/doc/go1.17_spec.html
@@ -0,0 +1,6858 @@
+
+
+Introduction
+
+
+This is a reference manual for the Go programming language. For
+more information and other documents, see golang.org.
+
+
+
+Go is a general-purpose language designed with systems programming
+in mind. It is strongly typed and garbage-collected and has explicit
+support for concurrent programming. Programs are constructed from
+packages, whose properties allow efficient management of
+dependencies.
+
+
+
+The grammar is compact and simple to parse, allowing for easy analysis
+by automatic tools such as integrated development environments.
+
+
+Notation
+
+The syntax is specified using Extended Backus-Naur Form (EBNF):
+
+
+
+Production = production_name "=" [ Expression ] "." .
+Expression = Alternative { "|" Alternative } .
+Alternative = Term { Term } .
+Term = production_name | token [ "…" token ] | Group | Option | Repetition .
+Group = "(" Expression ")" .
+Option = "[" Expression "]" .
+Repetition = "{" Expression "}" .
+
+
+
+Productions are expressions constructed from terms and the following
+operators, in increasing precedence:
+
+
+| alternation
+() grouping
+[] option (0 or 1 times)
+{} repetition (0 to n times)
+
+
+
+Lower-case production names are used to identify lexical tokens.
+Non-terminals are in CamelCase. Lexical tokens are enclosed in
+double quotes "" or back quotes ``.
+
+
+
+The form a … b represents the set of characters from
+a through b as alternatives. The horizontal
+ellipsis … is also used elsewhere in the spec to informally denote various
+enumerations or code snippets that are not further specified. The character …
+(as opposed to the three characters ...) is not a token of the Go
+language.
+
+
+Source code representation
+
+
+Source code is Unicode text encoded in
+UTF-8. The text is not
+canonicalized, so a single accented code point is distinct from the
+same character constructed from combining an accent and a letter;
+those are treated as two code points. For simplicity, this document
+will use the unqualified term character to refer to a Unicode code point
+in the source text.
+
+
+Each code point is distinct; for instance, upper and lower case letters
+are different characters.
+
+
+Implementation restriction: For compatibility with other tools, a
+compiler may disallow the NUL character (U+0000) in the source text.
+
+
+Implementation restriction: For compatibility with other tools, a
+compiler may ignore a UTF-8-encoded byte order mark
+(U+FEFF) if it is the first Unicode code point in the source text.
+A byte order mark may be disallowed anywhere else in the source.
+
+
+Characters
+
+
+The following terms are used to denote specific Unicode character classes:
+
+
+newline = /* the Unicode code point U+000A */ .
+unicode_char = /* an arbitrary Unicode code point except newline */ .
+unicode_letter = /* a Unicode code point classified as "Letter" */ .
+unicode_digit = /* a Unicode code point classified as "Number, decimal digit" */ .
+
+
+
+In The Unicode Standard 8.0,
+Section 4.5 "General Category" defines a set of character categories.
+Go treats all characters in any of the Letter categories Lu, Ll, Lt, Lm, or Lo
+as Unicode letters, and those in the Number category Nd as Unicode digits.
+
+
+Letters and digits
+
+
+The underscore character _ (U+005F) is considered a letter.
+
+
+letter = unicode_letter | "_" .
+decimal_digit = "0" … "9" .
+binary_digit = "0" | "1" .
+octal_digit = "0" … "7" .
+hex_digit = "0" … "9" | "A" … "F" | "a" … "f" .
+
+
+Lexical elements
+
+
+
+
+Comments serve as program documentation. There are two forms:
+
+
+
+-
+Line comments start with the character sequence
//
+and stop at the end of the line.
+
+-
+General comments start with the character sequence
/*
+and stop with the first subsequent character sequence */.
+
+
+
+
+A comment cannot start inside a rune or
+string literal, or inside a comment.
+A general comment containing no newlines acts like a space.
+Any other comment acts like a newline.
+
+
+Tokens
+
+
+Tokens form the vocabulary of the Go language.
+There are four classes: identifiers, keywords, operators
+and punctuation, and literals. White space, formed from
+spaces (U+0020), horizontal tabs (U+0009),
+carriage returns (U+000D), and newlines (U+000A),
+is ignored except as it separates tokens
+that would otherwise combine into a single token. Also, a newline or end of file
+may trigger the insertion of a semicolon.
+While breaking the input into tokens,
+the next token is the longest sequence of characters that form a
+valid token.
+
+
+Semicolons
+
+
+The formal grammar uses semicolons ";" as terminators in
+a number of productions. Go programs may omit most of these semicolons
+using the following two rules:
+
+
+
+-
+When the input is broken into tokens, a semicolon is automatically inserted
+into the token stream immediately after a line's final token if that token is
+
+ - an
+ identifier
+
+
+ - an
+ integer,
+ floating-point,
+ imaginary,
+ rune, or
+ string literal
+
+
+ - one of the keywords
+
break,
+ continue,
+ fallthrough, or
+ return
+
+
+ - one of the operators and punctuation
+
++,
+ --,
+ ),
+ ], or
+ }
+
+
+
+
+-
+To allow complex statements to occupy a single line, a semicolon
+may be omitted before a closing
")" or "}".
+
+
+
+
+To reflect idiomatic use, code examples in this document elide semicolons
+using these rules.
+
+
+
+Identifiers
+
+
+Identifiers name program entities such as variables and types.
+An identifier is a sequence of one or more letters and digits.
+The first character in an identifier must be a letter.
+
+
+identifier = letter { letter | unicode_digit } .
+
+
+a
+_x9
+ThisVariableIsExported
+αβ
+
+
+
+Some identifiers are predeclared.
+
+
+
+Keywords
+
+
+The following keywords are reserved and may not be used as identifiers.
+
+
+break default func interface select
+case defer go map struct
+chan else goto package switch
+const fallthrough if range type
+continue for import return var
+
+
+Operators and punctuation
+
+
+The following character sequences represent operators
+(including assignment operators) and punctuation:
+
+
++ & += &= && == != ( )
+- | -= |= || < <= [ ]
+* ^ *= ^= <- > >= { }
+/ << /= <<= ++ = := , ;
+% >> %= >>= -- ! ... . :
+ &^ &^=
+
+
+Integer literals
+
+
+An integer literal is a sequence of digits representing an
+integer constant.
+An optional prefix sets a non-decimal base: 0b or 0B
+for binary, 0, 0o, or 0O for octal,
+and 0x or 0X for hexadecimal.
+A single 0 is considered a decimal zero.
+In hexadecimal literals, letters a through f
+and A through F represent values 10 through 15.
+
+
+
+For readability, an underscore character _ may appear after
+a base prefix or between successive digits; such underscores do not change
+the literal's value.
+
+
+int_lit = decimal_lit | binary_lit | octal_lit | hex_lit .
+decimal_lit = "0" | ( "1" … "9" ) [ [ "_" ] decimal_digits ] .
+binary_lit = "0" ( "b" | "B" ) [ "_" ] binary_digits .
+octal_lit = "0" [ "o" | "O" ] [ "_" ] octal_digits .
+hex_lit = "0" ( "x" | "X" ) [ "_" ] hex_digits .
+
+decimal_digits = decimal_digit { [ "_" ] decimal_digit } .
+binary_digits = binary_digit { [ "_" ] binary_digit } .
+octal_digits = octal_digit { [ "_" ] octal_digit } .
+hex_digits = hex_digit { [ "_" ] hex_digit } .
+
+
+
+42
+4_2
+0600
+0_600
+0o600
+0O600 // second character is capital letter 'O'
+0xBadFace
+0xBad_Face
+0x_67_7a_2f_cc_40_c6
+170141183460469231731687303715884105727
+170_141183_460469_231731_687303_715884_105727
+
+_42 // an identifier, not an integer literal
+42_ // invalid: _ must separate successive digits
+4__2 // invalid: only one _ at a time
+0_xBadFace // invalid: _ must separate successive digits
+
+
+
+Floating-point literals
+
+
+A floating-point literal is a decimal or hexadecimal representation of a
+floating-point constant.
+
+
+
+A decimal floating-point literal consists of an integer part (decimal digits),
+a decimal point, a fractional part (decimal digits), and an exponent part
+(e or E followed by an optional sign and decimal digits).
+One of the integer part or the fractional part may be elided; one of the decimal point
+or the exponent part may be elided.
+An exponent value exp scales the mantissa (integer and fractional part) by 10exp.
+
+
+
+A hexadecimal floating-point literal consists of a 0x or 0X
+prefix, an integer part (hexadecimal digits), a radix point, a fractional part (hexadecimal digits),
+and an exponent part (p or P followed by an optional sign and decimal digits).
+One of the integer part or the fractional part may be elided; the radix point may be elided as well,
+but the exponent part is required. (This syntax matches the one given in IEEE 754-2008 §5.12.3.)
+An exponent value exp scales the mantissa (integer and fractional part) by 2exp.
+
+
+
+For readability, an underscore character _ may appear after
+a base prefix or between successive digits; such underscores do not change
+the literal value.
+
+
+
+float_lit = decimal_float_lit | hex_float_lit .
+
+decimal_float_lit = decimal_digits "." [ decimal_digits ] [ decimal_exponent ] |
+ decimal_digits decimal_exponent |
+ "." decimal_digits [ decimal_exponent ] .
+decimal_exponent = ( "e" | "E" ) [ "+" | "-" ] decimal_digits .
+
+hex_float_lit = "0" ( "x" | "X" ) hex_mantissa hex_exponent .
+hex_mantissa = [ "_" ] hex_digits "." [ hex_digits ] |
+ [ "_" ] hex_digits |
+ "." hex_digits .
+hex_exponent = ( "p" | "P" ) [ "+" | "-" ] decimal_digits .
+
+
+
+0.
+72.40
+072.40 // == 72.40
+2.71828
+1.e+0
+6.67428e-11
+1E6
+.25
+.12345E+5
+1_5. // == 15.0
+0.15e+0_2 // == 15.0
+
+0x1p-2 // == 0.25
+0x2.p10 // == 2048.0
+0x1.Fp+0 // == 1.9375
+0X.8p-0 // == 0.5
+0X_1FFFP-16 // == 0.1249847412109375
+0x15e-2 // == 0x15e - 2 (integer subtraction)
+
+0x.p1 // invalid: mantissa has no digits
+1p-2 // invalid: p exponent requires hexadecimal mantissa
+0x1.5e-2 // invalid: hexadecimal mantissa requires p exponent
+1_.5 // invalid: _ must separate successive digits
+1._5 // invalid: _ must separate successive digits
+1.5_e1 // invalid: _ must separate successive digits
+1.5e_1 // invalid: _ must separate successive digits
+1.5e1_ // invalid: _ must separate successive digits
+
+
+
+Imaginary literals
+
+
+An imaginary literal represents the imaginary part of a
+complex constant.
+It consists of an integer or
+floating-point literal
+followed by the lower-case letter i.
+The value of an imaginary literal is the value of the respective
+integer or floating-point literal multiplied by the imaginary unit i.
+
+
+
+imaginary_lit = (decimal_digits | int_lit | float_lit) "i" .
+
+
+
+For backward compatibility, an imaginary literal's integer part consisting
+entirely of decimal digits (and possibly underscores) is considered a decimal
+integer, even if it starts with a leading 0.
+
+
+
+0i
+0123i // == 123i for backward-compatibility
+0o123i // == 0o123 * 1i == 83i
+0xabci // == 0xabc * 1i == 2748i
+0.i
+2.71828i
+1.e+0i
+6.67428e-11i
+1E6i
+.25i
+.12345E+5i
+0x1p-2i // == 0x1p-2 * 1i == 0.25i
+
+
+
+Rune literals
+
+
+A rune literal represents a rune constant,
+an integer value identifying a Unicode code point.
+A rune literal is expressed as one or more characters enclosed in single quotes,
+as in 'x' or '\n'.
+Within the quotes, any character may appear except newline and unescaped single
+quote. A single quoted character represents the Unicode value
+of the character itself,
+while multi-character sequences beginning with a backslash encode
+values in various formats.
+
+
+
+The simplest form represents the single character within the quotes;
+since Go source text is Unicode characters encoded in UTF-8, multiple
+UTF-8-encoded bytes may represent a single integer value. For
+instance, the literal 'a' holds a single byte representing
+a literal a, Unicode U+0061, value 0x61, while
+'ä' holds two bytes (0xc3 0xa4) representing
+a literal a-dieresis, U+00E4, value 0xe4.
+
+
+
+Several backslash escapes allow arbitrary values to be encoded as
+ASCII text. There are four ways to represent the integer value
+as a numeric constant: \x followed by exactly two hexadecimal
+digits; \u followed by exactly four hexadecimal digits;
+\U followed by exactly eight hexadecimal digits; and a
+plain backslash \ followed by exactly three octal digits.
+In each case the value of the literal is the value represented by
+the digits in the corresponding base.
+
+
+
+Although these representations all result in an integer, they have
+different valid ranges. Octal escapes must represent a value between
+0 and 255 inclusive. Hexadecimal escapes satisfy this condition
+by construction. The escapes \u and \U
+represent Unicode code points so within them some values are illegal,
+in particular those above 0x10FFFF and surrogate halves.
+
+
+
+After a backslash, certain single-character escapes represent special values:
+
+
+
+\a U+0007 alert or bell
+\b U+0008 backspace
+\f U+000C form feed
+\n U+000A line feed or newline
+\r U+000D carriage return
+\t U+0009 horizontal tab
+\v U+000B vertical tab
+\\ U+005C backslash
+\' U+0027 single quote (valid escape only within rune literals)
+\" U+0022 double quote (valid escape only within string literals)
+
+
+
+All other sequences starting with a backslash are illegal inside rune literals.
+
+
+rune_lit = "'" ( unicode_value | byte_value ) "'" .
+unicode_value = unicode_char | little_u_value | big_u_value | escaped_char .
+byte_value = octal_byte_value | hex_byte_value .
+octal_byte_value = `\` octal_digit octal_digit octal_digit .
+hex_byte_value = `\` "x" hex_digit hex_digit .
+little_u_value = `\` "u" hex_digit hex_digit hex_digit hex_digit .
+big_u_value = `\` "U" hex_digit hex_digit hex_digit hex_digit
+ hex_digit hex_digit hex_digit hex_digit .
+escaped_char = `\` ( "a" | "b" | "f" | "n" | "r" | "t" | "v" | `\` | "'" | `"` ) .
+
+
+
+'a'
+'ä'
+'本'
+'\t'
+'\000'
+'\007'
+'\377'
+'\x07'
+'\xff'
+'\u12e4'
+'\U00101234'
+'\'' // rune literal containing single quote character
+'aa' // illegal: too many characters
+'\xa' // illegal: too few hexadecimal digits
+'\0' // illegal: too few octal digits
+'\uDFFF' // illegal: surrogate half
+'\U00110000' // illegal: invalid Unicode code point
+
+
+
+String literals
+
+
+A string literal represents a string constant
+obtained from concatenating a sequence of characters. There are two forms:
+raw string literals and interpreted string literals.
+
+
+
+Raw string literals are character sequences between back quotes, as in
+`foo`. Within the quotes, any character may appear except
+back quote. The value of a raw string literal is the
+string composed of the uninterpreted (implicitly UTF-8-encoded) characters
+between the quotes;
+in particular, backslashes have no special meaning and the string may
+contain newlines.
+Carriage return characters ('\r') inside raw string literals
+are discarded from the raw string value.
+
+
+
+Interpreted string literals are character sequences between double
+quotes, as in "bar".
+Within the quotes, any character may appear except newline and unescaped double quote.
+The text between the quotes forms the
+value of the literal, with backslash escapes interpreted as they
+are in rune literals (except that \' is illegal and
+\" is legal), with the same restrictions.
+The three-digit octal (\nnn)
+and two-digit hexadecimal (\xnn) escapes represent individual
+bytes of the resulting string; all other escapes represent
+the (possibly multi-byte) UTF-8 encoding of individual characters.
+Thus inside a string literal \377 and \xFF represent
+a single byte of value 0xFF=255, while ÿ,
+\u00FF, \U000000FF and \xc3\xbf represent
+the two bytes 0xc3 0xbf of the UTF-8 encoding of character
+U+00FF.
+
+
+
+string_lit = raw_string_lit | interpreted_string_lit .
+raw_string_lit = "`" { unicode_char | newline } "`" .
+interpreted_string_lit = `"` { unicode_value | byte_value } `"` .
+
+
+
+`abc` // same as "abc"
+`\n
+\n` // same as "\\n\n\\n"
+"\n"
+"\"" // same as `"`
+"Hello, world!\n"
+"日本語"
+"\u65e5本\U00008a9e"
+"\xff\u00FF"
+"\uD800" // illegal: surrogate half
+"\U00110000" // illegal: invalid Unicode code point
+
+
+
+These examples all represent the same string:
+
+
+
+"日本語" // UTF-8 input text
+`日本語` // UTF-8 input text as a raw literal
+"\u65e5\u672c\u8a9e" // the explicit Unicode code points
+"\U000065e5\U0000672c\U00008a9e" // the explicit Unicode code points
+"\xe6\x97\xa5\xe6\x9c\xac\xe8\xaa\x9e" // the explicit UTF-8 bytes
+
+
+
+If the source code represents a character as two code points, such as
+a combining form involving an accent and a letter, the result will be
+an error if placed in a rune literal (it is not a single code
+point), and will appear as two code points if placed in a string
+literal.
+
+
+
+Constants
+
+There are boolean constants,
+rune constants,
+integer constants,
+floating-point constants, complex constants,
+and string constants. Rune, integer, floating-point,
+and complex constants are
+collectively called numeric constants.
+
+
+
+A constant value is represented by a
+rune,
+integer,
+floating-point,
+imaginary,
+or
+string literal,
+an identifier denoting a constant,
+a constant expression,
+a conversion with a result that is a constant, or
+the result value of some built-in functions such as
+unsafe.Sizeof applied to any value,
+cap or len applied to
+some expressions,
+real and imag applied to a complex constant
+and complex applied to numeric constants.
+The boolean truth values are represented by the predeclared constants
+true and false. The predeclared identifier
+iota denotes an integer constant.
+
+
+
+In general, complex constants are a form of
+constant expression
+and are discussed in that section.
+
+
+
+Numeric constants represent exact values of arbitrary precision and do not overflow.
+Consequently, there are no constants denoting the IEEE-754 negative zero, infinity,
+and not-a-number values.
+
+
+
+Constants may be typed or untyped.
+Literal constants, true, false, iota,
+and certain constant expressions
+containing only untyped constant operands are untyped.
+
+
+
+A constant may be given a type explicitly by a constant declaration
+or conversion, or implicitly when used in a
+variable declaration or an
+assignment or as an
+operand in an expression.
+It is an error if the constant value
+cannot be represented as a value of the respective type.
+
+
+
+An untyped constant has a default type which is the type to which the
+constant is implicitly converted in contexts where a typed value is required,
+for instance, in a short variable declaration
+such as i := 0 where there is no explicit type.
+The default type of an untyped constant is bool, rune,
+int, float64, complex128 or string
+respectively, depending on whether it is a boolean, rune, integer, floating-point,
+complex, or string constant.
+
+
+
+Implementation restriction: Although numeric constants have arbitrary
+precision in the language, a compiler may implement them using an
+internal representation with limited precision. That said, every
+implementation must:
+
+
+
+ - Represent integer constants with at least 256 bits.
+
+ - Represent floating-point constants, including the parts of
+ a complex constant, with a mantissa of at least 256 bits
+ and a signed binary exponent of at least 16 bits.
+
+ - Give an error if unable to represent an integer constant
+ precisely.
+
+ - Give an error if unable to represent a floating-point or
+ complex constant due to overflow.
+
+ - Round to the nearest representable constant if unable to
+ represent a floating-point or complex constant due to limits
+ on precision.
+
+
+
+These requirements apply both to literal constants and to the result
+of evaluating constant
+expressions.
+
+
+
+Variables
+
+
+A variable is a storage location for holding a value.
+The set of permissible values is determined by the
+variable's type.
+
+
+
+A variable declaration
+or, for function parameters and results, the signature
+of a function declaration
+or function literal reserves
+storage for a named variable.
+
+Calling the built-in function new
+or taking the address of a composite literal
+allocates storage for a variable at run time.
+Such an anonymous variable is referred to via a (possibly implicit)
+pointer indirection.
+
+
+
+Structured variables of array, slice,
+and struct types have elements and fields that may
+be addressed individually. Each such element
+acts like a variable.
+
+
+
+The static type (or just type) of a variable is the
+type given in its declaration, the type provided in the
+new call or composite literal, or the type of
+an element of a structured variable.
+Variables of interface type also have a distinct dynamic type,
+which is the concrete type of the value assigned to the variable at run time
+(unless the value is the predeclared identifier nil,
+which has no type).
+The dynamic type may vary during execution but values stored in interface
+variables are always assignable
+to the static type of the variable.
+
+
+
+var x interface{} // x is nil and has static type interface{}
+var v *T // v has value nil, static type *T
+x = 42 // x has value 42 and dynamic type int
+x = v // x has value (*T)(nil) and dynamic type *T
+
+
+
+A variable's value is retrieved by referring to the variable in an
+expression; it is the most recent value
+assigned to the variable.
+If a variable has not yet been assigned a value, its value is the
+zero value for its type.
+
+
+
+Types
+
+
+A type determines a set of values together with operations and methods specific
+to those values. A type may be denoted by a type name, if it has one,
+or specified using a type literal, which composes a type from existing types.
+
+
+
+Type = TypeName | TypeLit | "(" Type ")" .
+TypeName = identifier | QualifiedIdent .
+TypeLit = ArrayType | StructType | PointerType | FunctionType | InterfaceType |
+ SliceType | MapType | ChannelType .
+
+
+
+The language predeclares certain type names.
+Others are introduced with type declarations.
+Composite types—array, struct, pointer, function,
+interface, slice, map, and channel types—may be constructed using
+type literals.
+
+
+
+Each type T has an underlying type: If T
+is one of the predeclared boolean, numeric, or string types, or a type literal,
+the corresponding underlying
+type is T itself. Otherwise, T's underlying type
+is the underlying type of the type to which T refers in its
+type declaration.
+
+
+
+type (
+ A1 = string
+ A2 = A1
+)
+
+type (
+ B1 string
+ B2 B1
+ B3 []B1
+ B4 B3
+)
+
+
+
+The underlying type of string, A1, A2, B1,
+and B2 is string.
+The underlying type of []B1, B3, and B4 is []B1.
+
+
+Method sets
+
+A type has a (possibly empty) method set associated with it.
+The method set of an interface type is its interface.
+The method set of any other type T consists of all
+methods declared with receiver type T.
+The method set of the corresponding pointer type *T
+is the set of all methods declared with receiver *T or T
+(that is, it also contains the method set of T).
+Further rules apply to structs containing embedded fields, as described
+in the section on struct types.
+Any other type has an empty method set.
+In a method set, each method must have a
+unique
+non-blank method name.
+
+
+
+The method set of a type determines the interfaces that the
+type implements
+and the methods that can be called
+using a receiver of that type.
+
+
+Boolean types
+
+
+A boolean type represents the set of Boolean truth values
+denoted by the predeclared constants true
+and false. The predeclared boolean type is bool;
+it is a defined type.
+
+
+Numeric types
+
+
+A numeric type represents sets of integer or floating-point values.
+The predeclared architecture-independent numeric types are:
+
+
+
+uint8 the set of all unsigned 8-bit integers (0 to 255)
+uint16 the set of all unsigned 16-bit integers (0 to 65535)
+uint32 the set of all unsigned 32-bit integers (0 to 4294967295)
+uint64 the set of all unsigned 64-bit integers (0 to 18446744073709551615)
+
+int8 the set of all signed 8-bit integers (-128 to 127)
+int16 the set of all signed 16-bit integers (-32768 to 32767)
+int32 the set of all signed 32-bit integers (-2147483648 to 2147483647)
+int64 the set of all signed 64-bit integers (-9223372036854775808 to 9223372036854775807)
+
+float32 the set of all IEEE-754 32-bit floating-point numbers
+float64 the set of all IEEE-754 64-bit floating-point numbers
+
+complex64 the set of all complex numbers with float32 real and imaginary parts
+complex128 the set of all complex numbers with float64 real and imaginary parts
+
+byte alias for uint8
+rune alias for int32
+
+
+
+The value of an n-bit integer is n bits wide and represented using
+two's complement arithmetic.
+
+
+
+There is also a set of predeclared numeric types with implementation-specific sizes:
+
+
+
+uint either 32 or 64 bits
+int same size as uint
+uintptr an unsigned integer large enough to store the uninterpreted bits of a pointer value
+
+
+
+To avoid portability issues all numeric types are defined
+types and thus distinct except
+byte, which is an alias for uint8, and
+rune, which is an alias for int32.
+Explicit conversions
+are required when different numeric types are mixed in an expression
+or assignment. For instance, int32 and int
+are not the same type even though they may have the same size on a
+particular architecture.
+
+
+
String types
+
+
+A string type represents the set of string values.
+A string value is a (possibly empty) sequence of bytes.
+The number of bytes is called the length of the string and is never negative.
+Strings are immutable: once created,
+it is impossible to change the contents of a string.
+The predeclared string type is string;
+it is a defined type.
+
+
+
+The length of a string s can be discovered using
+the built-in function len.
+The length is a compile-time constant if the string is a constant.
+A string's bytes can be accessed by integer indices
+0 through len(s)-1.
+It is illegal to take the address of such an element; if
+s[i] is the i'th byte of a
+string, &s[i] is invalid.
+
+
+
+Array types
+
+
+An array is a numbered sequence of elements of a single
+type, called the element type.
+The number of elements is called the length of the array and is never negative.
+
+
+
+ArrayType = "[" ArrayLength "]" ElementType .
+ArrayLength = Expression .
+ElementType = Type .
+
+
+
+The length is part of the array's type; it must evaluate to a
+non-negative constant
+representable by a value
+of type int.
+The length of array a can be discovered
+using the built-in function len.
+The elements can be addressed by integer indices
+0 through len(a)-1.
+Array types are always one-dimensional but may be composed to form
+multi-dimensional types.
+
+
+
+[32]byte
+[2*N] struct { x, y int32 }
+[1000]*float64
+[3][5]int
+[2][2][2]float64 // same as [2]([2]([2]float64))
+
+
+Slice types
+
+
+A slice is a descriptor for a contiguous segment of an underlying array and
+provides access to a numbered sequence of elements from that array.
+A slice type denotes the set of all slices of arrays of its element type.
+The number of elements is called the length of the slice and is never negative.
+The value of an uninitialized slice is nil.
+
+
+
+SliceType = "[" "]" ElementType .
+
+
+
+The length of a slice s can be discovered by the built-in function
+len; unlike with arrays it may change during
+execution. The elements can be addressed by integer indices
+0 through len(s)-1. The slice index of a
+given element may be less than the index of the same element in the
+underlying array.
+
+
+A slice, once initialized, is always associated with an underlying
+array that holds its elements. A slice therefore shares storage
+with its array and with other slices of the same array; by contrast,
+distinct arrays always represent distinct storage.
+
+
+The array underlying a slice may extend past the end of the slice.
+The capacity is a measure of that extent: it is the sum of
+the length of the slice and the length of the array beyond the slice;
+a slice of length up to that capacity can be created by
+slicing a new one from the original slice.
+The capacity of a slice a can be discovered using the
+built-in function cap(a).
+
+
+
+A new, initialized slice value for a given element type T is
+made using the built-in function
+make,
+which takes a slice type
+and parameters specifying the length and optionally the capacity.
+A slice created with make always allocates a new, hidden array
+to which the returned slice value refers. That is, executing
+
+
+
+make([]T, length, capacity)
+
+
+
+produces the same slice as allocating an array and slicing
+it, so these two expressions are equivalent:
+
+
+
+make([]int, 50, 100)
+new([100]int)[0:50]
+
+
+
+Like arrays, slices are always one-dimensional but may be composed to construct
+higher-dimensional objects.
+With arrays of arrays, the inner arrays are, by construction, always the same length;
+however, with slices of slices (or arrays of slices), the inner lengths may vary dynamically.
+Moreover, the inner slices must be initialized individually.
+
+
+Struct types
+
+
+A struct is a sequence of named elements, called fields, each of which has a
+name and a type. Field names may be specified explicitly (IdentifierList) or
+implicitly (EmbeddedField).
+Within a struct, non-blank field names must
+be unique.
+
+
+
+StructType = "struct" "{" { FieldDecl ";" } "}" .
+FieldDecl = (IdentifierList Type | EmbeddedField) [ Tag ] .
+EmbeddedField = [ "*" ] TypeName .
+Tag = string_lit .
+
+
+
+// An empty struct.
+struct {}
+
+// A struct with 6 fields.
+struct {
+ x, y int
+ u float32
+ _ float32 // padding
+ A *[]int
+ F func()
+}
+
+
+
+A field declared with a type but no explicit field name is called an embedded field.
+An embedded field must be specified as
+a type name T or as a pointer to a non-interface type name *T,
+and T itself may not be
+a pointer type. The unqualified type name acts as the field name.
+
+
+
+// A struct with four embedded fields of types T1, *T2, P.T3 and *P.T4
+struct {
+ T1 // field name is T1
+ *T2 // field name is T2
+ P.T3 // field name is T3
+ *P.T4 // field name is T4
+ x, y int // field names are x and y
+}
+
+
+
+The following declaration is illegal because field names must be unique
+in a struct type:
+
+
+
+struct {
+ T // conflicts with embedded field *T and *P.T
+ *T // conflicts with embedded field T and *P.T
+ *P.T // conflicts with embedded field T and *T
+}
+
+
+
+A field or method f of an
+embedded field in a struct x is called promoted if
+x.f is a legal selector that denotes
+that field or method f.
+
+
+
+Promoted fields act like ordinary fields
+of a struct except that they cannot be used as field names in
+composite literals of the struct.
+
+
+
+Given a struct type S and a defined type
+T, promoted methods are included in the method set of the struct as follows:
+
+
+ -
+ If
S contains an embedded field T,
+ the method sets of S
+ and *S both include promoted methods with receiver
+ T. The method set of *S also
+ includes promoted methods with receiver *T.
+
+
+ -
+ If
S contains an embedded field *T,
+ the method sets of S and *S both
+ include promoted methods with receiver T or
+ *T.
+
+
+
+
+A field declaration may be followed by an optional string literal tag,
+which becomes an attribute for all the fields in the corresponding
+field declaration. An empty tag string is equivalent to an absent tag.
+The tags are made visible through a reflection interface
+and take part in type identity for structs
+but are otherwise ignored.
+
+
+
+struct {
+ x, y float64 "" // an empty tag string is like an absent tag
+ name string "any string is permitted as a tag"
+ _ [4]byte "ceci n'est pas un champ de structure"
+}
+
+// A struct corresponding to a TimeStamp protocol buffer.
+// The tag strings define the protocol buffer field numbers;
+// they follow the convention outlined by the reflect package.
+struct {
+ microsec uint64 `protobuf:"1"`
+ serverIP6 uint64 `protobuf:"2"`
+}
+
+
+Pointer types
+
+
+A pointer type denotes the set of all pointers to variables of a given
+type, called the base type of the pointer.
+The value of an uninitialized pointer is nil.
+
+
+
+PointerType = "*" BaseType .
+BaseType = Type .
+
+
+
+*Point
+*[4]int
+
+
+Function types
+
+
+A function type denotes the set of all functions with the same parameter
+and result types. The value of an uninitialized variable of function type
+is nil.
+
+
+
+FunctionType = "func" Signature .
+Signature = Parameters [ Result ] .
+Result = Parameters | Type .
+Parameters = "(" [ ParameterList [ "," ] ] ")" .
+ParameterList = ParameterDecl { "," ParameterDecl } .
+ParameterDecl = [ IdentifierList ] [ "..." ] Type .
+
+
+
+Within a list of parameters or results, the names (IdentifierList)
+must either all be present or all be absent. If present, each name
+stands for one item (parameter or result) of the specified type and
+all non-blank names in the signature
+must be unique.
+If absent, each type stands for one item of that type.
+Parameter and result
+lists are always parenthesized except that if there is exactly
+one unnamed result it may be written as an unparenthesized type.
+
+
+
+The final incoming parameter in a function signature may have
+a type prefixed with ....
+A function with such a parameter is called variadic and
+may be invoked with zero or more arguments for that parameter.
+
+
+
+func()
+func(x int) int
+func(a, _ int, z float32) bool
+func(a, b int, z float32) (bool)
+func(prefix string, values ...int)
+func(a, b int, z float64, opt ...interface{}) (success bool)
+func(int, int, float64) (float64, *[]int)
+func(n int) func(p *T)
+
+
+
+Interface types
+
+
+An interface type specifies a method set called its interface.
+A variable of interface type can store a value of any type with a method set
+that is any superset of the interface. Such a type is said to
+implement the interface.
+The value of an uninitialized variable of interface type is nil.
+
+
+
+InterfaceType = "interface" "{" { ( MethodSpec | InterfaceTypeName ) ";" } "}" .
+MethodSpec = MethodName Signature .
+MethodName = identifier .
+InterfaceTypeName = TypeName .
+
+
+
+An interface type may specify methods explicitly through method specifications,
+or it may embed methods of other interfaces through interface type names.
+
+
+
+// A simple File interface.
+interface {
+ Read([]byte) (int, error)
+ Write([]byte) (int, error)
+ Close() error
+}
+
+
+
+The name of each explicitly specified method must be unique
+and not blank.
+
+
+
+interface {
+ String() string
+ String() string // illegal: String not unique
+ _(x int) // illegal: method must have non-blank name
+}
+
+
+
+More than one type may implement an interface.
+For instance, if two types S1 and S2
+have the method set
+
+
+
+func (p T) Read(b []byte) (n int, err error)
+func (p T) Write(b []byte) (n int, err error)
+func (p T) Close() error
+
+
+
+(where T stands for either S1 or S2)
+then the File interface is implemented by both S1 and
+S2, regardless of what other methods
+S1 and S2 may have or share.
+
+
+
+A type implements any interface comprising any subset of its methods
+and may therefore implement several distinct interfaces. For
+instance, all types implement the empty interface:
+
+
+
+interface{}
+
+
+
+Similarly, consider this interface specification,
+which appears within a type declaration
+to define an interface called Locker:
+
+
+
+type Locker interface {
+ Lock()
+ Unlock()
+}
+
+
+
+If S1 and S2 also implement
+
+
+
+func (p T) Lock() { … }
+func (p T) Unlock() { … }
+
+
+
+they implement the Locker interface as well
+as the File interface.
+
+
+
+An interface T may use a (possibly qualified) interface type
+name E in place of a method specification. This is called
+embedding interface E in T.
+The method set of T is the union
+of the method sets of T’s explicitly declared methods and of
+T’s embedded interfaces.
+
+
+
+type Reader interface {
+ Read(p []byte) (n int, err error)
+ Close() error
+}
+
+type Writer interface {
+ Write(p []byte) (n int, err error)
+ Close() error
+}
+
+// ReadWriter's methods are Read, Write, and Close.
+type ReadWriter interface {
+ Reader // includes methods of Reader in ReadWriter's method set
+ Writer // includes methods of Writer in ReadWriter's method set
+}
+
+
+
+A union of method sets contains the (exported and non-exported)
+methods of each method set exactly once, and methods with the
+same names must
+have identical signatures.
+
+
+
+type ReadCloser interface {
+ Reader // includes methods of Reader in ReadCloser's method set
+ Close() // illegal: signatures of Reader.Close and Close are different
+}
+
+
+
+An interface type T may not embed itself
+or any interface type that embeds T, recursively.
+
+
+
+// illegal: Bad cannot embed itself
+type Bad interface {
+ Bad
+}
+
+// illegal: Bad1 cannot embed itself using Bad2
+type Bad1 interface {
+ Bad2
+}
+type Bad2 interface {
+ Bad1
+}
+
+
+Map types
+
+
+A map is an unordered group of elements of one type, called the
+element type, indexed by a set of unique keys of another type,
+called the key type.
+The value of an uninitialized map is nil.
+
+
+
+MapType = "map" "[" KeyType "]" ElementType .
+KeyType = Type .
+
+
+
+The comparison operators
+== and != must be fully defined
+for operands of the key type; thus the key type must not be a function, map, or
+slice.
+If the key type is an interface type, these
+comparison operators must be defined for the dynamic key values;
+failure will cause a run-time panic.
+
+
+
+
+map[string]int
+map[*T]struct{ x, y float64 }
+map[string]interface{}
+
+
+
+The number of map elements is called its length.
+For a map m, it can be discovered using the
+built-in function len
+and may change during execution. Elements may be added during execution
+using assignments and retrieved with
+index expressions; they may be removed with the
+delete built-in function.
+
+
+A new, empty map value is made using the built-in
+function make,
+which takes the map type and an optional capacity hint as arguments:
+
+
+
+make(map[string]int)
+make(map[string]int, 100)
+
+
+
+The initial capacity does not bound its size:
+maps grow to accommodate the number of items
+stored in them, with the exception of nil maps.
+A nil map is equivalent to an empty map except that no elements
+may be added.
+
+
Channel types
+
+
+A channel provides a mechanism for
+concurrently executing functions
+to communicate by
+sending and
+receiving
+values of a specified element type.
+The value of an uninitialized channel is nil.
+
+
+
+ChannelType = ( "chan" | "chan" "<-" | "<-" "chan" ) ElementType .
+
+
+
+The optional <- operator specifies the channel direction,
+send or receive. If no direction is given, the channel is
+bidirectional.
+A channel may be constrained only to send or only to receive by
+assignment or
+explicit conversion.
+
+
+
+chan T // can be used to send and receive values of type T
+chan<- float64 // can only be used to send float64s
+<-chan int // can only be used to receive ints
+
+
+
+The <- operator associates with the leftmost chan
+possible:
+
+
+
+chan<- chan int // same as chan<- (chan int)
+chan<- <-chan int // same as chan<- (<-chan int)
+<-chan <-chan int // same as <-chan (<-chan int)
+chan (<-chan int)
+
+
+
+A new, initialized channel
+value can be made using the built-in function
+make,
+which takes the channel type and an optional capacity as arguments:
+
+
+
+make(chan int, 100)
+
+
+
+The capacity, in number of elements, sets the size of the buffer in the channel.
+If the capacity is zero or absent, the channel is unbuffered and communication
+succeeds only when both a sender and receiver are ready. Otherwise, the channel
+is buffered and communication succeeds without blocking if the buffer
+is not full (sends) or not empty (receives).
+A nil channel is never ready for communication.
+
+
+
+A channel may be closed with the built-in function
+close.
+The multi-valued assignment form of the
+receive operator
+reports whether a received value was sent before
+the channel was closed.
+
+
+
+A single channel may be used in
+send statements,
+receive operations,
+and calls to the built-in functions
+cap and
+len
+by any number of goroutines without further synchronization.
+Channels act as first-in-first-out queues.
+For example, if one goroutine sends values on a channel
+and a second goroutine receives them, the values are
+received in the order sent.
+
+
+Properties of types and values
+
+Type identity
+
+
+Two types are either identical or different.
+
+
+
+A defined type is always different from any other type.
+Otherwise, two types are identical if their underlying type literals are
+structurally equivalent; that is, they have the same literal structure and corresponding
+components have identical types. In detail:
+
+
+
+ - Two array types are identical if they have identical element types and
+ the same array length.
+
+ - Two slice types are identical if they have identical element types.
+
+ - Two struct types are identical if they have the same sequence of fields,
+ and if corresponding fields have the same names, and identical types,
+ and identical tags.
+ Non-exported field names from different
+ packages are always different.
+
+ - Two pointer types are identical if they have identical base types.
+
+ - Two function types are identical if they have the same number of parameters
+ and result values, corresponding parameter and result types are
+ identical, and either both functions are variadic or neither is.
+ Parameter and result names are not required to match.
+
+ - Two interface types are identical if they have the same set of methods
+ with the same names and identical function types.
+ Non-exported method names from different
+ packages are always different. The order of the methods is irrelevant.
+
+ - Two map types are identical if they have identical key and element types.
+
+ - Two channel types are identical if they have identical element types and
+ the same direction.
+
+
+
+Given the declarations
+
+
+
+type (
+ A0 = []string
+ A1 = A0
+ A2 = struct{ a, b int }
+ A3 = int
+ A4 = func(A3, float64) *A0
+ A5 = func(x int, _ float64) *[]string
+)
+
+type (
+ B0 A0
+ B1 []string
+ B2 struct{ a, b int }
+ B3 struct{ a, c int }
+ B4 func(int, float64) *B0
+ B5 func(x int, y float64) *A1
+)
+
+type C0 = B0
+
+
+
+these types are identical:
+
+
+
+A0, A1, and []string
+A2 and struct{ a, b int }
+A3 and int
+A4, func(int, float64) *[]string, and A5
+
+B0 and C0
+[]int and []int
+struct{ a, b *T5 } and struct{ a, b *T5 }
+func(x int, y float64) *[]string, func(int, float64) (result *[]string), and A5
+
+
+
+B0 and B1 are different because they are new types
+created by distinct type definitions;
+func(int, float64) *B0 and func(x int, y float64) *[]string
+are different because B0 is different from []string.
+
+
+
+Assignability
+
+
+A value x is assignable to a variable of type T
+("x is assignable to T") if one of the following conditions applies:
+
+
+
+-
+
x's type is identical to T.
+
+-
+
x's type V and T have identical
+underlying types and at least one of V
+or T is not a defined type.
+
+-
+
T is an interface type and
+x implements T.
+
+-
+
x is a bidirectional channel value, T is a channel type,
+x's type V and T have identical element types,
+and at least one of V or T is not a defined type.
+
+-
+
x is the predeclared identifier nil and T
+is a pointer, function, slice, map, channel, or interface type.
+
+-
+
x is an untyped constant
+representable
+by a value of type T.
+
+
+
+
+Representability
+
+
+A constant x is representable
+by a value of type T if one of the following conditions applies:
+
+
+
+-
+
x is in the set of values determined by T.
+
+
+-
+
T is a floating-point type and x can be rounded to T's
+precision without overflow. Rounding uses IEEE 754 round-to-even rules but with an IEEE
+negative zero further simplified to an unsigned zero. Note that constant values never result
+in an IEEE negative zero, NaN, or infinity.
+
+
+-
+
T is a complex type, and x's
+components real(x) and imag(x)
+are representable by values of T's component type (float32 or
+float64).
+
+
+
+
+x T x is representable by a value of T because
+
+'a' byte 97 is in the set of byte values
+97 rune rune is an alias for int32, and 97 is in the set of 32-bit integers
+"foo" string "foo" is in the set of string values
+1024 int16 1024 is in the set of 16-bit integers
+42.0 byte 42 is in the set of unsigned 8-bit integers
+1e10 uint64 10000000000 is in the set of unsigned 64-bit integers
+2.718281828459045 float32 2.718281828459045 rounds to 2.7182817 which is in the set of float32 values
+-1e-1000 float64 -1e-1000 rounds to IEEE -0.0 which is further simplified to 0.0
+0i int 0 is an integer value
+(42 + 0i) float32 42.0 (with zero imaginary part) is in the set of float32 values
+
+
+
+x T x is not representable by a value of T because
+
+0 bool 0 is not in the set of boolean values
+'a' string 'a' is a rune, it is not in the set of string values
+1024 byte 1024 is not in the set of unsigned 8-bit integers
+-1 uint16 -1 is not in the set of unsigned 16-bit integers
+1.1 int 1.1 is not an integer value
+42i float32 (0 + 42i) is not in the set of float32 values
+1e1000 float64 1e1000 overflows to IEEE +Inf after rounding
+
+
+
+Blocks
+
+
+A block is a possibly empty sequence of declarations and statements
+within matching brace brackets.
+
+
+
+Block = "{" StatementList "}" .
+StatementList = { Statement ";" } .
+
+
+
+In addition to explicit blocks in the source code, there are implicit blocks:
+
+
+
+ - The universe block encompasses all Go source text.
+
+ - Each package has a package block containing all
+ Go source text for that package.
+
+ - Each file has a file block containing all Go source text
+ in that file.
+
+ - Each "if",
+ "for", and
+ "switch"
+ statement is considered to be in its own implicit block.
+
+ - Each clause in a "switch"
+ or "select" statement
+ acts as an implicit block.
+
+
+
+Blocks nest and influence scoping.
+
+
+
+Declarations and scope
+
+
+A declaration binds a non-blank identifier to a
+constant,
+type,
+variable,
+function,
+label, or
+package.
+Every identifier in a program must be declared.
+No identifier may be declared twice in the same block, and
+no identifier may be declared in both the file and package block.
+
+
+
+The blank identifier may be used like any other identifier
+in a declaration, but it does not introduce a binding and thus is not declared.
+In the package block, the identifier init may only be used for
+init function declarations,
+and like the blank identifier it does not introduce a new binding.
+
+
+
+Declaration = ConstDecl | TypeDecl | VarDecl .
+TopLevelDecl = Declaration | FunctionDecl | MethodDecl .
+
+
+
+The scope of a declared identifier is the extent of source text in which
+the identifier denotes the specified constant, type, variable, function, label, or package.
+
+
+
+Go is lexically scoped using blocks:
+
+
+
+ - The scope of a predeclared identifier is the universe block.
+
+ - The scope of an identifier denoting a constant, type, variable,
+ or function (but not method) declared at top level (outside any
+ function) is the package block.
+
+ - The scope of the package name of an imported package is the file block
+ of the file containing the import declaration.
+
+ - The scope of an identifier denoting a method receiver, function parameter,
+ or result variable is the function body.
+
+ - The scope of a constant or variable identifier declared
+ inside a function begins at the end of the ConstSpec or VarSpec
+ (ShortVarDecl for short variable declarations)
+ and ends at the end of the innermost containing block.
+
+ - The scope of a type identifier declared inside a function
+ begins at the identifier in the TypeSpec
+ and ends at the end of the innermost containing block.
+
+
+
+An identifier declared in a block may be redeclared in an inner block.
+While the identifier of the inner declaration is in scope, it denotes
+the entity declared by the inner declaration.
+
+
+
+The package clause is not a declaration; the package name
+does not appear in any scope. Its purpose is to identify the files belonging
+to the same package and to specify the default package name for import
+declarations.
+
+
+
+Label scopes
+
+
+Labels are declared by labeled statements and are
+used in the "break",
+"continue", and
+"goto" statements.
+It is illegal to define a label that is never used.
+In contrast to other identifiers, labels are not block scoped and do
+not conflict with identifiers that are not labels. The scope of a label
+is the body of the function in which it is declared and excludes
+the body of any nested function.
+
+
+
+Blank identifier
+
+
+The blank identifier is represented by the underscore character _.
+It serves as an anonymous placeholder instead of a regular (non-blank)
+identifier and has special meaning in declarations,
+as an operand, and in assignments.
+
+
+
+Predeclared identifiers
+
+
+The following identifiers are implicitly declared in the
+universe block:
+
+
+Types:
+ bool byte complex64 complex128 error float32 float64
+ int int8 int16 int32 int64 rune string
+ uint uint8 uint16 uint32 uint64 uintptr
+
+Constants:
+ true false iota
+
+Zero value:
+ nil
+
+Functions:
+ append cap close complex copy delete imag len
+ make new panic print println real recover
+
+
+
+Exported identifiers
+
+
+An identifier may be exported to permit access to it from another package.
+An identifier is exported if both:
+
+
+ - the first character of the identifier's name is a Unicode upper case
+ letter (Unicode class "Lu"); and
+ - the identifier is declared in the package block
+ or it is a field name or
+ method name.
+
+
+All other identifiers are not exported.
+
+
+
+Uniqueness of identifiers
+
+
+Given a set of identifiers, an identifier is called unique if it is
+different from every other in the set.
+Two identifiers are different if they are spelled differently, or if they
+appear in different packages and are not
+exported. Otherwise, they are the same.
+
+
+Constant declarations
+
+
+A constant declaration binds a list of identifiers (the names of
+the constants) to the values of a list of constant expressions.
+The number of identifiers must be equal
+to the number of expressions, and the nth identifier on
+the left is bound to the value of the nth expression on the
+right.
+
+
+
+ConstDecl = "const" ( ConstSpec | "(" { ConstSpec ";" } ")" ) .
+ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
+
+IdentifierList = identifier { "," identifier } .
+ExpressionList = Expression { "," Expression } .
+
+
+
+If the type is present, all constants take the type specified, and
+the expressions must be assignable to that type.
+If the type is omitted, the constants take the
+individual types of the corresponding expressions.
+If the expression values are untyped constants,
+the declared constants remain untyped and the constant identifiers
+denote the constant values. For instance, if the expression is a
+floating-point literal, the constant identifier denotes a floating-point
+constant, even if the literal's fractional part is zero.
+
+
+
+const Pi float64 = 3.14159265358979323846
+const zero = 0.0 // untyped floating-point constant
+const (
+ size int64 = 1024
+ eof = -1 // untyped integer constant
+)
+const a, b, c = 3, 4, "foo" // a = 3, b = 4, c = "foo", untyped integer and string constants
+const u, v float32 = 0, 3 // u = 0.0, v = 3.0
+
+
+
+Within a parenthesized const declaration list the
+expression list may be omitted from any but the first ConstSpec.
+Such an empty list is equivalent to the textual substitution of the
+first preceding non-empty expression list and its type if any.
+Omitting the list of expressions is therefore equivalent to
+repeating the previous list. The number of identifiers must be equal
+to the number of expressions in the previous list.
+Together with the iota constant generator
+this mechanism permits light-weight declaration of sequential values:
+
+
+
+const (
+ Sunday = iota
+ Monday
+ Tuesday
+ Wednesday
+ Thursday
+ Friday
+ Partyday
+ numberOfDays // this constant is not exported
+)
+
+
+
+Iota
+
+
+Within a constant declaration, the predeclared identifier
+iota represents successive untyped integer
+constants. Its value is the index of the respective ConstSpec
+in that constant declaration, starting at zero.
+It can be used to construct a set of related constants:
+
+
+
+const (
+ c0 = iota // c0 == 0
+ c1 = iota // c1 == 1
+ c2 = iota // c2 == 2
+)
+
+const (
+ a = 1 << iota // a == 1 (iota == 0)
+ b = 1 << iota // b == 2 (iota == 1)
+ c = 3 // c == 3 (iota == 2, unused)
+ d = 1 << iota // d == 8 (iota == 3)
+)
+
+const (
+ u = iota * 42 // u == 0 (untyped integer constant)
+ v float64 = iota * 42 // v == 42.0 (float64 constant)
+ w = iota * 42 // w == 84 (untyped integer constant)
+)
+
+const x = iota // x == 0
+const y = iota // y == 0
+
+
+
+By definition, multiple uses of iota in the same ConstSpec all have the same value:
+
+
+
+const (
+ bit0, mask0 = 1 << iota, 1<<iota - 1 // bit0 == 1, mask0 == 0 (iota == 0)
+ bit1, mask1 // bit1 == 2, mask1 == 1 (iota == 1)
+ _, _ // (iota == 2, unused)
+ bit3, mask3 // bit3 == 8, mask3 == 7 (iota == 3)
+)
+
+
+
+This last example exploits the implicit repetition
+of the last non-empty expression list.
+
+
+
+Type declarations
+
+
+A type declaration binds an identifier, the type name, to a type.
+Type declarations come in two forms: alias declarations and type definitions.
+
+
+
+TypeDecl = "type" ( TypeSpec | "(" { TypeSpec ";" } ")" ) .
+TypeSpec = AliasDecl | TypeDef .
+
+
+Alias declarations
+
+
+An alias declaration binds an identifier to the given type.
+
+
+
+AliasDecl = identifier "=" Type .
+
+
+
+Within the scope of
+the identifier, it serves as an alias for the type.
+
+
+
+type (
+ nodeList = []*Node // nodeList and []*Node are identical types
+ Polar = polar // Polar and polar denote identical types
+)
+
+
+
+Type definitions
+
+
+A type definition creates a new, distinct type with the same
+underlying type and operations as the given type,
+and binds an identifier to it.
+
+
+
+TypeDef = identifier Type .
+
+
+
+The new type is called a defined type.
+It is different from any other type,
+including the type it is created from.
+
+
+
+type (
+ Point struct{ x, y float64 } // Point and struct{ x, y float64 } are different types
+ polar Point // polar and Point denote different types
+)
+
+type TreeNode struct {
+ left, right *TreeNode
+ value *Comparable
+}
+
+type Block interface {
+ BlockSize() int
+ Encrypt(src, dst []byte)
+ Decrypt(src, dst []byte)
+}
+
+
+
+A defined type may have methods associated with it.
+It does not inherit any methods bound to the given type,
+but the method set
+of an interface type or of elements of a composite type remains unchanged:
+
+
+
+// A Mutex is a data type with two methods, Lock and Unlock.
+type Mutex struct { /* Mutex fields */ }
+func (m *Mutex) Lock() { /* Lock implementation */ }
+func (m *Mutex) Unlock() { /* Unlock implementation */ }
+
+// NewMutex has the same composition as Mutex but its method set is empty.
+type NewMutex Mutex
+
+// The method set of PtrMutex's underlying type *Mutex remains unchanged,
+// but the method set of PtrMutex is empty.
+type PtrMutex *Mutex
+
+// The method set of *PrintableMutex contains the methods
+// Lock and Unlock bound to its embedded field Mutex.
+type PrintableMutex struct {
+ Mutex
+}
+
+// MyBlock is an interface type that has the same method set as Block.
+type MyBlock Block
+
+
+
+Type definitions may be used to define different boolean, numeric,
+or string types and associate methods with them:
+
+
+
+type TimeZone int
+
+const (
+ EST TimeZone = -(5 + iota)
+ CST
+ MST
+ PST
+)
+
+func (tz TimeZone) String() string {
+ return fmt.Sprintf("GMT%+dh", tz)
+}
+
+
+
+Variable declarations
+
+
+A variable declaration creates one or more variables,
+binds corresponding identifiers to them, and gives each a type and an initial value.
+
+
+
+VarDecl = "var" ( VarSpec | "(" { VarSpec ";" } ")" ) .
+VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
+
+
+
+var i int
+var U, V, W float64
+var k = 0
+var x, y float32 = -1, -2
+var (
+ i int
+ u, v, s = 2.0, 3.0, "bar"
+)
+var re, im = complexSqrt(-1)
+var _, found = entries[name] // map lookup; only interested in "found"
+
+
+
+If a list of expressions is given, the variables are initialized
+with the expressions following the rules for assignments.
+Otherwise, each variable is initialized to its zero value.
+
+
+
+If a type is present, each variable is given that type.
+Otherwise, each variable is given the type of the corresponding
+initialization value in the assignment.
+If that value is an untyped constant, it is first implicitly
+converted to its default type;
+if it is an untyped boolean value, it is first implicitly converted to type bool.
+The predeclared value nil cannot be used to initialize a variable
+with no explicit type.
+
+
+
+var d = math.Sin(0.5) // d is float64
+var i = 42 // i is int
+var t, ok = x.(T) // t is T, ok is bool
+var n = nil // illegal
+
+
+
+Implementation restriction: A compiler may make it illegal to declare a variable
+inside a function body if the variable is
+never used.
+
+
+Short variable declarations
+
+
+A short variable declaration uses the syntax:
+
+
+
+ShortVarDecl = IdentifierList ":=" ExpressionList .
+
+
+
+It is shorthand for a regular variable declaration
+with initializer expressions but no types:
+
+
+
+"var" IdentifierList = ExpressionList .
+
+
+
+i, j := 0, 10
+f := func() int { return 7 }
+ch := make(chan int)
+r, w, _ := os.Pipe() // os.Pipe() returns a connected pair of Files and an error, if any
+_, y, _ := coord(p) // coord() returns three values; only interested in y coordinate
+
+
+
+Unlike regular variable declarations, a short variable declaration may redeclare
+variables provided they were originally declared earlier in the same block
+(or the parameter lists if the block is the function body) with the same type,
+and at least one of the non-blank variables is new.
+As a consequence, redeclaration can only appear in a multi-variable short declaration.
+Redeclaration does not introduce a new variable; it just assigns a new value to the original.
+
+
+
+field1, offset := nextField(str, 0)
+field2, offset := nextField(str, offset) // redeclares offset
+a, a := 1, 2 // illegal: double declaration of a or no new variable if a was declared elsewhere
+
+
+
+Short variable declarations may appear only inside functions.
+In some contexts such as the initializers for
+"if",
+"for", or
+"switch" statements,
+they can be used to declare local temporary variables.
+
+
+Function declarations
+
+
+A function declaration binds an identifier, the function name,
+to a function.
+
+
+
+FunctionDecl = "func" FunctionName Signature [ FunctionBody ] .
+FunctionName = identifier .
+FunctionBody = Block .
+
+
+
+If the function's signature declares
+result parameters, the function body's statement list must end in
+a terminating statement.
+
+
+
+func IndexRune(s string, r rune) int {
+ for i, c := range s {
+ if c == r {
+ return i
+ }
+ }
+ // invalid: missing return statement
+}
+
+
+
+A function declaration may omit the body. Such a declaration provides the
+signature for a function implemented outside Go, such as an assembly routine.
+
+
+
+func min(x int, y int) int {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func flushICache(begin, end uintptr) // implemented externally
+
+
+Method declarations
+
+
+A method is a function with a receiver.
+A method declaration binds an identifier, the method name, to a method,
+and associates the method with the receiver's base type.
+
+
+
+MethodDecl = "func" Receiver MethodName Signature [ FunctionBody ] .
+Receiver = Parameters .
+
+
+
+The receiver is specified via an extra parameter section preceding the method
+name. That parameter section must declare a single non-variadic parameter, the receiver.
+Its type must be a defined type T or a
+pointer to a defined type T. T is called the receiver
+base type. A receiver base type cannot be a pointer or interface type and
+it must be defined in the same package as the method.
+The method is said to be bound to its receiver base type and the method name
+is visible only within selectors for type T
+or *T.
+
+
+
+A non-blank receiver identifier must be
+unique in the method signature.
+If the receiver's value is not referenced inside the body of the method,
+its identifier may be omitted in the declaration. The same applies in
+general to parameters of functions and methods.
+
+
+
+For a base type, the non-blank names of methods bound to it must be unique.
+If the base type is a struct type,
+the non-blank method and field names must be distinct.
+
+
+
+Given defined type Point, the declarations
+
+
+
+func (p *Point) Length() float64 {
+ return math.Sqrt(p.x * p.x + p.y * p.y)
+}
+
+func (p *Point) Scale(factor float64) {
+ p.x *= factor
+ p.y *= factor
+}
+
+
+
+bind the methods Length and Scale,
+with receiver type *Point,
+to the base type Point.
+
+
+
+The type of a method is the type of a function with the receiver as first
+argument. For instance, the method Scale has type
+
+
+
+func(p *Point, factor float64)
+
+
+
+However, a function declared this way is not a method.
+
+
+
+Expressions
+
+
+An expression specifies the computation of a value by applying
+operators and functions to operands.
+
+
+Operands
+
+
+Operands denote the elementary values in an expression. An operand may be a
+literal, a (possibly qualified)
+non-blank identifier denoting a
+constant,
+variable, or
+function,
+or a parenthesized expression.
+
+
+
+The blank identifier may appear as an
+operand only on the left-hand side of an assignment.
+
+
+
+Operand = Literal | OperandName | "(" Expression ")" .
+Literal = BasicLit | CompositeLit | FunctionLit .
+BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
+OperandName = identifier | QualifiedIdent .
+
+
+Qualified identifiers
+
+
+A qualified identifier is an identifier qualified with a package name prefix.
+Both the package name and the identifier must not be
+blank.
+
+
+
+QualifiedIdent = PackageName "." identifier .
+
+
+
+A qualified identifier accesses an identifier in a different package, which
+must be imported.
+The identifier must be exported and
+declared in the package block of that package.
+
+
+
+math.Sin // denotes the Sin function in package math
+
+
+Composite literals
+
+
+Composite literals construct values for structs, arrays, slices, and maps
+and create a new value each time they are evaluated.
+They consist of the type of the literal followed by a brace-bound list of elements.
+Each element may optionally be preceded by a corresponding key.
+
+
+
+CompositeLit = LiteralType LiteralValue .
+LiteralType = StructType | ArrayType | "[" "..." "]" ElementType |
+ SliceType | MapType | TypeName .
+LiteralValue = "{" [ ElementList [ "," ] ] "}" .
+ElementList = KeyedElement { "," KeyedElement } .
+KeyedElement = [ Key ":" ] Element .
+Key = FieldName | Expression | LiteralValue .
+FieldName = identifier .
+Element = Expression | LiteralValue .
+
+
+
+The LiteralType's underlying type must be a struct, array, slice, or map type
+(the grammar enforces this constraint except when the type is given
+as a TypeName).
+The types of the elements and keys must be assignable
+to the respective field, element, and key types of the literal type;
+there is no additional conversion.
+The key is interpreted as a field name for struct literals,
+an index for array and slice literals, and a key for map literals.
+For map literals, all elements must have a key. It is an error
+to specify multiple elements with the same field name or
+constant key value. For non-constant map keys, see the section on
+evaluation order.
+
+
+
+For struct literals the following rules apply:
+
+
+ - A key must be a field name declared in the struct type.
+
+ - An element list that does not contain any keys must
+ list an element for each struct field in the
+ order in which the fields are declared.
+
+ - If any element has a key, every element must have a key.
+
+ - An element list that contains keys does not need to
+ have an element for each struct field. Omitted fields
+ get the zero value for that field.
+
+ - A literal may omit the element list; such a literal evaluates
+ to the zero value for its type.
+
+ - It is an error to specify an element for a non-exported
+ field of a struct belonging to a different package.
+
+
+
+
+Given the declarations
+
+
+type Point3D struct { x, y, z float64 }
+type Line struct { p, q Point3D }
+
+
+
+one may write
+
+
+
+origin := Point3D{} // zero value for Point3D
+line := Line{origin, Point3D{y: -4, z: 12.3}} // zero value for line.q.x
+
+
+
+For array and slice literals the following rules apply:
+
+
+ - Each element has an associated integer index marking
+ its position in the array.
+
+ - An element with a key uses the key as its index. The
+ key must be a non-negative constant
+ representable by
+ a value of type
int; and if it is typed
+ it must be of integer type.
+
+ - An element without a key uses the previous element's index plus one.
+ If the first element has no key, its index is zero.
+
+
+
+
+Taking the address of a composite literal
+generates a pointer to a unique variable initialized
+with the literal's value.
+
+
+
+var pointer *Point3D = &Point3D{y: 1000}
+
+
+
+Note that the zero value for a slice or map
+type is not the same as an initialized but empty value of the same type.
+Consequently, taking the address of an empty slice or map composite literal
+does not have the same effect as allocating a new slice or map value with
+new.
+
+
+
+p1 := &[]int{} // p1 points to an initialized, empty slice with value []int{} and length 0
+p2 := new([]int) // p2 points to an uninitialized slice with value nil and length 0
+
+
+
+The length of an array literal is the length specified in the literal type.
+If fewer elements than the length are provided in the literal, the missing
+elements are set to the zero value for the array element type.
+It is an error to provide elements with index values outside the index range
+of the array. The notation ... specifies an array length equal
+to the maximum element index plus one.
+
+
+
+buffer := [10]string{} // len(buffer) == 10
+intSet := [6]int{1, 2, 3, 5} // len(intSet) == 6
+days := [...]string{"Sat", "Sun"} // len(days) == 2
+
+
+
+A slice literal describes the entire underlying array literal.
+Thus the length and capacity of a slice literal are the maximum
+element index plus one. A slice literal has the form
+
+
+
+[]T{x1, x2, … xn}
+
+
+
+and is shorthand for a slice operation applied to an array:
+
+
+
+tmp := [n]T{x1, x2, … xn}
+tmp[0 : n]
+
+
+
+Within a composite literal of array, slice, or map type T,
+elements or map keys that are themselves composite literals may elide the respective
+literal type if it is identical to the element or key type of T.
+Similarly, elements or keys that are addresses of composite literals may elide
+the &T when the element or key type is *T.
+
+
+
+[...]Point{{1.5, -3.5}, {0, 0}} // same as [...]Point{Point{1.5, -3.5}, Point{0, 0}}
+[][]int{{1, 2, 3}, {4, 5}} // same as [][]int{[]int{1, 2, 3}, []int{4, 5}}
+[][]Point{{{0, 1}, {1, 2}}} // same as [][]Point{[]Point{Point{0, 1}, Point{1, 2}}}
+map[string]Point{"orig": {0, 0}} // same as map[string]Point{"orig": Point{0, 0}}
+map[Point]string{{0, 0}: "orig"} // same as map[Point]string{Point{0, 0}: "orig"}
+
+type PPoint *Point
+[2]*Point{{1.5, -3.5}, {}} // same as [2]*Point{&Point{1.5, -3.5}, &Point{}}
+[2]PPoint{{1.5, -3.5}, {}} // same as [2]PPoint{PPoint(&Point{1.5, -3.5}), PPoint(&Point{})}
+
+
+
+A parsing ambiguity arises when a composite literal using the
+TypeName form of the LiteralType appears as an operand between the
+keyword and the opening brace of the block
+of an "if", "for", or "switch" statement, and the composite literal
+is not enclosed in parentheses, square brackets, or curly braces.
+In this rare case, the opening brace of the literal is erroneously parsed
+as the one introducing the block of statements. To resolve the ambiguity,
+the composite literal must appear within parentheses.
+
+
+
+if x == (T{a,b,c}[i]) { … }
+if (x == T{a,b,c}[i]) { … }
+
+
+
+Examples of valid array, slice, and map literals:
+
+
+
+// list of prime numbers
+primes := []int{2, 3, 5, 7, 11, 2147483647}
+
+// vowels[ch] is true if ch is a vowel
+vowels := [128]bool{'a': true, 'e': true, 'i': true, 'o': true, 'u': true, 'y': true}
+
+// the array [10]float32{-1, 0, 0, 0, -0.1, -0.1, 0, 0, 0, -1}
+filter := [10]float32{-1, 4: -0.1, -0.1, 9: -1}
+
+// frequencies in Hz for equal-tempered scale (A4 = 440Hz)
+noteFrequency := map[string]float32{
+ "C0": 16.35, "D0": 18.35, "E0": 20.60, "F0": 21.83,
+ "G0": 24.50, "A0": 27.50, "B0": 30.87,
+}
+
+
+
+Function literals
+
+
+A function literal represents an anonymous function.
+
+
+
+FunctionLit = "func" Signature FunctionBody .
+
+
+
+func(a, b int, z float64) bool { return a*b < int(z) }
+
+
+
+A function literal can be assigned to a variable or invoked directly.
+
+
+
+f := func(x, y int) int { return x + y }
+func(ch chan int) { ch <- ACK }(replyChan)
+
+
+
+Function literals are closures: they may refer to variables
+defined in a surrounding function. Those variables are then shared between
+the surrounding function and the function literal, and they survive as long
+as they are accessible.
+
+
+
+Primary expressions
+
+
+Primary expressions are the operands for unary and binary expressions.
+
+
+
+PrimaryExpr =
+ Operand |
+ Conversion |
+ MethodExpr |
+ PrimaryExpr Selector |
+ PrimaryExpr Index |
+ PrimaryExpr Slice |
+ PrimaryExpr TypeAssertion |
+ PrimaryExpr Arguments .
+
+Selector = "." identifier .
+Index = "[" Expression "]" .
+Slice = "[" [ Expression ] ":" [ Expression ] "]" |
+ "[" [ Expression ] ":" Expression ":" Expression "]" .
+TypeAssertion = "." "(" Type ")" .
+Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
+
+
+
+
+x
+2
+(s + ".txt")
+f(3.1415, true)
+Point{1, 2}
+m["foo"]
+s[i : j + 1]
+obj.color
+f.p[i].x()
+
+
+
+Selectors
+
+
+For a primary expression x
+that is not a package name, the
+selector expression
+
+
+
+x.f
+
+
+
+denotes the field or method f of the value x
+(or sometimes *x; see below).
+The identifier f is called the (field or method) selector;
+it must not be the blank identifier.
+The type of the selector expression is the type of f.
+If x is a package name, see the section on
+qualified identifiers.
+
+
+
+A selector f may denote a field or method f of
+a type T, or it may refer
+to a field or method f of a nested
+embedded field of T.
+The number of embedded fields traversed
+to reach f is called its depth in T.
+The depth of a field or method f
+declared in T is zero.
+The depth of a field or method f declared in
+an embedded field A in T is the
+depth of f in A plus one.
+
+
+
+The following rules apply to selectors:
+
+
+
+-
+For a value
x of type T or *T
+where T is not a pointer or interface type,
+x.f denotes the field or method at the shallowest depth
+in T where there
+is such an f.
+If there is not exactly one f
+with shallowest depth, the selector expression is illegal.
+
+
+-
+For a value
x of type I where I
+is an interface type, x.f denotes the actual method with name
+f of the dynamic value of x.
+If there is no method with name f in the
+method set of I, the selector
+expression is illegal.
+
+
+-
+As an exception, if the type of
x is a defined
+pointer type and (*x).f is a valid selector expression denoting a field
+(but not a method), x.f is shorthand for (*x).f.
+
+
+-
+In all other cases,
x.f is illegal.
+
+
+-
+If
x is of pointer type and has the value
+nil and x.f denotes a struct field,
+assigning to or evaluating x.f
+causes a run-time panic.
+
+
+-
+If
x is of interface type and has the value
+nil, calling or
+evaluating the method x.f
+causes a run-time panic.
+
+
+
+
+For example, given the declarations:
+
+
+
+type T0 struct {
+ x int
+}
+
+func (*T0) M0()
+
+type T1 struct {
+ y int
+}
+
+func (T1) M1()
+
+type T2 struct {
+ z int
+ T1
+ *T0
+}
+
+func (*T2) M2()
+
+type Q *T2
+
+var t T2 // with t.T0 != nil
+var p *T2 // with p != nil and (*p).T0 != nil
+var q Q = p
+
+
+
+one may write:
+
+
+
+t.z // t.z
+t.y // t.T1.y
+t.x // (*t.T0).x
+
+p.z // (*p).z
+p.y // (*p).T1.y
+p.x // (*(*p).T0).x
+
+q.x // (*(*q).T0).x (*q).x is a valid field selector
+
+p.M0() // ((*p).T0).M0() M0 expects *T0 receiver
+p.M1() // ((*p).T1).M1() M1 expects T1 receiver
+p.M2() // p.M2() M2 expects *T2 receiver
+t.M2() // (&t).M2() M2 expects *T2 receiver, see section on Calls
+
+
+
+but the following is invalid:
+
+
+
+q.M0() // (*q).M0 is valid but not a field selector
+
+
+
+Method expressions
+
+
+If M is in the method set of type T,
+T.M is a function that is callable as a regular function
+with the same arguments as M prefixed by an additional
+argument that is the receiver of the method.
+
+
+
+MethodExpr = ReceiverType "." MethodName .
+ReceiverType = Type .
+
+
+
+Consider a struct type T with two methods,
+Mv, whose receiver is of type T, and
+Mp, whose receiver is of type *T.
+
+
+
+type T struct {
+ a int
+}
+func (tv T) Mv(a int) int { return 0 } // value receiver
+func (tp *T) Mp(f float32) float32 { return 1 } // pointer receiver
+
+var t T
+
+
+
+The expression
+
+
+
+T.Mv
+
+
+
+yields a function equivalent to Mv but
+with an explicit receiver as its first argument; it has signature
+
+
+
+func(tv T, a int) int
+
+
+
+That function may be called normally with an explicit receiver, so
+these five invocations are equivalent:
+
+
+
+t.Mv(7)
+T.Mv(t, 7)
+(T).Mv(t, 7)
+f1 := T.Mv; f1(t, 7)
+f2 := (T).Mv; f2(t, 7)
+
+
+
+Similarly, the expression
+
+
+
+(*T).Mp
+
+
+
+yields a function value representing Mp with signature
+
+
+
+func(tp *T, f float32) float32
+
+
+
+For a method with a value receiver, one can derive a function
+with an explicit pointer receiver, so
+
+
+
+(*T).Mv
+
+
+
+yields a function value representing Mv with signature
+
+
+
+func(tv *T, a int) int
+
+
+
+Such a function indirects through the receiver to create a value
+to pass as the receiver to the underlying method;
+the method does not overwrite the value whose address is passed in
+the function call.
+
+
+
+The final case, a value-receiver function for a pointer-receiver method,
+is illegal because pointer-receiver methods are not in the method set
+of the value type.
+
+
+
+Function values derived from methods are called with function call syntax;
+the receiver is provided as the first argument to the call.
+That is, given f := T.Mv, f is invoked
+as f(t, 7) not t.f(7).
+To construct a function that binds the receiver, use a
+function literal or
+method value.
+
+
+
+It is legal to derive a function value from a method of an interface type.
+The resulting function takes an explicit receiver of that interface type.
+
+
+Method values
+
+
+If the expression x has static type T and
+M is in the method set of type T,
+x.M is called a method value.
+The method value x.M is a function value that is callable
+with the same arguments as a method call of x.M.
+The expression x is evaluated and saved during the evaluation of the
+method value; the saved copy is then used as the receiver in any calls,
+which may be executed later.
+
+
+
+type S struct { *T }
+type T int
+func (t T) M() { print(t) }
+
+t := new(T)
+s := S{T: t}
+f := t.M // receiver *t is evaluated and stored in f
+g := s.M // receiver *(s.T) is evaluated and stored in g
+*t = 42 // does not affect stored receivers in f and g
+
+
+
+The type T may be an interface or non-interface type.
+
+
+
+As in the discussion of method expressions above,
+consider a struct type T with two methods,
+Mv, whose receiver is of type T, and
+Mp, whose receiver is of type *T.
+
+
+
+type T struct {
+ a int
+}
+func (tv T) Mv(a int) int { return 0 } // value receiver
+func (tp *T) Mp(f float32) float32 { return 1 } // pointer receiver
+
+var t T
+var pt *T
+func makeT() T
+
+
+
+The expression
+
+
+
+t.Mv
+
+
+
+yields a function value of type
+
+
+
+func(int) int
+
+
+
+These two invocations are equivalent:
+
+
+
+t.Mv(7)
+f := t.Mv; f(7)
+
+
+
+Similarly, the expression
+
+
+
+pt.Mp
+
+
+
+yields a function value of type
+
+
+
+func(float32) float32
+
+
+
+As with selectors, a reference to a non-interface method with a value receiver
+using a pointer will automatically dereference that pointer: pt.Mv is equivalent to (*pt).Mv.
+
+
+
+As with method calls, a reference to a non-interface method with a pointer receiver
+using an addressable value will automatically take the address of that value: t.Mp is equivalent to (&t).Mp.
+
+
+
+f := t.Mv; f(7) // like t.Mv(7)
+f := pt.Mp; f(7) // like pt.Mp(7)
+f := pt.Mv; f(7) // like (*pt).Mv(7)
+f := t.Mp; f(7) // like (&t).Mp(7)
+f := makeT().Mp // invalid: result of makeT() is not addressable
+
+
+
+Although the examples above use non-interface types, it is also legal to create a method value
+from a value of interface type.
+
+
+
+var i interface { M(int) } = myVal
+f := i.M; f(7) // like i.M(7)
+
+
+
+Index expressions
+
+
+A primary expression of the form
+
+
+
+a[x]
+
+
+
+denotes the element of the array, pointer to array, slice, string or map a indexed by x.
+The value x is called the index or map key, respectively.
+The following rules apply:
+
+
+
+If a is not a map:
+
+
+ - the index x must be of integer type or an untyped constant
+ - a constant index must be non-negative and
+   representable by a value of type int
+ - a constant index that is untyped is given type int
+ - the index x is in range if 0 <= x < len(a),
+   otherwise it is out of range
+
+
+
+For a of array type A:
+
+
+ - a constant index must be in range
+ - if x is out of range at run time,
+   a run-time panic occurs
+ - a[x] is the array element at index x and the type of
+   a[x] is the element type of A
+
+
+
+For a of pointer to array type:
+
+
+ - a[x] is shorthand for (*a)[x]
+
+
+
+For a of slice type S:
+
+
+ - if x is out of range at run time,
+   a run-time panic occurs
+ - a[x] is the slice element at index x and the type of
+   a[x] is the element type of S
+
+
+
+For a of string type:
+
+
+ - a constant index must be in range
+   if the string a is also constant
+ - if x is out of range at run time,
+   a run-time panic occurs
+ - a[x] is the non-constant byte value at index x and the type of
+   a[x] is byte
+ - a[x] may not be assigned to
+
+
+
+For a of map type M:
+
+
+ - x's type must be
+   assignable
+   to the key type of M
+ - if the map contains an entry with key x,
+   a[x] is the map element with key x
+   and the type of a[x] is the element type of M
+ - if the map is nil or does not contain such an entry,
+   a[x] is the zero value
+   for the element type of M
+
+
+
+Otherwise a[x] is illegal.
+
+
+
+An index expression on a map a of type map[K]V
+used in an assignment or initialization of the special form
+
+
+
+v, ok = a[x]
+v, ok := a[x]
+var v, ok = a[x]
+
+
+
+yields an additional untyped boolean value. The value of ok is
+true if the key x is present in the map, and
+false otherwise.
+
+
+
+Assigning to an element of a nil map causes a
+run-time panic.
+
+
+
+Slice expressions
+
+
+Slice expressions construct a substring or slice from a string, array, pointer
+to array, or slice. There are two variants: a simple form that specifies a low
+and high bound, and a full form that also specifies a bound on the capacity.
+
+
+Simple slice expressions
+
+
+For a string, array, pointer to array, or slice a, the primary expression
+
+
+
+a[low : high]
+
+
+
+constructs a substring or slice. The indices low and
+high select which elements of operand a appear
+in the result. The result has indices starting at 0 and length equal to
+high - low.
+After slicing the array a
+
+
+
+a := [5]int{1, 2, 3, 4, 5}
+s := a[1:4]
+
+
+
+the slice s has type []int, length 3, capacity 4, and elements
+
+
+
+s[0] == 2
+s[1] == 3
+s[2] == 4
+
+
+
+For convenience, any of the indices may be omitted. A missing low
+index defaults to zero; a missing high index defaults to the length of the
+sliced operand:
+
+
+
+a[2:] // same as a[2 : len(a)]
+a[:3] // same as a[0 : 3]
+a[:] // same as a[0 : len(a)]
+
+
+
+If a is a pointer to an array, a[low : high] is shorthand for
+(*a)[low : high].
+
+
+
+For arrays or strings, the indices are in range if
+0 <= low <= high <= len(a),
+otherwise they are out of range.
+For slices, the upper index bound is the slice capacity cap(a) rather than the length.
+A constant index must be non-negative and
+representable by a value of type
+int; for arrays or constant strings, constant indices must also be in range.
+If both indices are constant, they must satisfy low <= high.
+If the indices are out of range at run time, a run-time panic occurs.
+
+
+
+Except for untyped strings, if the sliced operand is a string or slice,
+the result of the slice operation is a non-constant value of the same type as the operand.
+For untyped string operands the result is a non-constant value of type string.
+If the sliced operand is an array, it must be addressable
+and the result of the slice operation is a slice with the same element type as the array.
+
+
+
+If the sliced operand of a valid slice expression is a nil slice, the result
+is a nil slice. Otherwise, if the result is a slice, it shares its underlying
+array with the operand.
+
+
+
+var a [10]int
+s1 := a[3:7] // underlying array of s1 is array a; &s1[2] == &a[5]
+s2 := s1[1:4] // underlying array of s2 is underlying array of s1 which is array a; &s2[1] == &a[5]
+s2[1] = 42 // s2[1] == s1[2] == a[5] == 42; they all refer to the same underlying array element
+
+
+
+Full slice expressions
+
+
+For an array, pointer to array, or slice a (but not a string), the primary expression
+
+
+
+a[low : high : max]
+
+
+
+constructs a slice of the same type, and with the same length and elements as the simple slice
+expression a[low : high]. Additionally, it controls the resulting slice's capacity
+by setting it to max - low. Only the first index may be omitted; it defaults to 0.
+After slicing the array a
+
+
+
+a := [5]int{1, 2, 3, 4, 5}
+t := a[1:3:5]
+
+
+
+the slice t has type []int, length 2, capacity 4, and elements
+
+
+
+t[0] == 2
+t[1] == 3
+
+
+
+As for simple slice expressions, if a is a pointer to an array,
+a[low : high : max] is shorthand for (*a)[low : high : max].
+If the sliced operand is an array, it must be addressable.
+
+
+
+The indices are in range if 0 <= low <= high <= max <= cap(a),
+otherwise they are out of range.
+A constant index must be non-negative and
+representable by a value of type
+int; for arrays, constant indices must also be in range.
+If multiple indices are constant, the constants that are present must be in range relative to each
+other.
+If the indices are out of range at run time, a run-time panic occurs.
+
+
+Type assertions
+
+
+For an expression x of interface type
+and a type T, the primary expression
+
+
+
+x.(T)
+
+
+
+asserts that x is not nil
+and that the value stored in x is of type T.
+The notation x.(T) is called a type assertion.
+
+
+More precisely, if T is not an interface type, x.(T) asserts
+that the dynamic type of x is identical
+to the type T.
+In this case, T must implement the (interface) type of x;
+otherwise the type assertion is invalid since it is not possible for x
+to store a value of type T.
+If T is an interface type, x.(T) asserts that the dynamic type
+of x implements the interface T.
+
+
+If the type assertion holds, the value of the expression is the value
+stored in x and its type is T. If the type assertion is false,
+a run-time panic occurs.
+In other words, even though the dynamic type of x
+is known only at run time, the type of x.(T) is
+known to be T in a correct program.
+
+
+
+var x interface{} = 7 // x has dynamic type int and value 7
+i := x.(int) // i has type int and value 7
+
+type I interface { m() }
+
+func f(y I) {
+ s := y.(string) // illegal: string does not implement I (missing method m)
+ r := y.(io.Reader) // r has type io.Reader and the dynamic type of y must implement both I and io.Reader
+ …
+}
+
+
+
+A type assertion used in an assignment or initialization of the special form
+
+
+
+v, ok = x.(T)
+v, ok := x.(T)
+var v, ok = x.(T)
+var v, ok interface{} = x.(T) // dynamic types of v and ok are T and bool
+
+
+
+yields an additional untyped boolean value. The value of ok is true
+if the assertion holds. Otherwise it is false and the value of v is
+the zero value for type T.
+No run-time panic occurs in this case.
+
+
+
+Calls
+
+
+Given an expression f of function type
+F,
+
+
+
+f(a1, a2, … an)
+
+
+
+calls f with arguments a1, a2, … an.
+Except for one special case, arguments must be single-valued expressions
+assignable to the parameter types of
+F and are evaluated before the function is called.
+The type of the expression is the result type
+of F.
+A method invocation is similar but the method itself
+is specified as a selector upon a value of the receiver type for
+the method.
+
+
+
+math.Atan2(x, y) // function call
+var pt *Point
+pt.Scale(3.5) // method call with receiver pt
+
+
+
+In a function call, the function value and arguments are evaluated in
+the usual order.
+After they are evaluated, the parameters of the call are passed by value to the function
+and the called function begins execution.
+The return parameters of the function are passed by value
+back to the caller when the function returns.
+
+
+
+Calling a nil function value
+causes a run-time panic.
+
+
+
+As a special case, if the return values of a function or method
+g are equal in number and individually
+assignable to the parameters of another function or method
+f, then the call f(g(parameters_of_g))
+will invoke f after binding the return values of
+g to the parameters of f in order. The call
+of f must contain no parameters other than the call of g,
+and g must have at least one return value.
+If f has a final ... parameter, it is
+assigned the return values of g that remain after
+assignment of regular parameters.
+
+
+
+func Split(s string, pos int) (string, string) {
+ return s[0:pos], s[pos:]
+}
+
+func Join(s, t string) string {
+ return s + t
+}
+
+if Join(Split(value, len(value)/2)) != value {
+ log.Panic("test fails")
+}
+
+
+
+A method call x.m() is valid if the method set
+of (the type of) x contains m and the
+argument list can be assigned to the parameter list of m.
+If x is addressable and &x's method
+set contains m, x.m() is shorthand
+for (&x).m():
+
+
+
+var p Point
+p.Scale(3.5)
+
+
+
+There is no distinct method type and there are no method literals.
+
+
+Passing arguments to ... parameters
+
+
+If f is variadic with a final
+parameter p of type ...T, then within f
+the type of p is equivalent to type []T.
+If f is invoked with no actual arguments for p,
+the value passed to p is nil.
+Otherwise, the value passed is a new slice
+of type []T with a new underlying array whose successive elements
+are the actual arguments, which all must be assignable
+to T. The length and capacity of the slice is therefore
+the number of arguments bound to p and may differ for each
+call site.
+
+
+
+Given the function and calls
+
+
+func Greeting(prefix string, who ...string)
+Greeting("nobody")
+Greeting("hello:", "Joe", "Anna", "Eileen")
+
+
+
+within Greeting, who will have the value
+nil in the first call, and
+[]string{"Joe", "Anna", "Eileen"} in the second.
+
+
+
+If the final argument is assignable to a slice type []T and
+is followed by ..., it is passed unchanged as the value
+for a ...T parameter. In this case no new slice is created.
+
+
+
+Given the slice s and call
+
+
+
+s := []string{"James", "Jasmine"}
+Greeting("goodbye:", s...)
+
+
+
+within Greeting, who will have the same value as s
+with the same underlying array.
+
+
+
+Operators
+
+
+Operators combine operands into expressions.
+
+
+
+Expression = UnaryExpr | Expression binary_op Expression .
+UnaryExpr = PrimaryExpr | unary_op UnaryExpr .
+
+binary_op = "||" | "&&" | rel_op | add_op | mul_op .
+rel_op = "==" | "!=" | "<" | "<=" | ">" | ">=" .
+add_op = "+" | "-" | "|" | "^" .
+mul_op = "*" | "/" | "%" | "<<" | ">>" | "&" | "&^" .
+
+unary_op = "+" | "-" | "!" | "^" | "*" | "&" | "<-" .
+
+
+
+Comparisons are discussed elsewhere.
+For other binary operators, the operand types must be identical
+unless the operation involves shifts or untyped constants.
+For operations involving constants only, see the section on
+constant expressions.
+
+
+
+Except for shift operations, if one operand is an untyped constant
+and the other operand is not, the constant is implicitly converted
+to the type of the other operand.
+
+
+
+The right operand in a shift expression must have integer type
+or be an untyped constant representable by a
+value of type uint.
+If the left operand of a non-constant shift expression is an untyped constant,
+it is first implicitly converted to the type it would assume if the shift expression were
+replaced by its left operand alone.
+
+
+
+var a [1024]byte
+var s uint = 33
+
+// The results of the following examples are given for 64-bit ints.
+var i = 1<<s // 1 has type int
+var j int32 = 1<<s // 1 has type int32; j == 0
+var k = uint64(1<<s) // 1 has type uint64; k == 1<<33
+var m int = 1.0<<s // 1.0 has type int; m == 1<<33
+var n = 1.0<<s == j // 1.0 has type int32; n == true
+var o = 1<<s == 2<<s // 1 and 2 have type int; o == false
+var p = 1<<s == 1<<33 // 1 has type int; p == true
+var u = 1.0<<s // illegal: 1.0 has type float64, cannot shift
+var u1 = 1.0<<s != 0 // illegal: 1.0 has type float64, cannot shift
+var u2 = 1<<s != 1.0 // illegal: 1 has type float64, cannot shift
+var v float32 = 1<<s // illegal: 1 has type float32, cannot shift
+var w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression; w == 1<<33
+var x = a[1.0<<s] // panics: 1.0 has type int, but 1<<33 overflows array bounds
+var b = make([]byte, 1.0<<s) // 1.0 has type int; len(b) == 1<<33
+
+// The results of the following examples are given for 32-bit ints,
+// which means the shifts will overflow.
+var mm int = 1.0<<s // 1.0 has type int; mm == 0
+var oo = 1<<s == 2<<s // 1 and 2 have type int; oo == true
+var pp = 1<<s == 1<<33 // illegal: 1 has type int, but 1<<33 overflows int
+var xx = a[1.0<<s] // 1.0 has type int; xx == a[0]
+var bb = make([]byte, 1.0<<s) // 1.0 has type int; len(bb) == 0
+
+
+Operator precedence
+
+Unary operators have the highest precedence.
+As the ++ and -- operators form
+statements, not expressions, they fall
+outside the operator hierarchy.
+As a consequence, statement *p++ is the same as (*p)++.
+
+There are five precedence levels for binary operators.
+Multiplication operators bind strongest, followed by addition
+operators, comparison operators, && (logical AND),
+and finally || (logical OR):
+
+
+
+Precedence Operator
+ 5 * / % << >> & &^
+ 4 + - | ^
+ 3 == != < <= > >=
+ 2 &&
+ 1 ||
+
+
+
+Binary operators of the same precedence associate from left to right.
+For instance, x / y * z is the same as (x / y) * z.
+
+
+
++x
+23 + 3*x[i]
+x <= f()
+^a >> b
+f() || g()
+x == y+1 && <-chanInt > 0
+
+
+
+Arithmetic operators
+
+Arithmetic operators apply to numeric values and yield a result of the same
+type as the first operand. The four standard arithmetic operators (+,
+-, *, /) apply to integer,
+floating-point, and complex types; + also applies to strings.
+The bitwise logical and shift operators apply to integers only.
+
+
+
++ sum integers, floats, complex values, strings
+- difference integers, floats, complex values
+* product integers, floats, complex values
+/ quotient integers, floats, complex values
+% remainder integers
+
+& bitwise AND integers
+| bitwise OR integers
+^ bitwise XOR integers
+&^ bit clear (AND NOT) integers
+
+<< left shift integer << integer >= 0
+>> right shift integer >> integer >= 0
+
+
+
+Integer operators
+
+
+For two integer values x and y, the integer quotient
+q = x / y and remainder r = x % y satisfy the following
+relationships:
+
+
+
+x = q*y + r and |r| < |y|
+
+
+
+with x / y truncated towards zero
+("truncated division").
+
+
+
+ x y x / y x % y
+ 5 3 1 2
+-5 3 -1 -2
+ 5 -3 -1 2
+-5 -3 1 -2
+
+
+
+The one exception to this rule is that if the dividend x is
+the most negative value for the int type of x, the quotient
+q = x / -1 is equal to x (and r = 0)
+due to two's-complement integer overflow:
+
+
+
+ x, q
+int8 -128
+int16 -32768
+int32 -2147483648
+int64 -9223372036854775808
+
+
+
+If the divisor is a constant, it must not be zero.
+If the divisor is zero at run time, a run-time panic occurs.
+If the dividend is non-negative and the divisor is a constant power of 2,
+the division may be replaced by a right shift, and computing the remainder may
+be replaced by a bitwise AND operation:
+
+
+
+ x x / 4 x % 4 x >> 2 x & 3
+ 11 2 3 2 3
+-11 -2 -3 -3 1
+
+
+
+The shift operators shift the left operand by the shift count specified by the
+right operand, which must be non-negative. If the shift count is negative at run time,
+a run-time panic occurs.
+The shift operators implement arithmetic shifts if the left operand is a signed
+integer and logical shifts if it is an unsigned integer.
+There is no upper limit on the shift count. Shifts behave
+as if the left operand is shifted n times by 1 for a shift
+count of n.
+As a result, x << 1 is the same as x*2
+and x >> 1 is the same as
+x/2 but truncated towards negative infinity.
+
+
+
+For integer operands, the unary operators
++, -, and ^ are defined as
+follows:
+
+
+
++x is 0 + x
+-x negation is 0 - x
+^x bitwise complement is m ^ x with m = "all bits set to 1" for unsigned x
+ and m = -1 for signed x
+
+
+
+Integer overflow
+
+
+For unsigned integer values, the operations +,
+-, *, and << are
+computed modulo 2n, where n is the bit width of
+the unsigned integer's type.
+Loosely speaking, these unsigned integer operations
+discard high bits upon overflow, and programs may rely on "wrap around".
+
+
+For signed integers, the operations +,
+-, *, /, and << may legally
+overflow and the resulting value exists and is deterministically defined
+by the signed integer representation, the operation, and its operands.
+Overflow does not cause a run-time panic.
+A compiler may not optimize code under the assumption that overflow does
+not occur. For instance, it may not assume that x < x + 1 is always true.
+
+
+
+Floating-point operators
+
+
+For floating-point and complex numbers,
++x is the same as x,
+while -x is the negation of x.
+The result of a floating-point or complex division by zero is not specified beyond the
+IEEE-754 standard; whether a run-time panic
+occurs is implementation-specific.
+
+
+
+An implementation may combine multiple floating-point operations into a single
+fused operation, possibly across statements, and produce a result that differs
+from the value obtained by executing and rounding the instructions individually.
+An explicit floating-point type conversion rounds to
+the precision of the target type, preventing fusion that would discard that rounding.
+
+
+
+For instance, some architectures provide a "fused multiply and add" (FMA) instruction
+that computes x*y + z without rounding the intermediate result x*y.
+These examples show when a Go implementation can use that instruction:
+
+
+
+// FMA allowed for computing r, because x*y is not explicitly rounded:
+r = x*y + z
+r = z; r += x*y
+t = x*y; r = t + z
+*p = x*y; r = *p + z
+r = x*y + float64(z)
+
+// FMA disallowed for computing r, because it would omit rounding of x*y:
+r = float64(x*y) + z
+r = z; r += float64(x*y)
+t = float64(x*y); r = t + z
+
+
+String concatenation
+
+
+Strings can be concatenated using the + operator
+or the += assignment operator:
+
+
+
+s := "hi" + string(c)
+s += " and good bye"
+
+
+
+String addition creates a new string by concatenating the operands.
+
+
+
+Comparison operators
+
+
+Comparison operators compare two operands and yield an untyped boolean value.
+
+
+
+== equal
+!= not equal
+< less
+<= less or equal
+> greater
+>= greater or equal
+
+
+
+In any comparison, the first operand
+must be assignable
+to the type of the second operand, or vice versa.
+
+
+The equality operators == and != apply
+to operands that are comparable.
+The ordering operators <, <=, >, and >=
+apply to operands that are ordered.
+These terms and the result of the comparisons are defined as follows:
+
+
+
+ -
+ Boolean values are comparable.
+ Two boolean values are equal if they are either both
+ true or both false.
+
+
+ -
+ Integer values are comparable and ordered, in the usual way.
+
+
+ -
+ Floating-point values are comparable and ordered,
+ as defined by the IEEE-754 standard.
+
+
+ -
+ Complex values are comparable.
+ Two complex values u and v are
+ equal if both real(u) == real(v) and
+ imag(u) == imag(v).
+
+
+ -
+ String values are comparable and ordered, lexically byte-wise.
+
+
+ -
+ Pointer values are comparable.
+ Two pointer values are equal if they point to the same variable or if both have value nil.
+ Pointers to distinct zero-size variables may or may not be equal.
+
+
+ -
+ Channel values are comparable.
+ Two channel values are equal if they were created by the same call to
+ make
+ or if both have value nil.
+
+
+ -
+ Interface values are comparable.
+ Two interface values are equal if they have identical dynamic types
+ and equal dynamic values or if both have value nil.
+
+
+ -
+ A value x of non-interface type X and
+ a value t of interface type T are comparable when values
+ of type X are comparable and
+ X implements T.
+ They are equal if t's dynamic type is identical to X
+ and t's dynamic value is equal to x.
+
+
+ -
+ Struct values are comparable if all their fields are comparable.
+ Two struct values are equal if their corresponding
+ non-blank fields are equal.
+
+
+ -
+ Array values are comparable if values of the array element type are comparable.
+ Two array values are equal if their corresponding elements are equal.
+
+
+
+
+A comparison of two interface values with identical dynamic types
+causes a run-time panic if values
+of that type are not comparable. This behavior applies not only to direct interface
+value comparisons but also when comparing arrays of interface values
+or structs with interface-valued fields.
+
+
+
+Slice, map, and function values are not comparable.
+However, as a special case, a slice, map, or function value may
+be compared to the predeclared identifier nil.
+Comparison of pointer, channel, and interface values to nil
+is also allowed and follows from the general rules above.
+
+
+
+const c = 3 < 4 // c is the untyped boolean constant true
+
+type MyBool bool
+var x, y int
+var (
+ // The result of a comparison is an untyped boolean.
+ // The usual assignment rules apply.
+ b3 = x == y // b3 has type bool
+ b4 bool = x == y // b4 has type bool
+ b5 MyBool = x == y // b5 has type MyBool
+)
+
+
+Logical operators
+
+
+Logical operators apply to boolean values
+and yield a result of the same type as the operands.
+The right operand is evaluated conditionally.
+
+
+
+&& conditional AND p && q is "if p then q else false"
+|| conditional OR p || q is "if p then true else q"
+! NOT !p is "not p"
+
+
+
+Address operators
+
+
+For an operand x of type T, the address operation
+&x generates a pointer of type *T to x.
+The operand must be addressable,
+that is, either a variable, pointer indirection, or slice indexing
+operation; or a field selector of an addressable struct operand;
+or an array indexing operation of an addressable array.
+As an exception to the addressability requirement, x may also be a
+(possibly parenthesized)
+composite literal.
+If the evaluation of x would cause a run-time panic,
+then the evaluation of &x does too.
+
+
+
+For an operand x of pointer type *T, the pointer
+indirection *x denotes the variable of type T pointed
+to by x.
+If x is nil, an attempt to evaluate *x
+will cause a run-time panic.
+
+
+
+&x
+&a[f(2)]
+&Point{2, 3}
+*p
+*pf(x)
+
+var x *int = nil
+*x // causes a run-time panic
+&*x // causes a run-time panic
+
+
+
+Receive operator
+
+
+For an operand ch of channel type,
+the value of the receive operation <-ch is the value received
+from the channel ch. The channel direction must permit receive operations,
+and the type of the receive operation is the element type of the channel.
+The expression blocks until a value is available.
+Receiving from a nil channel blocks forever.
+A receive operation on a closed channel can always proceed
+immediately, yielding the element type's zero value
+after any previously sent values have been received.
+
+
+
+v1 := <-ch
+v2 = <-ch
+f(<-ch)
+<-strobe // wait until clock pulse and discard received value
+
+
+
+A receive expression used in an assignment or initialization of the special form
+
+
+
+x, ok = <-ch
+x, ok := <-ch
+var x, ok = <-ch
+var x, ok T = <-ch
+
+
+
+yields an additional untyped boolean result reporting whether the
+communication succeeded. The value of ok is true
+if the value received was delivered by a successful send operation to the
+channel, or false if it is a zero value generated because the
+channel is closed and empty.
+
+
+
+Conversions
+
+
+A conversion changes the type of an expression
+to the type specified by the conversion.
+A conversion may appear literally in the source, or it may be implied
+by the context in which an expression appears.
+
+
+
+An explicit conversion is an expression of the form T(x)
+where T is a type and x is an expression
+that can be converted to type T.
+
+
+
+Conversion = Type "(" Expression [ "," ] ")" .
+
+
+
+If the type starts with the operator * or <-,
+or if the type starts with the keyword func
+and has no result list, it must be parenthesized when
+necessary to avoid ambiguity:
+
+
+
+*Point(p) // same as *(Point(p))
+(*Point)(p) // p is converted to *Point
+<-chan int(c) // same as <-(chan int(c))
+(<-chan int)(c) // c is converted to <-chan int
+func()(x) // function signature func() x
+(func())(x) // x is converted to func()
+(func() int)(x) // x is converted to func() int
+func() int(x) // x is converted to func() int (unambiguous)
+
+
+
+A constant value x can be converted to
+type T if x is representable
+by a value of T.
+As a special case, an integer constant x can be explicitly converted to a
+string type using the
+same rule
+as for non-constant x.
+
+
+
+Converting a constant yields a typed constant as result.
+
+
+
+uint(iota) // iota value of type uint
+float32(2.718281828) // 2.718281828 of type float32
+complex128(1) // 1.0 + 0.0i of type complex128
+float32(0.49999999) // 0.5 of type float32
+float64(-1e-1000) // 0.0 of type float64
+string('x') // "x" of type string
+string(0x266c) // "♬" of type string
+MyString("foo" + "bar") // "foobar" of type MyString
+string([]byte{'a'}) // not a constant: []byte{'a'} is not a constant
+(*int)(nil) // not a constant: nil is not a constant, *int is not a boolean, numeric, or string type
+int(1.2) // illegal: 1.2 cannot be represented as an int
+string(65.0) // illegal: 65.0 is not an integer constant
+
+
+
+A non-constant value x can be converted to type T
+in any of these cases:
+
+
+
+ -
+ x is assignable
+ to T.
+
+ -
+ ignoring struct tags (see below),
+ x's type and T have identical
+ underlying types.
+
+ -
+ ignoring struct tags (see below),
+ x's type and T are pointer types
+ that are not defined types,
+ and their pointer base types have identical underlying types.
+
+ -
+ x's type and T are both integer or floating
+ point types.
+
+ -
+ x's type and T are both complex types.
+
+ -
+ x is an integer or a slice of bytes or runes
+ and T is a string type.
+
+ -
+ x is a string and T is a slice of bytes or runes.
+
+ -
+ x is a slice, T is a pointer to an array,
+ and the slice and array types have identical element types.
+
+
+
+
+Struct tags are ignored when comparing struct types
+for identity for the purpose of conversion:
+
+
+
+type Person struct {
+ Name string
+ Address *struct {
+ Street string
+ City string
+ }
+}
+
+var data *struct {
+ Name string `json:"name"`
+ Address *struct {
+ Street string `json:"street"`
+ City string `json:"city"`
+ } `json:"address"`
+}
+
+var person = (*Person)(data) // ignoring tags, the underlying types are identical
+
+
+
+Specific rules apply to (non-constant) conversions between numeric types or
+to and from a string type.
+These conversions may change the representation of x
+and incur a run-time cost.
+All other conversions only change the type but not the representation
+of x.
+
+
+
+There is no linguistic mechanism to convert between pointers and integers.
+The package unsafe
+implements this functionality under
+restricted circumstances.
+
+
+Conversions between numeric types
+
+
+For the conversion of non-constant numeric values, the following rules apply:
+
+
+
+-
+When converting between integer types, if the value is a signed integer, it is
+sign extended to implicit infinite precision; otherwise it is zero extended.
+It is then truncated to fit in the result type's size.
+For example, if v := uint16(0x10F0), then uint32(int8(v)) == 0xFFFFFFF0.
+The conversion always yields a valid value; there is no indication of overflow.
+
+-
+When converting a floating-point number to an integer, the fraction is discarded
+(truncation towards zero).
+
+-
+When converting an integer or floating-point number to a floating-point type,
+or a complex number to another complex type, the result value is rounded
+to the precision specified by the destination type.
+For instance, the value of a variable x of type float32
+may be stored using additional precision beyond that of an IEEE-754 32-bit number,
+but float32(x) represents the result of rounding x's value to
+32-bit precision. Similarly, x + 0.1 may use more than 32 bits
+of precision, but float32(x + 0.1) does not.
+
+
+
+
+In all non-constant conversions involving floating-point or complex values,
+if the result type cannot represent the value the conversion
+succeeds but the result value is implementation-dependent.
+
+
+Conversions to and from a string type
+
+
+-
+Converting a signed or unsigned integer value to a string type yields a
+string containing the UTF-8 representation of the integer. Values outside
+the range of valid Unicode code points are converted to
"\uFFFD".
+
+
+string('a') // "a"
+string(-1) // "\ufffd" == "\xef\xbf\xbd"
+string(0xf8) // "\u00f8" == "ø" == "\xc3\xb8"
+type MyString string
+MyString(0x65e5) // "\u65e5" == "日" == "\xe6\x97\xa5"
+
+
+
+-
+Converting a slice of bytes to a string type yields
+a string whose successive bytes are the elements of the slice.
+
+
+string([]byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+string([]byte{}) // ""
+string([]byte(nil)) // ""
+
+type MyBytes []byte
+string(MyBytes{'h', 'e', 'l', 'l', '\xc3', '\xb8'}) // "hellø"
+
+
+
+-
+Converting a slice of runes to a string type yields
+a string that is the concatenation of the individual rune values
+converted to strings.
+
+
+string([]rune{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+string([]rune{}) // ""
+string([]rune(nil)) // ""
+
+type MyRunes []rune
+string(MyRunes{0x767d, 0x9d6c, 0x7fd4}) // "\u767d\u9d6c\u7fd4" == "白鵬翔"
+
+
+
+-
+Converting a value of a string type to a slice of bytes type
+yields a slice whose successive elements are the bytes of the string.
+
+
+[]byte("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+[]byte("") // []byte{}
+
+MyBytes("hellø") // []byte{'h', 'e', 'l', 'l', '\xc3', '\xb8'}
+
+
+
+-
+Converting a value of a string type to a slice of runes type
+yields a slice containing the individual Unicode code points of the string.
+
+
+[]rune(MyString("白鵬翔")) // []rune{0x767d, 0x9d6c, 0x7fd4}
+[]rune("") // []rune{}
+
+MyRunes("白鵬翔") // []rune{0x767d, 0x9d6c, 0x7fd4}
+
+
+
+
+Conversions from slice to array pointer
+
+
+Converting a slice to an array pointer yields a pointer to the underlying array of the slice.
+If the length of the slice is less than the length of the array,
+a run-time panic occurs.
+
+
+
+s := make([]byte, 2, 4)
+s0 := (*[0]byte)(s) // s0 != nil
+s1 := (*[1]byte)(s[1:]) // &s1[0] == &s[1]
+s2 := (*[2]byte)(s) // &s2[0] == &s[0]
+s4 := (*[4]byte)(s) // panics: len([4]byte) > len(s)
+
+var t []string
+t0 := (*[0]string)(t) // t0 == nil
+t1 := (*[1]string)(t) // panics: len([1]string) > len(t)
+
+u := make([]byte, 0)
+u0 := (*[0]byte)(u) // u0 != nil
+
+
+Constant expressions
+
+
+Constant expressions may contain only constant
+operands and are evaluated at compile time.
+
+
+
+Untyped boolean, numeric, and string constants may be used as operands
+wherever it is legal to use an operand of boolean, numeric, or string type,
+respectively.
+
+
+
+A constant comparison always yields
+an untyped boolean constant. If the left operand of a constant
+shift expression is an untyped constant, the
+result is an integer constant; otherwise it is a constant of the same
+type as the left operand, which must be of
+integer type.
+
+
+
+Any other operation on untyped constants results in an untyped constant of the
+same kind; that is, a boolean, integer, floating-point, complex, or string
+constant.
+If the untyped operands of a binary operation (other than a shift) are of
+different kinds, the result is of the operand's kind that appears later in this
+list: integer, rune, floating-point, complex.
+For example, an untyped integer constant divided by an
+untyped complex constant yields an untyped complex constant.
+
+
+
+const a = 2 + 3.0 // a == 5.0 (untyped floating-point constant)
+const b = 15 / 4 // b == 3 (untyped integer constant)
+const c = 15 / 4.0 // c == 3.75 (untyped floating-point constant)
+const Θ float64 = 3/2 // Θ == 1.0 (type float64, 3/2 is integer division)
+const Π float64 = 3/2. // Π == 1.5 (type float64, 3/2. is float division)
+const d = 1 << 3.0 // d == 8 (untyped integer constant)
+const e = 1.0 << 3 // e == 8 (untyped integer constant)
+const f = int32(1) << 33 // illegal (constant 8589934592 overflows int32)
+const g = float64(2) >> 1 // illegal (float64(2) is a typed floating-point constant)
+const h = "foo" > "bar" // h == true (untyped boolean constant)
+const j = true // j == true (untyped boolean constant)
+const k = 'w' + 1 // k == 'x' (untyped rune constant)
+const l = "hi" // l == "hi" (untyped string constant)
+const m = string(k) // m == "x" (type string)
+const Σ = 1 - 0.707i // (untyped complex constant)
+const Δ = Σ + 2.0e-4 // (untyped complex constant)
+const Φ = iota*1i - 1/1i // (untyped complex constant)
+
+
+
+Applying the built-in function complex to untyped
+integer, rune, or floating-point constants yields
+an untyped complex constant.
+
+
+
+const ic = complex(0, c) // ic == 3.75i (untyped complex constant)
+const iΘ = complex(0, Θ) // iΘ == 1i (type complex128)
+
+
+
+Constant expressions are always evaluated exactly; intermediate values and the
+constants themselves may require precision significantly larger than supported
+by any predeclared type in the language. The following are legal declarations:
+
+
+
+const Huge = 1 << 100 // Huge == 1267650600228229401496703205376 (untyped integer constant)
+const Four int8 = Huge >> 98 // Four == 4 (type int8)
+
+
+
+The divisor of a constant division or remainder operation must not be zero:
+
+
+
+3.14 / 0.0 // illegal: division by zero
+
+
+
+The values of typed constants must always be accurately
+representable by values
+of the constant type. The following constant expressions are illegal:
+
+
+
+uint(-1) // -1 cannot be represented as a uint
+int(3.14) // 3.14 cannot be represented as an int
+int64(Huge) // 1267650600228229401496703205376 cannot be represented as an int64
+Four * 300 // operand 300 cannot be represented as an int8 (type of Four)
+Four * 100 // product 400 cannot be represented as an int8 (type of Four)
+
+
+
+The mask used by the unary bitwise complement operator ^ matches
+the rule for non-constants: the mask is all 1s for unsigned constants
+and -1 for signed and untyped constants.
+
+
+
+^1 // untyped integer constant, equal to -2
+uint8(^1) // illegal: same as uint8(-2), -2 cannot be represented as a uint8
+^uint8(1) // typed uint8 constant, same as 0xFF ^ uint8(1) = uint8(0xFE)
+int8(^1) // same as int8(-2)
+^int8(1) // same as -1 ^ int8(1) = -2
+
+
+
+Implementation restriction: A compiler may use rounding while
+computing untyped floating-point or complex constant expressions; see
+the implementation restriction in the section
+on constants. This rounding may cause a
+floating-point constant expression to be invalid in an integer
+context, even if it would be integral when calculated using infinite
+precision, and vice versa.
+
+
+
+Order of evaluation
+
+
+At package level, initialization dependencies
+determine the evaluation order of individual initialization expressions in
+variable declarations.
+Otherwise, when evaluating the operands of an
+expression, assignment, or
+return statement,
+all function calls, method calls, and
+communication operations are evaluated in lexical left-to-right
+order.
+
+
+
+For example, in the (function-local) assignment
+
+
+y[f()], ok = g(h(), i()+x[j()], <-c), k()
+
+
+the function calls and communication happen in the order
+f(), h(), i(), j(),
+<-c, g(), and k().
+However, the order of those events compared to the evaluation
+and indexing of x and the evaluation
+of y is not specified.
+
+
+
+a := 1
+f := func() int { a++; return a }
+x := []int{a, f()} // x may be [1, 2] or [2, 2]: evaluation order between a and f() is not specified
+m := map[int]int{a: 1, a: 2} // m may be {2: 1} or {2: 2}: evaluation order between the two map assignments is not specified
+n := map[int]int{a: f()} // n may be {2: 3} or {3: 3}: evaluation order between the key and the value is not specified
+
+
+
+At package level, initialization dependencies override the left-to-right rule
+for individual initialization expressions, but not for operands within each
+expression:
+
+
+
+var a, b, c = f() + v(), g(), sqr(u()) + v()
+
+func f() int { return c }
+func g() int { return a }
+func sqr(x int) int { return x*x }
+
+// functions u and v are independent of all other variables and functions
+
+
+
+The function calls happen in the order
+u(), sqr(), v(),
+f(), v(), and g().
+
+
+
+Floating-point operations within a single expression are evaluated according to
+the associativity of the operators. Explicit parentheses affect the evaluation
+by overriding the default associativity.
+In the expression x + (y + z) the addition y + z
+is performed before adding x.
+
+
+Statements
+
+
+Statements control execution.
+
+
+
+Statement =
+ Declaration | LabeledStmt | SimpleStmt |
+ GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
+ FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
+ DeferStmt .
+
+SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl .
+
+
+Terminating statements
+
+
+A terminating statement interrupts the regular flow of control in
+a block. The following statements are terminating:
+
+
+
+-
+ A "return" or
+ "goto" statement.
+
+
+
+
+-
+ A call to the built-in function
+
panic.
+
+
+
+
+-
+ A block in which the statement list ends in a terminating statement.
+
+
+
+
+-
+ An "if" statement in which:
+
+ - the "else" branch is present, and
+ - both branches are terminating statements.
+
+
+
+-
+ A "for" statement in which:
+
+ - there are no "break" statements referring to the "for" statement, and
+ - the loop condition is absent, and
+ - the "for" statement does not use a range clause.
+
+
+
+-
+ A "switch" statement in which:
+
+ - there are no "break" statements referring to the "switch" statement,
+ - there is a default case, and
+ - the statement lists in each case, including the default, end in a terminating
+ statement, or a possibly labeled "fallthrough"
+ statement.
+
+
+
+-
+ A "select" statement in which:
+
+ - there are no "break" statements referring to the "select" statement, and
+ - the statement lists in each case, including the default if present,
+ end in a terminating statement.
+
+
+
+-
+ A labeled statement labeling
+ a terminating statement.
+
+
+
+
+All other statements are not terminating.
+
+
+
+A statement list ends in a terminating statement if the list
+is not empty and its final non-empty statement is terminating.
+
+
+
+Empty statements
+
+
+The empty statement does nothing.
+
+
+
+EmptyStmt = .
+
+
+
+Labeled statements
+
+
+A labeled statement may be the target of a goto,
+break or continue statement.
+
+
+
+LabeledStmt = Label ":" Statement .
+Label = identifier .
+
+
+
+Error: log.Panic("error encountered")
+
+
+
+Expression statements
+
+
+With the exception of specific built-in functions,
+function and method calls and
+receive operations
+can appear in statement context. Such statements may be parenthesized.
+
+
+
+ExpressionStmt = Expression .
+
+
+
+The following built-in functions are not permitted in statement context:
+
+
+
+append cap complex imag len make new real
+unsafe.Add unsafe.Alignof unsafe.Offsetof unsafe.Sizeof unsafe.Slice
+
+
+
+h(x+y)
+f.Close()
+<-ch
+(<-ch)
+len("foo") // illegal if len is the built-in function
+
+
+
+Send statements
+
+
+A send statement sends a value on a channel.
+The channel expression must be of channel type,
+the channel direction must permit send operations,
+and the type of the value to be sent must be assignable
+to the channel's element type.
+
+
+
+SendStmt = Channel "<-" Expression .
+Channel = Expression .
+
+
+
+Both the channel and the value expression are evaluated before communication
+begins. Communication blocks until the send can proceed.
+A send on an unbuffered channel can proceed if a receiver is ready.
+A send on a buffered channel can proceed if there is room in the buffer.
+A send on a closed channel proceeds by causing a run-time panic.
+A send on a nil channel blocks forever.
+
+
+
+ch <- 3 // send value 3 to channel ch
+
+
+
+IncDec statements
+
+
+The "++" and "--" statements increment or decrement their operands
+by the untyped constant 1.
+As with an assignment, the operand must be addressable
+or a map index expression.
+
+
+
+IncDecStmt = Expression ( "++" | "--" ) .
+
+
+
+The following assignment statements are semantically
+equivalent:
+
+
+
+IncDec statement Assignment
+x++ x += 1
+x-- x -= 1
+
+
+
+Assignments
+
+
+Assignment = ExpressionList assign_op ExpressionList .
+
+assign_op = [ add_op | mul_op ] "=" .
+
+
+
+Each left-hand side operand must be addressable,
+a map index expression, or (for = assignments only) the
+blank identifier.
+Operands may be parenthesized.
+
+
+
+x = 1
+*p = f()
+a[i] = 23
+(k) = <-ch // same as: k = <-ch
+
+
+
+An assignment operation x op=
+y where op is a binary arithmetic operator
+is equivalent to x = x op
+(y) but evaluates x
+only once. The op= construct is a single token.
+In assignment operations, both the left- and right-hand expression lists
+must contain exactly one single-valued expression, and the left-hand
+expression must not be the blank identifier.
+
+
+
+a[i] <<= 2
+i &^= 1<<n
+
+
+
+A tuple assignment assigns the individual elements of a multi-valued
+operation to a list of variables. There are two forms. In the
+first, the right hand operand is a single multi-valued expression
+such as a function call, a channel or
+map operation, or a type assertion.
+The number of operands on the left
+hand side must match the number of values. For instance, if
+f is a function returning two values,
+
+
+
+x, y = f()
+
+
+
+assigns the first value to x and the second to y.
+In the second form, the number of operands on the left must equal the number
+of expressions on the right, each of which must be single-valued, and the
+nth expression on the right is assigned to the nth
+operand on the left:
+
+
+
+one, two, three = '一', '二', '三'
+
+
+
+The blank identifier provides a way to
+ignore right-hand side values in an assignment:
+
+
+
+_ = x // evaluate x but ignore it
+x, _ = f() // evaluate f() but ignore second result value
+
+
+
+The assignment proceeds in two phases.
+First, the operands of index expressions
+and pointer indirections
+(including implicit pointer indirections in selectors)
+on the left and the expressions on the right are all
+evaluated in the usual order.
+Second, the assignments are carried out in left-to-right order.
+
+
+
+a, b = b, a // exchange a and b
+
+x := []int{1, 2, 3}
+i := 0
+i, x[i] = 1, 2 // set i = 1, x[0] = 2
+
+i = 0
+x[i], i = 2, 1 // set x[0] = 2, i = 1
+
+x[0], x[0] = 1, 2 // set x[0] = 1, then x[0] = 2 (so x[0] == 2 at end)
+
+x[1], x[3] = 4, 5 // set x[1] = 4, then panic setting x[3] = 5.
+
+type Point struct { x, y int }
+var p *Point
+x[2], p.x = 6, 7 // set x[2] = 6, then panic setting p.x = 7
+
+i = 2
+x = []int{3, 5, 7}
+for i, x[i] = range x { // set i, x[2] = 0, x[0]
+ break
+}
+// after this loop, i == 0 and x == []int{3, 5, 3}
+
+
+
+In assignments, each value must be assignable
+to the type of the operand to which it is assigned, with the following special cases:
+
+
+
+-
+ Any typed value may be assigned to the blank identifier.
+
+
+-
+ If an untyped constant
+ is assigned to a variable of interface type or the blank identifier,
+ the constant is first implicitly converted to its
+ default type.
+
+
+-
+ If an untyped boolean value is assigned to a variable of interface type or
+ the blank identifier, it is first implicitly converted to type
bool.
+
+
+
+If statements
+
+
+"If" statements specify the conditional execution of two branches
+according to the value of a boolean expression. If the expression
+evaluates to true, the "if" branch is executed, otherwise, if
+present, the "else" branch is executed.
+
+
+
+IfStmt = "if" [ SimpleStmt ";" ] Expression Block [ "else" ( IfStmt | Block ) ] .
+
+
+
+if x > max {
+ x = max
+}
+
+
+
+The expression may be preceded by a simple statement, which
+executes before the expression is evaluated.
+
+
+
+if x := f(); x < y {
+ return x
+} else if x > z {
+ return z
+} else {
+ return y
+}
+
+
+
+Switch statements
+
+
+"Switch" statements provide multi-way execution.
+An expression or type is compared to the "cases"
+inside the "switch" to determine which branch
+to execute.
+
+
+
+SwitchStmt = ExprSwitchStmt | TypeSwitchStmt .
+
+
+
+There are two forms: expression switches and type switches.
+In an expression switch, the cases contain expressions that are compared
+against the value of the switch expression.
+In a type switch, the cases contain types that are compared against the
+type of a specially annotated switch expression.
+The switch expression is evaluated exactly once in a switch statement.
+
+
+Expression switches
+
+
+In an expression switch,
+the switch expression is evaluated and
+the case expressions, which need not be constants,
+are evaluated left-to-right and top-to-bottom; the first one that equals the
+switch expression
+triggers execution of the statements of the associated case;
+the other cases are skipped.
+If no case matches and there is a "default" case,
+its statements are executed.
+There can be at most one default case and it may appear anywhere in the
+"switch" statement.
+A missing switch expression is equivalent to the boolean value
+true.
+
+
+
+ExprSwitchStmt = "switch" [ SimpleStmt ";" ] [ Expression ] "{" { ExprCaseClause } "}" .
+ExprCaseClause = ExprSwitchCase ":" StatementList .
+ExprSwitchCase = "case" ExpressionList | "default" .
+
+
+
+If the switch expression evaluates to an untyped constant, it is first implicitly
+converted to its default type.
+The predeclared untyped value nil cannot be used as a switch expression.
+The switch expression type must be comparable.
+
+
+
+If a case expression is untyped, it is first implicitly converted
+to the type of the switch expression.
+For each (possibly converted) case expression x and the value t
+of the switch expression, x == t must be a valid comparison.
+
+
+
+In other words, the switch expression is treated as if it were used to declare and
+initialize a temporary variable t without explicit type; it is that
+value of t against which each case expression x is tested
+for equality.
+
+
+
+In a case or default clause, the last non-empty statement
+may be a (possibly labeled)
+"fallthrough" statement to
+indicate that control should flow from the end of this clause to
+the first statement of the next clause.
+Otherwise control flows to the end of the "switch" statement.
+A "fallthrough" statement may appear as the last statement of all
+but the last clause of an expression switch.
+
+
+
+The switch expression may be preceded by a simple statement, which
+executes before the expression is evaluated.
+
+
+
+switch tag {
+default: s3()
+case 0, 1, 2, 3: s1()
+case 4, 5, 6, 7: s2()
+}
+
+switch x := f(); { // missing switch expression means "true"
+case x < 0: return -x
+default: return x
+}
+
+switch {
+case x < y: f1()
+case x < z: f2()
+case x == 4: f3()
+}
+
+
+
+Implementation restriction: A compiler may disallow multiple case
+expressions evaluating to the same constant.
+For instance, the current compilers disallow duplicate integer,
+floating point, or string constants in case expressions.
+
+
+Type switches
+
+
+A type switch compares types rather than values. It is otherwise similar
+to an expression switch. It is marked by a special switch expression that
+has the form of a type assertion
+using the keyword type rather than an actual type:
+
+
+
+switch x.(type) {
+// cases
+}
+
+
+
+Cases then match actual types T against the dynamic type of the
+expression x. As with type assertions, x must be of
+interface type, and each non-interface type
+T listed in a case must implement the type of x.
+The types listed in the cases of a type switch must all be
+different.
+
+
+
+TypeSwitchStmt = "switch" [ SimpleStmt ";" ] TypeSwitchGuard "{" { TypeCaseClause } "}" .
+TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
+TypeCaseClause = TypeSwitchCase ":" StatementList .
+TypeSwitchCase = "case" TypeList | "default" .
+TypeList = Type { "," Type } .
+
+
+
+The TypeSwitchGuard may include a
+short variable declaration.
+When that form is used, the variable is declared at the end of the
+TypeSwitchCase in the implicit block of each clause.
+In clauses with a case listing exactly one type, the variable
+has that type; otherwise, the variable has the type of the expression
+in the TypeSwitchGuard.
+
+
+
+Instead of a type, a case may use the predeclared identifier
+nil;
+that case is selected when the expression in the TypeSwitchGuard
+is a nil interface value.
+There may be at most one nil case.
+
+
+
+Given an expression x of type interface{},
+the following type switch:
+
+
+
+switch i := x.(type) {
+case nil:
+ printString("x is nil") // type of i is type of x (interface{})
+case int:
+ printInt(i) // type of i is int
+case float64:
+ printFloat64(i) // type of i is float64
+case func(int) float64:
+ printFunction(i) // type of i is func(int) float64
+case bool, string:
+ printString("type is bool or string") // type of i is type of x (interface{})
+default:
+ printString("don't know the type") // type of i is type of x (interface{})
+}
+
+
+
+could be rewritten:
+
+
+
+v := x // x is evaluated exactly once
+if v == nil {
+ i := v // type of i is type of x (interface{})
+ printString("x is nil")
+} else if i, isInt := v.(int); isInt {
+ printInt(i) // type of i is int
+} else if i, isFloat64 := v.(float64); isFloat64 {
+ printFloat64(i) // type of i is float64
+} else if i, isFunc := v.(func(int) float64); isFunc {
+ printFunction(i) // type of i is func(int) float64
+} else {
+ _, isBool := v.(bool)
+ _, isString := v.(string)
+ if isBool || isString {
+ i := v // type of i is type of x (interface{})
+ printString("type is bool or string")
+ } else {
+ i := v // type of i is type of x (interface{})
+ printString("don't know the type")
+ }
+}
+
+
+
+The type switch guard may be preceded by a simple statement, which
+executes before the guard is evaluated.
+
+
+
+The "fallthrough" statement is not permitted in a type switch.
+
+
+For statements
+
+
+A "for" statement specifies repeated execution of a block. There are three forms:
+The iteration may be controlled by a single condition, a "for" clause, or a "range" clause.
+
+
+
+ForStmt = "for" [ Condition | ForClause | RangeClause ] Block .
+Condition = Expression .
+
+
+For statements with single condition
+
+
+In its simplest form, a "for" statement specifies the repeated execution of
+a block as long as a boolean condition evaluates to true.
+The condition is evaluated before each iteration.
+If the condition is absent, it is equivalent to the boolean value
+true.
+
+
+
+for a < b {
+ a *= 2
+}
+
+
+For statements with for clause
+
+
+A "for" statement with a ForClause is also controlled by its condition, but
+additionally it may specify an init
+and a post statement, such as an assignment,
+an increment or decrement statement. The init statement may be a
+short variable declaration, but the post statement must not.
+Variables declared by the init statement are re-used in each iteration.
+
+
+
+ForClause = [ InitStmt ] ";" [ Condition ] ";" [ PostStmt ] .
+InitStmt = SimpleStmt .
+PostStmt = SimpleStmt .
+
+
+
+for i := 0; i < 10; i++ {
+ f(i)
+}
+
+
+
+If non-empty, the init statement is executed once before evaluating the
+condition for the first iteration;
+the post statement is executed after each execution of the block (and
+only if the block was executed).
+Any element of the ForClause may be empty but the
+semicolons are
+required unless there is only a condition.
+If the condition is absent, it is equivalent to the boolean value
+true.
+
+
+
+for cond { S() } is the same as for ; cond ; { S() }
+for { S() } is the same as for true { S() }
+
+
+For statements with range clause
+
+
+A "for" statement with a "range" clause
+iterates through all entries of an array, slice, string or map,
+or values received on a channel. For each entry it assigns iteration values
+to corresponding iteration variables if present and then executes the block.
+
+
+
+RangeClause = [ ExpressionList "=" | IdentifierList ":=" ] "range" Expression .
+
+
+
+The expression on the right in the "range" clause is called the range expression,
+which may be an array, pointer to an array, slice, string, map, or channel permitting
+receive operations.
+As with an assignment, if present the operands on the left must be
+addressable or map index expressions; they
+denote the iteration variables. If the range expression is a channel, at most
+one iteration variable is permitted, otherwise there may be up to two.
+If the last iteration variable is the blank identifier,
+the range clause is equivalent to the same clause without that identifier.
+
+
+
+The range expression x is evaluated once before beginning the loop,
+with one exception: if at most one iteration variable is present and
+len(x) is constant,
+the range expression is not evaluated.
+
+
+
+Function calls on the left are evaluated once per iteration.
+For each iteration, iteration values are produced as follows
+if the respective iteration variables are present:
+
+
+
+Range expression 1st value 2nd value
+
+array or slice a [n]E, *[n]E, or []E index i int a[i] E
+string s string type index i int see below rune
+map m map[K]V key k K m[k] V
+channel c chan E, <-chan E element e E
+
+
+
+-
+For an array, pointer to array, or slice value
a, the index iteration
+values are produced in increasing order, starting at element index 0.
+If at most one iteration variable is present, the range loop produces
+iteration values from 0 up to len(a)-1 and does not index into the array
+or slice itself. For a nil slice, the number of iterations is 0.
+
+
+-
+For a string value, the "range" clause iterates over the Unicode code points
+in the string starting at byte index 0. On successive iterations, the index value will be the
+index of the first byte of successive UTF-8-encoded code points in the string,
+and the second value, of type
rune, will be the value of
+the corresponding code point. If the iteration encounters an invalid
+UTF-8 sequence, the second value will be 0xFFFD,
+the Unicode replacement character, and the next iteration will advance
+a single byte in the string.
+
+
+-
+The iteration order over maps is not specified
+and is not guaranteed to be the same from one iteration to the next.
+If a map entry that has not yet been reached is removed during iteration,
+the corresponding iteration value will not be produced. If a map entry is
+created during iteration, that entry may be produced during the iteration or
+may be skipped. The choice may vary for each entry created and from one
+iteration to the next.
+If the map is
nil, the number of iterations is 0.
+
+
+-
+For channels, the iteration values produced are the successive values sent on
+the channel until the channel is closed. If the channel
+is
nil, the range expression blocks forever.
+
+
+
+
+The iteration values are assigned to the respective
+iteration variables as in an assignment statement.
+
+
+
+The iteration variables may be declared by the "range" clause using a form of
+short variable declaration
+(:=).
+In this case their types are set to the types of the respective iteration values
+and their scope is the block of the "for"
+statement; they are re-used in each iteration.
+If the iteration variables are declared outside the "for" statement,
+after execution their values will be those of the last iteration.
+
+
+
+var testdata *struct {
+ a *[7]int
+}
+for i, _ := range testdata.a {
+ // testdata.a is never evaluated; len(testdata.a) is constant
+ // i ranges from 0 to 6
+ f(i)
+}
+
+var a [10]string
+for i, s := range a {
+ // type of i is int
+ // type of s is string
+ // s == a[i]
+ g(i, s)
+}
+
+var key string
+var val interface{} // element type of m is assignable to val
+m := map[string]int{"mon":0, "tue":1, "wed":2, "thu":3, "fri":4, "sat":5, "sun":6}
+for key, val = range m {
+ h(key, val)
+}
+// key == last map key encountered in iteration
+// val == map[key]
+
+var ch chan Work = producer()
+for w := range ch {
+ doWork(w)
+}
+
+// empty a channel
+for range ch {}
+
+
+
+Go statements
+
+
+A "go" statement starts the execution of a function call
+as an independent concurrent thread of control, or goroutine,
+within the same address space.
+
+
+
+GoStmt = "go" Expression .
+
+
+
+The expression must be a function or method call; it cannot be parenthesized.
+Calls of built-in functions are restricted as for
+expression statements.
+
+
+
+The function value and parameters are
+evaluated as usual
+in the calling goroutine, but
+unlike with a regular call, program execution does not wait
+for the invoked function to complete.
+Instead, the function begins executing independently
+in a new goroutine.
+When the function terminates, its goroutine also terminates.
+If the function has any return values, they are discarded when the
+function completes.
+
+
+
+go Server()
+go func(ch chan<- bool) { for { sleep(10); ch <- true }} (c)
+
+
+
+Select statements
+
+
+A "select" statement chooses which of a set of possible
+send or
+receive
+operations will proceed.
+It looks similar to a
+"switch" statement but with the
+cases all referring to communication operations.
+
+
+
+SelectStmt = "select" "{" { CommClause } "}" .
+CommClause = CommCase ":" StatementList .
+CommCase = "case" ( SendStmt | RecvStmt ) | "default" .
+RecvStmt = [ ExpressionList "=" | IdentifierList ":=" ] RecvExpr .
+RecvExpr = Expression .
+
+
+
+A case with a RecvStmt may assign the result of a RecvExpr to one or
+two variables, which may be declared using a
+short variable declaration.
+The RecvExpr must be a (possibly parenthesized) receive operation.
+There can be at most one default case and it may appear anywhere
+in the list of cases.
+
+
+
+Execution of a "select" statement proceeds in several steps:
+
+
+
+-
+For all the cases in the statement, the channel operands of receive operations
+and the channel and right-hand-side expressions of send statements are
+evaluated exactly once, in source order, upon entering the "select" statement.
+The result is a set of channels to receive from or send to,
+and the corresponding values to send.
+Any side effects in that evaluation will occur irrespective of which (if any)
+communication operation is selected to proceed.
+Expressions on the left-hand side of a RecvStmt with a short variable declaration
+or assignment are not yet evaluated.
+
+
+-
+If one or more of the communications can proceed,
+a single one that can proceed is chosen via a uniform pseudo-random selection.
+Otherwise, if there is a default case, that case is chosen.
+If there is no default case, the "select" statement blocks until
+at least one of the communications can proceed.
+
+
+-
+Unless the selected case is the default case, the respective communication
+operation is executed.
+
+
+-
+If the selected case is a RecvStmt with a short variable declaration or
+an assignment, the left-hand side expressions are evaluated and the
+received value (or values) are assigned.
+
+
+-
+The statement list of the selected case is executed.
+
+
+
+
+Since communication on nil channels can never proceed,
+a select with only nil channels and no default case blocks forever.
+
+
+
+var a []int
+var c, c1, c2, c3, c4 chan int
+var i1, i2 int
+select {
+case i1 = <-c1:
+ print("received ", i1, " from c1\n")
+case c2 <- i2:
+ print("sent ", i2, " to c2\n")
+case i3, ok := (<-c3): // same as: i3, ok := <-c3
+ if ok {
+ print("received ", i3, " from c3\n")
+ } else {
+ print("c3 is closed\n")
+ }
+case a[f()] = <-c4:
+ // same as:
+ // case t := <-c4
+ // a[f()] = t
+default:
+ print("no communication\n")
+}
+
+for { // send random sequence of bits to c
+ select {
+ case c <- 0: // note: no statement, no fallthrough, no folding of cases
+ case c <- 1:
+ }
+}
+
+select {} // block forever
+
+
+
+Return statements
+
+
+A "return" statement in a function F terminates the execution
+of F, and optionally provides one or more result values.
+Any functions deferred by F
+are executed before F returns to its caller.
+
+
+
+ReturnStmt = "return" [ ExpressionList ] .
+
+
+
+In a function without a result type, a "return" statement must not
+specify any result values.
+
+
+func noResult() {
+ return
+}
+
+
+
+There are three ways to return values from a function with a result
+type:
+
+
+
+ - The return value or values may be explicitly listed
+ in the "return" statement. Each expression must be single-valued
+ and assignable
+ to the corresponding element of the function's result type.
+
+func simpleF() int {
+ return 2
+}
+
+func complexF1() (re float64, im float64) {
+ return -7.0, -4.0
+}
+
+
+ - The expression list in the "return" statement may be a single
+ call to a multi-valued function. The effect is as if each value
+ returned from that function were assigned to a temporary
+ variable with the type of the respective value, followed by a
+ "return" statement listing these variables, at which point the
+ rules of the previous case apply.
+
+func complexF2() (re float64, im float64) {
+ return complexF1()
+}
+
+
+ - The expression list may be empty if the function's result
+ type specifies names for its result parameters.
+ The result parameters act as ordinary local variables
+ and the function may assign values to them as necessary.
+ The "return" statement returns the values of these variables.
+
+func complexF3() (re float64, im float64) {
+ re = 7.0
+ im = 4.0
+ return
+}
+
+func (devnull) Write(p []byte) (n int, _ error) {
+ n = len(p)
+ return
+}
+
+
+
+
+
+Regardless of how they are declared, all the result values are initialized to
+the zero values for their type upon entry to the
+function. A "return" statement that specifies results sets the result parameters before
+any deferred functions are executed.
+
+
+
+Implementation restriction: A compiler may disallow an empty expression list
+in a "return" statement if a different entity (constant, type, or variable)
+with the same name as a result parameter is in
+scope at the place of the return.
+
+
+
+func f(n int) (res int, err error) {
+ if _, err := f(n-1); err != nil {
+ return // invalid return statement: err is shadowed
+ }
+ return
+}
+
+
+Break statements
+
+
+A "break" statement terminates execution of the innermost
+"for",
+"switch", or
+"select" statement
+within the same function.
+
+
+
+BreakStmt = "break" [ Label ] .
+
+
+
+If there is a label, it must be that of an enclosing
+"for", "switch", or "select" statement,
+and that is the one whose execution terminates.
+
+
+
+OuterLoop:
+ for i = 0; i < n; i++ {
+ for j = 0; j < m; j++ {
+ switch a[i][j] {
+ case nil:
+ state = Error
+ break OuterLoop
+ case item:
+ state = Found
+ break OuterLoop
+ }
+ }
+ }
+
+
+Continue statements
+
+
+A "continue" statement begins the next iteration of the
+innermost "for" loop at its post statement.
+The "for" loop must be within the same function.
+
+
+
+ContinueStmt = "continue" [ Label ] .
+
+
+
+If there is a label, it must be that of an enclosing
+"for" statement, and that is the one whose execution
+advances.
+
+
+
+RowLoop:
+ for y, row := range rows {
+ for x, data := range row {
+ if data == endOfRow {
+ continue RowLoop
+ }
+ row[x] = data + bias(x, y)
+ }
+ }
+
+
+Goto statements
+
+
+A "goto" statement transfers control to the statement with the corresponding label
+within the same function.
+
+
+
+GotoStmt = "goto" Label .
+
+
+
+goto Error
+
+
+
+Executing the "goto" statement must not cause any variables to come into
+scope that were not already in scope at the point of the goto.
+For instance, this example:
+
+
+
+ goto L // BAD
+ v := 3
+L:
+
+
+
+is erroneous because the jump to label L skips
+the creation of v.
+
+
+
+A "goto" statement outside a block cannot jump to a label inside that block.
+For instance, this example:
+
+
+
+if n%2 == 1 {
+ goto L1
+}
+for n > 0 {
+ f()
+ n--
+L1:
+ f()
+ n--
+}
+
+
+
+is erroneous because the label L1 is inside
+the "for" statement's block but the goto is not.
+
+
+Fallthrough statements
+
+
+A "fallthrough" statement transfers control to the first statement of the
+next case clause in an expression "switch" statement.
+It may be used only as the final non-empty statement in such a clause.
+
+
+
+FallthroughStmt = "fallthrough" .
+
+
+
+Defer statements
+
+
+A "defer" statement invokes a function whose execution is deferred
+to the moment the surrounding function returns, either because the
+surrounding function executed a return statement,
+reached the end of its function body,
+or because the corresponding goroutine is panicking.
+
+
+
+DeferStmt = "defer" Expression .
+
+
+
+The expression must be a function or method call; it cannot be parenthesized.
+Calls of built-in functions are restricted as for
+expression statements.
+
+
+
+Each time a "defer" statement
+executes, the function value and parameters to the call are
+evaluated as usual
+and saved anew but the actual function is not invoked.
+Instead, deferred functions are invoked immediately before
+the surrounding function returns, in the reverse order
+they were deferred. That is, if the surrounding function
+returns through an explicit return statement,
+deferred functions are executed after any result parameters are set
+by that return statement but before the function returns to its caller.
+If a deferred function value evaluates
+to nil, execution panics
+when the function is invoked, not when the "defer" statement is executed.
+
+
+
+For instance, if the deferred function is
+a function literal and the surrounding
+function has named result parameters that
+are in scope within the literal, the deferred function may access and modify
+the result parameters before they are returned.
+If the deferred function has any return values, they are discarded when
+the function completes.
+(See also the section on handling panics.)
+
+
+
+lock(l)
+defer unlock(l) // unlocking happens before surrounding function returns
+
+// prints 3 2 1 0 before surrounding function returns
+for i := 0; i <= 3; i++ {
+ defer fmt.Print(i)
+}
+
+// f returns 42
+func f() (result int) {
+ defer func() {
+ // result is accessed after it was set to 6 by the return statement
+ result *= 7
+ }()
+ return 6
+}
+
+
+Built-in functions
+
+
+Built-in functions are
+predeclared.
+They are called like any other function but some of them
+accept a type instead of an expression as the first argument.
+
+
+
+The built-in functions do not have standard Go types,
+so they can only appear in call expressions;
+they cannot be used as function values.
+
+
+Close
+
+
+For a channel c, the built-in function close(c)
+records that no more values will be sent on the channel.
+It is an error if c is a receive-only channel.
+Sending to or closing a closed channel causes a run-time panic.
+Closing the nil channel also causes a run-time panic.
+After calling close, and after any previously
+sent values have been received, receive operations will return
+the zero value for the channel's type without blocking.
+The multi-valued receive operation
+returns a received value along with an indication of whether the channel is closed.
+
+
+
+Length and capacity
+
+
+The built-in functions len and cap take arguments
+of various types and return a result of type int.
+The implementation guarantees that the result always fits into an int.
+
+
+
+Call Argument type Result
+
+len(s) string type string length in bytes
+ [n]T, *[n]T array length (== n)
+ []T slice length
+ map[K]T map length (number of defined keys)
+ chan T number of elements queued in channel buffer
+
+cap(s) [n]T, *[n]T array length (== n)
+ []T slice capacity
+ chan T channel buffer capacity
+
+
+
+The capacity of a slice is the number of elements for which there is
+space allocated in the underlying array.
+At any time the following relationship holds:
+
+
+
+0 <= len(s) <= cap(s)
+
+
+
+The length of a nil slice, map or channel is 0.
+The capacity of a nil slice or channel is 0.
+
+
+
+The expression len(s) is constant if
+s is a string constant. The expressions len(s) and
+cap(s) are constants if the type of s is an array
+or pointer to an array and the expression s does not contain
+channel receives or (non-constant)
+function calls; in this case s is not evaluated.
+Otherwise, invocations of len and cap are not
+constant and s is evaluated.
+
+
+
+const (
+ c1 = imag(2i) // imag(2i) = 2.0 is a constant
+ c2 = len([10]float64{2}) // [10]float64{2} contains no function calls
+ c3 = len([10]float64{c1}) // [10]float64{c1} contains no function calls
+ c4 = len([10]float64{imag(2i)}) // imag(2i) is a constant and no function call is issued
+ c5 = len([10]float64{imag(z)}) // invalid: imag(z) is a (non-constant) function call
+)
+var z complex128
+
+
+Allocation
+
+
+The built-in function new takes a type T,
+allocates storage for a variable of that type
+at run time, and returns a value of type *T
+pointing to it.
+The variable is initialized as described in the section on
+initial values.
+
+
+
+new(T)
+
+
+
+For instance
+
+
+
+type S struct { a int; b float64 }
+new(S)
+
+
+
+allocates storage for a variable of type S,
+initializes it (a=0, b=0.0),
+and returns a value of type *S containing the address
+of the location.
+
+
+Making slices, maps and channels
+
+
+The built-in function make takes a type T,
+which must be a slice, map or channel type,
+optionally followed by a type-specific list of expressions.
+It returns a value of type T (not *T).
+The memory is initialized as described in the section on
+initial values.
+
+
+
+Call Type T Result
+
+make(T, n) slice slice of type T with length n and capacity n
+make(T, n, m) slice slice of type T with length n and capacity m
+
+make(T) map map of type T
+make(T, n) map map of type T with initial space for approximately n elements
+
+make(T) channel unbuffered channel of type T
+make(T, n) channel buffered channel of type T, buffer size n
+
+
+
+
+Each of the size arguments n and m must be of integer type
+or an untyped constant.
+A constant size argument must be non-negative and representable
+by a value of type int; if it is an untyped constant it is given type int.
+If both n and m are provided and are constant, then
+n must be no larger than m.
+If n is negative or larger than m at run time,
+a run-time panic occurs.
+
+
+
+s := make([]int, 10, 100) // slice with len(s) == 10, cap(s) == 100
+s := make([]int, 1e3) // slice with len(s) == cap(s) == 1000
+s := make([]int, 1<<63) // illegal: len(s) is not representable by a value of type int
+s := make([]int, 10, 0) // illegal: len(s) > cap(s)
+c := make(chan int, 10) // channel with a buffer size of 10
+m := make(map[string]int, 100) // map with initial space for approximately 100 elements
+
+
+
+Calling make with a map type and size hint n will
+create a map with initial space to hold n map elements.
+The precise behavior is implementation-dependent.
+
+
+
+Appending to and copying slices
+
+
+The built-in functions append and copy assist in
+common slice operations.
+For both functions, the result is independent of whether the memory referenced
+by the arguments overlaps.
+
+
+
+The variadic function append
+appends zero or more values x
+to s of type S, which must be a slice type, and
+returns the resulting slice, also of type S.
+The values x are passed to a parameter of type ...T
+where T is the element type of
+S and the respective
+parameter passing rules apply.
+As a special case, append also accepts a first argument
+assignable to type []byte with a second argument of
+string type followed by .... This form appends the
+bytes of the string.
+
+
+
+append(s S, x ...T) S // T is the element type of S
+
+
+
+If the capacity of s is not large enough to fit the additional
+values, append allocates a new, sufficiently large underlying
+array that fits both the existing slice elements and the additional values.
+Otherwise, append re-uses the underlying array.
+
+
+
+s0 := []int{0, 0}
+s1 := append(s0, 2) // append a single element s1 == []int{0, 0, 2}
+s2 := append(s1, 3, 5, 7) // append multiple elements s2 == []int{0, 0, 2, 3, 5, 7}
+s3 := append(s2, s0...) // append a slice s3 == []int{0, 0, 2, 3, 5, 7, 0, 0}
+s4 := append(s3[3:6], s3[2:]...) // append overlapping slice s4 == []int{3, 5, 7, 2, 3, 5, 7, 0, 0}
+
+var t []interface{}
+t = append(t, 42, 3.1415, "foo") // t == []interface{}{42, 3.1415, "foo"}
+
+var b []byte
+b = append(b, "bar"...) // append string contents b == []byte{'b', 'a', 'r' }
+
+
+
+The function copy copies slice elements from
+a source src to a destination dst and returns the
+number of elements copied.
+Both arguments must have identical element type T and must be
+assignable to a slice of type []T.
+The number of elements copied is the minimum of
+len(src) and len(dst).
+As a special case, copy also accepts a destination argument assignable
+to type []byte with a source argument of a string type.
+This form copies the bytes from the string into the byte slice.
+
+
+
+copy(dst, src []T) int
+copy(dst []byte, src string) int
+
+
+
+Examples:
+
+
+
+var a = [...]int{0, 1, 2, 3, 4, 5, 6, 7}
+var s = make([]int, 6)
+var b = make([]byte, 5)
+n1 := copy(s, a[0:]) // n1 == 6, s == []int{0, 1, 2, 3, 4, 5}
+n2 := copy(s, s[2:]) // n2 == 4, s == []int{2, 3, 4, 5, 4, 5}
+n3 := copy(b, "Hello, World!") // n3 == 5, b == []byte("Hello")
+
+
+
+Deletion of map elements
+
+
+The built-in function delete removes the element with key
+k from a map m. The
+type of k must be assignable
+to the key type of m.
+
+
+
+delete(m, k) // remove element m[k] from map m
+
+
+
+If the map m is nil or the element m[k]
+does not exist, delete is a no-op.
+
+
+
+Manipulating complex numbers
+
+
+Three functions assemble and disassemble complex numbers.
+The built-in function complex constructs a complex
+value from a floating-point real and imaginary part, while
+real and imag
+extract the real and imaginary parts of a complex value.
+
+
+
+complex(realPart, imaginaryPart floatT) complexT
+real(complexT) floatT
+imag(complexT) floatT
+
+
+
+The type of the arguments and return value correspond.
+For complex, the two arguments must be of the same
+floating-point type and the return type is the complex type
+with the corresponding floating-point constituents:
+complex64 for float32 arguments, and
+complex128 for float64 arguments.
+If one of the arguments evaluates to an untyped constant, it is first implicitly
+converted to the type of the other argument.
+If both arguments evaluate to untyped constants, they must be non-complex
+numbers or their imaginary parts must be zero, and the return value of
+the function is an untyped complex constant.
+
+
+
+For real and imag, the argument must be
+of complex type, and the return type is the corresponding floating-point
+type: float32 for a complex64 argument, and
+float64 for a complex128 argument.
+If the argument evaluates to an untyped constant, it must be a number,
+and the return value of the function is an untyped floating-point constant.
+
+
+
+The real and imag functions together form the inverse of
+complex, so for a value z of a complex type Z,
+z == Z(complex(real(z), imag(z))).
+
+
+
+If the operands of these functions are all constants, the return
+value is a constant.
+
+
+
+var a = complex(2, -2) // complex128
+const b = complex(1.0, -1.4) // untyped complex constant 1 - 1.4i
+x := float32(math.Cos(math.Pi/2)) // float32
+var c64 = complex(5, -x) // complex64
+var s int = complex(1, 0) // untyped complex constant 1 + 0i can be converted to int
+_ = complex(1, 2<<s) // illegal: 2 assumes floating-point type, cannot shift
+var rl = real(c64) // float32
+var im = imag(a) // float64
+const c = imag(b) // untyped constant -1.4
+_ = imag(3 << s) // illegal: 3 assumes complex type, cannot shift
+
+
+Handling panics
+
+ Two built-in functions, panic and recover,
+assist in reporting and handling run-time panics
+and program-defined error conditions.
+
+
+
+func panic(interface{})
+func recover() interface{}
+
+
+
+While executing a function F,
+an explicit call to panic or a run-time panic
+terminates the execution of F.
+Any functions deferred by F
+are then executed as usual.
+Next, any deferred functions run by F's caller are run,
+and so on up to any deferred by the top-level function in the executing goroutine.
+At that point, the program is terminated and the error
+condition is reported, including the value of the argument to panic.
+This termination sequence is called panicking.
+
+
+
+panic(42)
+panic("unreachable")
+panic(Error("cannot parse"))
+
+
+
+The recover function allows a program to manage behavior
+of a panicking goroutine.
+Suppose a function G defers a function D that calls
+recover and a panic occurs in a function on the same goroutine in which G
+is executing.
+When the running of deferred functions reaches D,
+the return value of D's call to recover will be the value passed to the call of panic.
+If D returns normally, without starting a new
+panic, the panicking sequence stops. In that case,
+the state of functions called between G and the call to panic
+is discarded, and normal execution resumes.
+Any functions deferred by G before D are then run and G's
+execution terminates by returning to its caller.
+
+
+
+The return value of recover is nil if any of the following conditions holds:
+
+
+-
+
panic's argument was nil;
+
+-
+the goroutine is not panicking;
+
+-
+
recover was not called directly by a deferred function.
+
+
+
+
+The protect function in the example below invokes
+the function argument g and protects callers from
+run-time panics raised by g.
+
+
+
+func protect(g func()) {
+ defer func() {
+ log.Println("done") // Println executes normally even if there is a panic
+ if x := recover(); x != nil {
+ log.Printf("run time panic: %v", x)
+ }
+ }()
+ log.Println("start")
+ g()
+}
+
+
+
+Bootstrapping
+
+
+Current implementations provide several built-in functions useful during
+bootstrapping. These functions are documented for completeness but are not
+guaranteed to stay in the language. They do not return a result.
+
+
+
+Function Behavior
+
+print prints all arguments; formatting of arguments is implementation-specific
+println like print but prints spaces between arguments and a newline at the end
+
+
+
+Implementation restriction: print and println need not
+accept arbitrary argument types, but printing of boolean, numeric, and string
+types must be supported.
+
+
+Packages
+
+
+Go programs are constructed by linking together packages.
+A package in turn is constructed from one or more source files
+that together declare constants, types, variables and functions
+belonging to the package and which are accessible in all files
+of the same package. Those elements may be
+exported and used in another package.
+
+
+Source file organization
+
+
+Each source file consists of a package clause defining the package
+to which it belongs, followed by a possibly empty set of import
+declarations that declare packages whose contents it wishes to use,
+followed by a possibly empty set of declarations of functions,
+types, variables, and constants.
+
+
+
+SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } .
+
+
+Package clause
+
+
+A package clause begins each source file and defines the package
+to which the file belongs.
+
+
+
+PackageClause = "package" PackageName .
+PackageName = identifier .
+
+
+
+The PackageName must not be the blank identifier.
+
+
+
+package math
+
+
+
+A set of files sharing the same PackageName form the implementation of a package.
+An implementation may require that all source files for a package inhabit the same directory.
+
+
+Import declarations
+
+
+An import declaration states that the source file containing the declaration
+depends on functionality of the imported package
+(§Program initialization and execution)
+and enables access to exported identifiers
+of that package.
+The import names an identifier (PackageName) to be used for access and an ImportPath
+that specifies the package to be imported.
+
+
+
+ImportDecl = "import" ( ImportSpec | "(" { ImportSpec ";" } ")" ) .
+ImportSpec = [ "." | PackageName ] ImportPath .
+ImportPath = string_lit .
+
+
+
+The PackageName is used in qualified identifiers
+to access exported identifiers of the package within the importing source file.
+It is declared in the file block.
+If the PackageName is omitted, it defaults to the identifier specified in the
+package clause of the imported package.
+If an explicit period (.) appears instead of a name, all the
+package's exported identifiers declared in that package's
+package block will be declared in the importing source
+file's file block and must be accessed without a qualifier.
+
+
+
+The interpretation of the ImportPath is implementation-dependent but
+it is typically a substring of the full file name of the compiled
+package and may be relative to a repository of installed packages.
+
+
+
+Implementation restriction: A compiler may restrict ImportPaths to
+non-empty strings using only characters belonging to
+Unicode's
+L, M, N, P, and S general categories (the Graphic characters without
+spaces) and may also exclude the characters
+!"#$%&'()*,:;<=>?[\]^`{|}
+and the Unicode replacement character U+FFFD.
+
+
+
+Assume we have compiled a package containing the package clause
+package math, which exports function Sin, and
+installed the compiled package in the file identified by
+"lib/math".
+This table illustrates how Sin is accessed in files
+that import the package after the
+various types of import declaration.
+
+
+
+Import declaration Local name of Sin
+
+import "lib/math" math.Sin
+import m "lib/math" m.Sin
+import . "lib/math" Sin
+
+
+
+An import declaration declares a dependency relation between
+the importing and imported package.
+It is illegal for a package to import itself, directly or indirectly,
+or to directly import a package without
+referring to any of its exported identifiers. To import a package solely for
+its side-effects (initialization), use the blank
+identifier as explicit package name:
+
+
+
+import _ "lib/math"
+
+
+
+An example package
+
+
+Here is a complete Go package that implements a concurrent prime sieve.
+
+
+
+package main
+
+import "fmt"
+
+// Send the sequence 2, 3, 4, … to channel 'ch'.
+func generate(ch chan<- int) {
+ for i := 2; ; i++ {
+ ch <- i // Send 'i' to channel 'ch'.
+ }
+}
+
+// Copy the values from channel 'src' to channel 'dst',
+// removing those divisible by 'prime'.
+func filter(src <-chan int, dst chan<- int, prime int) {
+ for i := range src { // Loop over values received from 'src'.
+ if i%prime != 0 {
+ dst <- i // Send 'i' to channel 'dst'.
+ }
+ }
+}
+
+// The prime sieve: Daisy-chain filter processes together.
+func sieve() {
+ ch := make(chan int) // Create a new channel.
+ go generate(ch) // Start generate() as a subprocess.
+ for {
+ prime := <-ch
+ fmt.Print(prime, "\n")
+ ch1 := make(chan int)
+ go filter(ch, ch1, prime)
+ ch = ch1
+ }
+}
+
+func main() {
+ sieve()
+}
+
+
+Program initialization and execution
+
+The zero value
+
+When storage is allocated for a variable,
+either through a declaration or a call of new, or when
+a new value is created, either through a composite literal or a call
+of make,
+and no explicit initialization is provided, the variable or value is
+given a default value. Each element of such a variable or value is
+set to the zero value for its type: false for booleans,
+0 for numeric types, ""
+for strings, and nil for pointers, functions, interfaces, slices, channels, and maps.
+This initialization is done recursively, so for instance each element of an
+array of structs will have its fields zeroed if no value is specified.
+
+
+These two simple declarations are equivalent:
+
+
+
+var i int
+var i int = 0
+
+
+
+After
+
+
+
+type T struct { i int; f float64; next *T }
+t := new(T)
+
+
+
+the following holds:
+
+
+
+t.i == 0
+t.f == 0.0
+t.next == nil
+
+
+
+The same would also be true after
+
+
+
+var t T
+
+
+Package initialization
+
+
+Within a package, package-level variable initialization proceeds stepwise,
+with each step selecting the variable earliest in declaration order
+which has no dependencies on uninitialized variables.
+
+
+
+More precisely, a package-level variable is considered ready for
+initialization if it is not yet initialized and either has
+no initialization expression or
+its initialization expression has no dependencies on uninitialized variables.
+Initialization proceeds by repeatedly initializing the next package-level
+variable that is earliest in declaration order and ready for initialization,
+until there are no variables ready for initialization.
+
+
+
+If any variables are still uninitialized when this
+process ends, those variables are part of one or more initialization cycles,
+and the program is not valid.
+
+
+
+Multiple variables on the left-hand side of a variable declaration initialized
+by single (multi-valued) expression on the right-hand side are initialized
+together: If any of the variables on the left-hand side is initialized, all
+those variables are initialized in the same step.
+
+
+
+var x = a
+var a, b = f() // a and b are initialized together, before x is initialized
+
+
+
+For the purpose of package initialization, blank
+variables are treated like any other variables in declarations.
+
+
+
+The declaration order of variables declared in multiple files is determined
+by the order in which the files are presented to the compiler: Variables
+declared in the first file are declared before any of the variables declared
+in the second file, and so on.
+
+
+
+Dependency analysis does not rely on the actual values of the
+variables, only on lexical references to them in the source,
+analyzed transitively. For instance, if a variable x's
+initialization expression refers to a function whose body refers to
+variable y then x depends on y.
+Specifically:
+
+
+
+-
+A reference to a variable or function is an identifier denoting that
+variable or function.
+
+
+-
+A reference to a method
m is a
+method value or
+method expression of the form
+t.m, where the (static) type of t is
+not an interface type, and the method m is in the
+method set of t.
+It is immaterial whether the resulting function value
+t.m is invoked.
+
+
+-
+A variable, function, or method
x depends on a variable
+y if x's initialization expression or body
+(for functions and methods) contains a reference to y
+or to a function or method that depends on y.
+
+
+
+
+For example, given the declarations
+
+
+
+var (
+ a = c + b // == 9
+ b = f() // == 4
+ c = f() // == 5
+ d = 3 // == 5 after initialization has finished
+)
+
+func f() int {
+ d++
+ return d
+}
+
+
+
+the initialization order is d, b, c, a.
+Note that the order of subexpressions in initialization expressions is irrelevant:
+a = c + b and a = b + c result in the same initialization
+order in this example.
+
+
+
+Dependency analysis is performed per package; only references referring
+to variables, functions, and (non-interface) methods declared in the current
+package are considered. If other, hidden, data dependencies exist between
+variables, the initialization order between those variables is unspecified.
+
+
+
+For instance, given the declarations
+
+
+
+var x = I(T{}).ab() // x has an undetected, hidden dependency on a and b
+var _ = sideEffect() // unrelated to x, a, or b
+var a = b
+var b = 42
+
+type I interface { ab() []int }
+type T struct{}
+func (T) ab() []int { return []int{a, b} }
+
+
+
+the variable a will be initialized after b but
+whether x is initialized before b, between
+b and a, or after a, and
+thus also the moment at which sideEffect() is called (before
+or after x is initialized) is not specified.
+
+
+
+Variables may also be initialized using functions named init
+declared in the package block, with no arguments and no result parameters.
+
+
+
+func init() { … }
+
+
+
+Multiple such functions may be defined per package, even within a single
+source file. In the package block, the init identifier can
+be used only to declare init functions, yet the identifier
+itself is not declared. Thus
+init functions cannot be referred to from anywhere
+in a program.
+
+
+
+A package with no imports is initialized by assigning initial values
+to all its package-level variables followed by calling all init
+functions in the order they appear in the source, possibly in multiple files,
+as presented to the compiler.
+If a package has imports, the imported packages are initialized
+before initializing the package itself. If multiple packages import
+a package, the imported package will be initialized only once.
+The importing of packages, by construction, guarantees that there
+can be no cyclic initialization dependencies.
+
+
+
+Package initialization—variable initialization and the invocation of
+init functions—happens in a single goroutine,
+sequentially, one package at a time.
+An init function may launch other goroutines, which can run
+concurrently with the initialization code. However, initialization
+always sequences
+the init functions: it will not invoke the next one
+until the previous one has returned.
+
+
+
+To ensure reproducible initialization behavior, build systems are encouraged
+to present multiple files belonging to the same package in lexical file name
+order to a compiler.
+
+
+
+Program execution
+
+A complete program is created by linking a single, unimported package
+called the main package with all the packages it imports, transitively.
+The main package must
+have package name main and
+declare a function main that takes no
+arguments and returns no value.
+
+
+
+func main() { … }
+
+
+
+Program execution begins by initializing the main package and then
+invoking the function main.
+When that function invocation returns, the program exits.
+It does not wait for other (non-main) goroutines to complete.
+
+
+Errors
+
+
+The predeclared type error is defined as
+
+
+
+type error interface {
+ Error() string
+}
+
+
+
+It is the conventional interface for representing an error condition,
+with the nil value representing no error.
+For instance, a function to read data from a file might be defined:
+
+
+
+func Read(f *File, b []byte) (n int, err error)
+
+
+Run-time panics
+
+
+Execution errors such as attempting to index an array out
+of bounds trigger a run-time panic equivalent to a call of
+the built-in function panic
+with a value of the implementation-defined interface type runtime.Error.
+That type satisfies the predeclared interface type
+error.
+The exact error values that
+represent distinct run-time error conditions are unspecified.
+
+
+
+package runtime
+
+type Error interface {
+ error
+ // and perhaps other methods
+}
+
+
+System considerations
+
+Package unsafe
+
+
+The built-in package unsafe, known to the compiler
+and accessible through the import path "unsafe",
+provides facilities for low-level programming including operations
+that violate the type system. A package using unsafe
+must be vetted manually for type safety and may not be portable.
+The package provides the following interface:
+
+
+
+package unsafe
+
+type ArbitraryType int // shorthand for an arbitrary Go type; it is not a real type
+type Pointer *ArbitraryType
+
+func Alignof(variable ArbitraryType) uintptr
+func Offsetof(selector ArbitraryType) uintptr
+func Sizeof(variable ArbitraryType) uintptr
+
+type IntegerType int // shorthand for an integer type; it is not a real type
+func Add(ptr Pointer, len IntegerType) Pointer
+func Slice(ptr *ArbitraryType, len IntegerType) []ArbitraryType
+
+
+
+A Pointer is a pointer type but a Pointer
+value may not be dereferenced.
+Any pointer or value of underlying type uintptr can be converted to
+a type of underlying type Pointer and vice versa.
+The effect of converting between Pointer and uintptr is implementation-defined.
+
+
+
+var f float64
+bits = *(*uint64)(unsafe.Pointer(&f))
+
+type ptr unsafe.Pointer
+bits = *(*uint64)(ptr(&f))
+
+var p ptr = nil
+
+
+
+The functions Alignof and Sizeof take an expression x
+of any type and return the alignment or size, respectively, of a hypothetical variable v
+as if v was declared via var v = x.
+
+
+The function Offsetof takes a (possibly parenthesized) selector
+s.f, denoting a field f of the struct denoted by s
+or *s, and returns the field offset in bytes relative to the struct's address.
+If f is an embedded field, it must be reachable
+without pointer indirections through fields of the struct.
+For a struct s with field f:
+
+
+
+uintptr(unsafe.Pointer(&s)) + unsafe.Offsetof(s.f) == uintptr(unsafe.Pointer(&s.f))
+
+
+
+Computer architectures may require memory addresses to be aligned;
+that is, for addresses of a variable to be a multiple of a factor,
+the variable's type's alignment. The function Alignof
+takes an expression denoting a variable of any type and returns the
+alignment of the (type of the) variable in bytes. For a variable
+x:
+
+
+
+uintptr(unsafe.Pointer(&x)) % unsafe.Alignof(x) == 0
+
+
+
+Calls to Alignof, Offsetof, and
+Sizeof are compile-time constant expressions of type uintptr.
+
+
+
+The function Add adds len to ptr
+and returns the updated pointer unsafe.Pointer(uintptr(ptr) + uintptr(len)).
+The len argument must be of integer type or an untyped constant.
+A constant len argument must be representable by a value of type int;
+if it is an untyped constant it is given type int.
+The rules for valid uses of Pointer still apply.
+
+
+
+The function Slice returns a slice whose underlying array starts at ptr
+and whose length and capacity are len.
+Slice(ptr, len) is equivalent to
+
+
+
+(*[len]ArbitraryType)(unsafe.Pointer(ptr))[:]
+
+
+
+except that, as a special case, if ptr
+is nil and len is zero,
+Slice returns nil.
+
+
+
+The len argument must be of integer type or an untyped constant.
+A constant len argument must be non-negative and representable by a value of type int;
+if it is an untyped constant it is given type int.
+At run time, if len is negative,
+or if ptr is nil and len is not zero,
+a run-time panic occurs.
+
+
+Size and alignment guarantees
+
+
+For the numeric types, the following sizes are guaranteed:
+
+
+
+type size in bytes
+
+byte, uint8, int8 1
+uint16, int16 2
+uint32, int32, float32 4
+uint64, int64, float64, complex64 8
+complex128 16
+
+
+
+The following minimal alignment properties are guaranteed:
+
+
+- For a variable
x of any type: unsafe.Alignof(x) is at least 1.
+
+
+- For a variable
x of struct type: unsafe.Alignof(x) is the largest of
+ all the values unsafe.Alignof(x.f) for each field f of x, but at least 1.
+
+
+- For a variable
x of array type: unsafe.Alignof(x) is the same as
+ the alignment of a variable of the array's element type.
+
+
+
+
+A struct or array type has size zero if it contains no fields (or elements, respectively) that have a size greater than zero. Two distinct zero-size variables may have the same address in memory.
+
diff --git a/doc/go1.18.html b/doc/go1.18.html
index 55a1de3bd8..e69113411e 100644
--- a/doc/go1.18.html
+++ b/doc/go1.18.html
@@ -25,12 +25,164 @@ Do not send CLs removing the interior tags from such phrases.
Changes to the language
+Generics
+
+
+ Go 1.18 includes an implementation of generic features as described by the
+ Type
+ Parameters Proposal.
+ This includes major - but fully backward-compatible - changes to the language.
+
+
- TODO: complete this section
+ These new language changes required a large amount of new code that
+ has not had significant testing in production settings. That will
+ only happen as more people write and use generic code. We believe
+ that this feature is well implemented and high quality. However,
+ unlike most aspects of Go, we can't back up that belief with real
+ world experience. Therefore, while we encourage the use of generics
+ where it makes sense, please use appropriate caution when deploying
+ generic code in production.
+
+
+
+ The following is a list of the most visible changes. For a more comprehensive overview, see the
+ proposal.
+ For details see the language spec.
+
+
+
+ -
+ The syntax for
+ function and
+ type declarations
+ now accepts
+ type parameters.
+
+ -
+ Parameterized functions and types can be instantiated by following them with a list of
+ type arguments in square brackets.
+
+ -
+ The new token
~ has been added to the set of
+ operators and punctuation.
+
+ -
+ The syntax for
+ interface types
+ now permits the embedding of arbitrary types (not just type names of interfaces)
+ as well as union and
~T type elements. Such interfaces may only be used
+ as type constraints.
+ An interface now defines a set of types as well as a set of methods.
+
+ -
+ The new
+ predeclared identifier
+
any is an alias for the empty interface. It may be used instead of
+ interface{}.
+
+ -
+ The new
+ predeclared identifier
+
+ comparable is an interface that denotes the set of all types which can be
+ compared using == or !=. It may only be used as (or embedded in)
+ a type constraint.
+
+
+
+
+ The current generics implementation has the following limitations:
+
+ -
+ The Go compiler cannot currently handle type declarations inside generic functions
+ or methods. We hope to provide support for this feature in Go 1.19.
+
+ -
+ The Go compiler currently does not accept arguments of type parameter type with
+ the predeclared functions
real, imag, and complex.
+ We hope to remove this restriction in Go 1.19.
+
+ -
+ Embedding a type parameter, or a pointer to a type parameter, as
+ an unnamed field in a struct type is not permitted. Similarly
+ embedding a type parameter in an interface type is not permitted.
+ Whether these will ever be permitted is unclear at present.
+
+ -
+ A union element with more than one term may not contain an
+ interface type with a non-empty method set. Whether this will
+ ever be permitted is unclear at present.
+
+
+
+
+Bug fixes
+
+
+ The Go 1.18 compiler now correctly reports declared but not used errors
+ for variables that are set inside a function literal but are never used. Before Go 1.18,
+ the compiler did not report an error in such cases. This fixes long-outstanding compiler
+ issue #8560. As a result of this change,
+ (possibly incorrect) programs may not compile anymore. The necessary fix is
+ straightforward: fix the program if it was in fact incorrect, or use the offending
+ variable, for instance by assigning it to the blank identifier _.
+ Since go vet always pointed out this error, the number of affected
+ programs is likely very small.
+
+
+
+ The Go 1.18 compiler now reports an overflow when passing a rune constant expression
+ such as '1' << 32 as an argument to the predeclared functions
+ print and println, consistent with the behavior of
+ user-defined functions. Before Go 1.18, the compiler did not report an error
+ in such cases but silently accepted such constant arguments if they fit into an
+ int64. As a result of this change, (possibly incorrect) programs
+ may not compile anymore. The necessary fix is straightforward: fix the program if it
+ was in fact incorrect, or explicitly convert the offending argument to the correct type.
+ Since go vet always pointed out this error, the number of affected
+ programs is likely very small.
Ports
+AMD64
+
+
+ Go 1.18 introduces the new GOAMD64 environment variable, which selects at compile time
+ a minimum target version of the AMD64 architecture. Allowed values are v1,
+ v2, v3, or v4. Each higher level requires,
+ and takes advantage of, additional processor features. A detailed
+ description can be found
+ here.
+
+
+ The GOAMD64 environment variable defaults to v1.
+
+
+RISC-V
+
+
+ The 64-bit RISC-V architecture on Linux (the linux/riscv64 port)
+ now supports the c-archive and c-shared build modes.
+
+
+Windows
+
+
+ The windows/arm and windows/arm64 ports now support
+ non-cooperative preemption, bringing that capability to all four Windows
+ ports, which should hopefully address subtle bugs encountered when calling
+ into Win32 functions that block for extended periods of time.
+
+
+iOS
+
+
+ On iOS (the ios/arm64 port)
+ and iOS simulator running on AMD64-based macOS (the ios/amd64 port),
+ Go 1.18 now requires iOS 12 or later; support for previous versions has been discontinued.
+
+
FreeBSD
@@ -40,21 +192,29 @@ Do not send CLs removing the interior tags from such phrases.
FreeBSD 13.0+ will require a kernel with the COMPAT_FREEBSD12 option set (this is the default).
-PPC64
-
-
- TODO: https://golang.org/cl/353969: internal/buildcfg: enable register ABI for PPC64
-
-
-RISC-V
-
-
- The 64-bit RISC-V architecture on Linux (the linux/riscv64 port)
- now supports the c-archive and c-shared build modes.
-
-
+Fuzzing
+
+
+ Go 1.18 includes an implementation of fuzzing as described by
+ the fuzzing proposal.
+
+
+
+ See the fuzzing landing page to get
+ started.
+
+
+
+ Please be aware that fuzzing can consume a lot of memory and may impact your
+ machine’s performance while it runs. Also be aware that the fuzzing engine
+ writes values that expand test coverage to a fuzz cache directory within
+ $GOCACHE/fuzz while it runs. There is currently no limit to the
+ number of files or total bytes that may be written to the fuzz cache, so it
+ may occupy a large amount of storage (possibly several GBs).
+
+
Go command
@@ -92,15 +252,24 @@ Do not send CLs removing the interior tags from such phrases.
including build and tool tags (set with -tags), compiler,
assembler, and linker flags (like -gcflags), whether cgo was
enabled, and if it was, the values of the cgo environment variables
- (like CGO_CFLAGS). This information may be omitted using the
- flag -buildinfo=false. Both VCS and build information may be
- read together with module information using go
- version -m file or
+ (like CGO_CFLAGS).
+ Both VCS and build information may be read together with module
+ information using
+ go version -m file or
runtime/debug.ReadBuildInfo (for the currently running binary)
or the new debug/buildinfo
package.
+
+ The underlying data format of the embedded build information can change with
+ new go releases, so an older version of go may not handle the
+ build information produced with a newer version of go.
+ To read the version information from a binary built with go 1.18,
+ use the go version command and the
+ debug/buildinfo package from go 1.18+.
+
+
If the main module's go.mod file
specifies go 1.17
@@ -114,8 +283,64 @@ Do not send CLs removing the interior tags from such phrases.
go mod download all.
-
- TODO: https://golang.org/cl/349595: https://golang.org/cl/349595: cmd/go: add GOAMD64 environment variable
+
+ The go mod vendor subcommand now
+ supports a -o flag to set the output directory.
+ (Other go commands still read from the vendor
+ directory at the module root when loading packages
+ with -mod=vendor, so the main use for this flag is for
+ third-party tools that need to collect package source code.)
+
+
+
+ The go build command and related commands
+ now support an -asan flag that enables interoperation
+ with C (or C++) code compiled with the address sanitizer (C compiler
+ option -fsanitize=address).
+
+
+
+ The go mod tidy command now retains
+ additional checksums in the go.sum file for modules whose source
+ code is needed to verify that each imported package is provided by only one
+ module in the build list. Because this
+ condition is rare and failure to apply it results in a build error, this
+ change is not conditioned on the go version in the main
+ module's go.mod file.
+
+
+
+ The go command now supports a "Workspace" mode. If a
+ go.work file is found in the working directory or a
+ parent directory, or one is specified using the -workfile
+ flag, it will put the go command into workspace mode.
+ In workspace mode, the go.work file will be used to
+ determine the set of main modules used as the roots for module
+ resolution, instead of using the normally-found go.mod
+ file to specify the single main module. For more information see the
+ go work
+ documentation.
+
+
+
+ The go command now supports additional command line
+ options for the new fuzzing support described
+ above:
+
+ -
+
go test supports
+ -fuzz, -fuzztime, and
+ -fuzzminimizetime options.
+ For documentation on these see
+ go help testflag.
+
+ -
+
go clean supports a -fuzzcache
+ option.
+ For documentation see
+ go help clean.
+
+
gofmt
@@ -126,26 +351,118 @@ Do not send CLs removing the interior tags from such phrases.
multiple CPUs, gofmt should now be significantly faster.
+
+ The garbage collector now includes non-heap sources of garbage collector work
+ (e.g., stack scanning) when determining how frequently to run. As a result,
+ garbage collector overhead is more predictable when these sources are
+ significant. For most applications these changes will be negligible; however,
+ some Go applications may now use less memory and spend more time on garbage
+ collection, or vice versa, than before. The intended workaround is to tweak
+ GOGC where necessary.
+
+
+
+ The runtime now returns memory to the operating system more efficiently and has
+ been tuned to work more aggressively as a result.
+
+
+
+ Go 1.17 generally improved the formatting of arguments in stack traces,
+ but could print inaccurate values for arguments passed in registers.
+ This is improved in Go 1.18 by printing a question mark (?)
+ after each value that may be inaccurate.
+ The compiler now can inline functions that contain range loops or
+ labeled for loops.
+
+
+
+ Because the compiler's type checker was replaced in its entirety to
+ support generics, some error messages now may use different wording
+ than before. In some cases, pre-Go 1.18 error messages provided more
+ detail or were phrased in a more helpful way.
+ We intend to address these cases in Go 1.19.
+
+
+
+ Because of changes in the compiler related to supporting generics, the
+ Go 1.18 compile speed can be roughly 15% slower than the Go 1.17 compile speed.
+ The execution time of the compiled code is not affected. We
+ intend to improve the speed of the compiler in Go 1.19.
- TODO: complete this section
+ The default can be temporarily reverted to TLS 1.0 by setting the
+ GODEBUG=tls10default=1 environment variable.
+ This option will be removed in Go 1.19.
+
+
+
A type determines a set of values together with operations and methods specific
-to those values. A type may be denoted by a type name, if it has one,
-or specified using a type literal, which composes a type from existing types.
+to those values. A type may be denoted by a type name, if it has one, which must be
+followed by type arguments if the type is parameterized.
+A type may also be specified using a type literal, which composes a type
+from existing types.
+Further rules apply to structs (and pointer to structs) containing embedded fields,
+as described in the section on struct types.
Any other type has an empty method set.
-In a method set, each method must have a
-unique
-non-blank method name.
+In its most basic form an interface specifies a (possibly empty) list of methods.
+The type set defined by such an interface is the set of types which implement all of
+those methods, and the corresponding method set consists
+exactly of the methods specified by the interface.
+Finally, in their most general form, an interface element may also be an arbitrary type term
+T, or a term of the form ~T specifying the underlying type T,
+or a union of terms t1|t2|…|tn.
+Together with method specifications, these elements enable the precise
+definition of an interface's type set as follows:
+
+
+
+In a union, a term cannot be a type parameter, and the type sets of all
+non-interface terms must be pairwise disjoint (the pairwise intersection of the type sets must be empty).
+Given a type parameter P:
+
+
+
+Implementation restriction:
+A union with more than one term cannot contain interface types
+with non-empty method sets or which
+are or embed the predeclared identifier
+comparable.
+
+
+
@@ -1595,18 +1801,18 @@ type (
A3 = int
A4 = func(A3, float64) *A0
A5 = func(x int, _ float64) *[]string
-)
-type (
B0 A0
B1 []string
B2 struct{ a, b int }
B3 struct{ a, c int }
B4 func(int, float64) *B0
B5 func(x int, y float64) *A1
-)
-type C0 = B0
+ C0 = B0
+ D0[P1, P2 any] struct{ x P1; y P2 }
+ E0 = D0[int, string]
+)
@@ -1620,6 +1826,7 @@ A3 and int
A4, func(int, float64) *[]string, and A5
B0 and C0
+D0[int, string] and E0
[]int and []int
struct{ a, b *T5 } and struct{ a, b *T5 }
func(x int, y float64) *[]string, func(int, float64) (result *[]string), and A5
@@ -1629,10 +1836,15 @@ func(x int, y float64) *[]string, func(int, float64) (result *[]string), and A5
B0 and B1 are different because they are new types
created by distinct type definitions;
func(int, float64) *B0 and func(x int, y float64) *[]string
-are different because B0 is different from []string.
+are different because B0 is different from []string;
+and P1 and P2 are different because they are different
+type parameters.
+D0[int, string] and struct{ x int; y string } are
+different because the former is an instantiated
+defined type while the latter is a type literal
+(but they are still assignable).
-
run-ti
Type assertions
-For an expression x of interface type
-and a type T, the primary expression
+For an expression x of interface type,
+but not a type parameter, and a type T,
+the primary expression
@@ -3452,6 +4055,12 @@ var pt *Point
pt.Scale(3.5) // method call with receiver pt
+
+If f denotes a parameterized function, it must be
+instantiated before it can be called
+or used as a function value.
+
+
In a function call, the function value and arguments are evaluated in
the usual order.
@@ -3563,6 +4172,460 @@ within Greeting, who will have the same value as
+Instantiations
+
+
+A parameterized function or type is instantiated by substituting type arguments
+for the type parameters.
+Instantiation proceeds in two phases:
+
+
+
+-
+Each type argument is substituted for its corresponding type parameter in the parameterized
+declaration.
+This substitution happens across the entire function or type declaration,
+including the type parameter list itself and any types in that list.
+
+
+-
+After substitution, each type argument must implement
+the constraint (instantiated, if necessary)
+of the corresponding type parameter. Otherwise instantiation fails.
+
+
+
+
+Instantiating a type results in a new non-parameterized named type;
+instantiating a function produces a new non-parameterized function.
+
+
+
+type parameter list type arguments after substitution
+
+[P any] int [int any]
+[S ~[]E, E any] []int, int [[]int ~[]int, int any]
+[P io.Writer] string [string io.Writer] // illegal: string doesn't implement io.Writer
+
+
+
+Type arguments may be provided explicitly, or they may be partially or completely
+inferred.
+A partially provided type argument list cannot be empty; there must be at least the
+first argument.
+
+
+
+type T[P1 ~int, P2 ~[]P1] struct{ … }
+
+T[] // illegal: at least the first type argument must be present, even if it could be inferred
+T[int] // argument for P1 explicitly provided, argument for P2 inferred
+T[int, []int] // both arguments explicitly provided
+
+
+
+A partial type argument list specifies a prefix of the full list of type arguments, leaving
+the remaining arguments to be inferred. Loosely speaking, type arguments may be omitted from
+"right to left".
+
+
+
+Parameterized types, and parameterized functions that are not called,
+require a type argument list for instantiation; if the list is partial, all
+remaining type arguments must be inferrable.
+Calls to parameterized functions may provide a (possibly partial) type
+argument list, or may omit it entirely if the omitted type arguments are
+inferrable from the ordinary (non-type) function arguments.
+
+
+
+func min[T constraints.Ordered](x, y T) T { … }
+
+f := min // illegal: min must be instantiated when used without being called
+minInt := min[int] // minInt has type func(x, y int) int
+a := minInt(2, 3) // a has value 2 of type int
+b := min[float64](2.0, 3) // b has value 2.0 of type float64
+c := min(b, -1) // c has value -1.0 of type float64
+
+
+Type inference
+
+
+Missing type arguments may be inferred by a series of steps, described below.
+Each step attempts to use known information to infer additional type arguments.
+Type inference stops as soon as all type arguments are known.
+After type inference is complete, it is still necessary to substitute all type arguments
+for type parameters and verify that each type argument implements the relevant constraint;
+it is possible for an inferred type argument to fail to implement a constraint, in which
+case instantiation fails.
+
+
+
+Type inference is based on
+
+
+
+-
+ a type parameter list
+
+-
+ a substitution map M initialized with the known type arguments, if any
+
+-
+ a (possibly empty) list of ordinary function arguments (in case of a function call only)
+
+
+
+
+and then proceeds with the following steps:
+
+
+
+-
+ apply function argument type inference
+ to all typed ordinary function arguments
+
+-
+ apply constraint type inference
+
+-
+ apply function argument type inference to all untyped ordinary function arguments
+ using the default type for each of the untyped function arguments
+
+-
+ apply constraint type inference
+
+
+
+
+If there are no ordinary or untyped function arguments, the respective steps are skipped.
+Constraint type inference is skipped if the previous step didn't infer any new type arguments,
+but it is run at least once if there are missing type arguments.
+
+
+
+The substitution map M is carried through all steps, and each step may add entries to M.
+The process stops as soon as M has a type argument for each type parameter or if an inference step fails.
+If an inference step fails, or if M is still missing type arguments after the last step, type inference fails.
+
+
+Type unification
+
+
+Type inference is based on type unification. A single unification step
+applies to a substitution map and two types, either
+or both of which may be or contain type parameters. The substitution map tracks
+the known (explicitly provided or already inferred) type arguments: the map
+contains an entry P → A for each type
+parameter P and corresponding known type argument A.
+During unification, known type arguments take the place of their corresponding type
+parameters when comparing types. Unification is the process of finding substitution
+map entries that make the two types equivalent.
+
+
+
+For unification, two types that don't contain any type parameters from the current type
+parameter list are equivalent
+if they are identical, or if they are channel types that are identical ignoring channel
+direction, or if their underlying types are equivalent.
+
+
+
+Unification works by comparing the structure of pairs of types: their structure
+disregarding type parameters must be identical, and types other than type parameters
+must be equivalent.
+A type parameter in one type may match any complete subtype in the other type;
+each successful match causes an entry to be added to the substitution map.
+If the structure differs, or types other than type parameters are not equivalent,
+unification fails.
+
+
+
+
+
+For example, if T1 and T2 are type parameters,
+[]map[int]bool can be unified with any of the following:
+
+
+
+[]map[int]bool // types are identical
+T1 // adds T1 → []map[int]bool to substitution map
+[]T1 // adds T1 → map[int]bool to substitution map
+[]map[T1]T2 // adds T1 → int and T2 → bool to substitution map
+
+
+
+On the other hand, []map[int]bool cannot be unified with any of
+
+
+
+int // int is not a slice
+struct{} // a struct is not a slice
+[]struct{} // a struct is not a map
+[]map[T1]string // map element types don't match
+
+
+
+As an exception to this general rule, because a defined type
+D and a type literal L are never equivalent,
+unification compares the underlying type of D with L instead.
+For example, given the defined type
+
+
+
+type Vector []float64
+
+
+
+and the type literal []E, unification compares []float64 with
+[]E and adds an entry E → float64 to
+the substitution map.
+
+
+Function argument type inference
+
+
+
+
+Function argument type inference infers type arguments from function arguments:
+if a function parameter is declared with a type T that uses
+type parameters,
+unifying the type of the corresponding
+function argument with T may infer type arguments for the type
+parameters used by T.
+
+
+
+For instance, given the type-parameterized function
+
+
+
+func scale[Number ~int64|~float64|~complex128](v []Number, s Number) []Number
+
+
+
+and the call
+
+
+
+var vector []float64
+scaledVector := scale(vector, 42)
+
+
+
+the type argument for Number can be inferred from the function argument
+vector by unifying the type of vector with the corresponding
+parameter type: []float64 and []Number
+match in structure and float64 matches with Number.
+This adds the entry Number → float64 to the
+substitution map.
+Untyped arguments, such as the second function argument 42 here, are ignored
+in the first round of function argument type inference and only considered if there are
+unresolved type parameters left.
+
+
+
+Function argument type inference can be used when the function has ordinary parameters
+whose types are defined using the function's type parameters. Inference happens in two
+separate phases; each phase operates on a specific list of (parameter, argument) pairs:
+
+
+
+-
+ The list Lt contains all (parameter, argument) pairs where the parameter
+ type uses type parameters and where the function argument is typed.
+
+-
+ The list Lu contains all remaining pairs where the parameter type is a single
+ type parameter. In this list, the respective function arguments are untyped.
+
+
+
+
+Any other (parameter, argument) pair is ignored.
+
+
+
+By construction, the arguments of the pairs in Lu are untyped constants
+(or the untyped boolean result of a comparison). And because default types
+of untyped values are always predeclared non-composite types, they can never match against
+a composite type, so it is sufficient to only consider parameter types that are single type
+parameters.
+
+
+
+Each list is processed in a separate phase:
+
+
+
+-
+ In the first phase, the parameter and argument types of each pair in Lt
+ are unified. If unification succeeds for a pair, it may yield new entries that
+ are added to the substitution map M. If unification fails, type inference
+ fails.
+
+-
+ The second phase considers the entries of list Lu. Type parameters for
+ which the type argument has already been determined are ignored in this phase.
+ For each remaining pair, the parameter type (which is a single type parameter) and
+ the default type of the corresponding untyped argument is
+ unified. If unification fails, type inference fails.
+
+
+
+
+Example:
+
+
+
+func min[T constraints.Ordered](x, y T) T
+
+var x int
+min(x, 2.0) // T is int, inferred from typed argument x; 2.0 is assignable to int
+min(1.0, 2.0) // T is float64, inferred from default type for 1.0 and matches default type for 2.0
+min(1.0, 2) // illegal: default type float64 (for 1.0) doesn't match default type int (for 2)
+
+
+Constraint type inference
+
+
+
+
+Constraint type inference infers type arguments from already known
+type arguments by considering structural type constraints:
+if the structural type T of a structural constraint is parameterized,
+unifying a known type argument with T may
+infer type arguments for other type parameters used by the structural type.
+
+
+
+For instance, consider the type parameter list with type parameters List and
+Elem:
+
+
+
+[List ~[]Elem, Elem any]
+
+
+
+Constraint type inference can deduce the type of Elem from the type argument
+for List because Elem is a type parameter in the structural constraint
+~[]Elem for List.
+If the type argument is Bytes:
+
+
+
+type Bytes []byte
+
+
+
+unifying the underlying type of Bytes with the structural constraint means
+unifying []byte with []Elem. That unification succeeds and yields
+the substitution map entry
+Elem → byte.
+Thus, in this example, constraint type inference can infer the second type argument from the
+first one.
+
+
+
+Generally, constraint type inference proceeds in two phases: Starting with a given
+substitution map M
+
+
+
+-
+For all type parameters with a structural constraint, unify the type parameter with the structural
+type of its constraint. If any unification fails, constraint type inference fails.
+
+
+-
+At this point, some entries in M may map type parameters to other
+type parameters or to types containing type parameters. For each entry
+
P → A in M where A is or
+contains type parameters Q for which there exist entries
+Q → B in M, substitute those
+Q with the respective B in A.
+Stop when no further substitution is possible.
+
+
+
+
+The result of constraint type inference is the final substitution map M from type
+parameters P to type arguments A where no type parameter P
+appears in any of the A.
+
+
+
+For instance, given the type parameter list
+
+
+
+[A any, B []C, C *A]
+
+
+
+and the single provided type argument int for type parameter A,
+the initial substitution map M contains the entry A → int.
+
+
+
+In the first phase, the type parameters B and C are unified
+with the structural type of their respective constraints. This adds the entries
+B → []C and C → *A
+to M.
+
+
+At this point there are two entries in M where the right-hand side
+is or contains type parameters for which there exists other entries in M:
+[]C and *A.
+In the second phase, these type parameters are replaced with their respective
+types. It doesn't matter in which order this happens. Starting with the state
+of M after the first phase:
+
+
+
+A → int,
+B → []C,
+C → *A
+
+
+
+Replace A on the right-hand side of → with int:
+
+
+
+A → int,
+B → []C,
+C → *int
+
+
+
+Replace C on the right-hand side of → with *int:
+
+
+
+A → int,
+B → []*int,
+C → *int
+
+
+
+At this point no further substitution is possible and the map is full.
+Therefore, M represents the final map of type parameters
+to type arguments for the given type parameter list.
+
Operators
@@ -3620,7 +4683,8 @@ var p = 1<<s == 1<<33 // 1 has type int; p == true
var u = 1.0<<s // illegal: 1.0 has type float64, cannot shift
var u1 = 1.0<<s != 0 // illegal: 1.0 has type float64, cannot shift
var u2 = 1<<s != 1.0 // illegal: 1 has type float64, cannot shift
-var v float32 = 1<<s // illegal: 1 has type float32, cannot shift
+var v1 float32 = 1<<s // illegal: 1 has type float32, cannot shift
+var v2 = string(1<<s) // illegal: 1 is converted to a string, cannot shift
var w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression; w == 1<<33
var x = a[1.0<<s] // panics: 1.0 has type int, but 1<<33 overflows array bounds
var b = make([]byte, 1.0<<s) // 1.0 has type int; len(b) == 1<<33
@@ -3697,7 +4761,6 @@ The bitwise logical and shift operators apply to integers only.
>> right shift integer >> integer >= 0
-
Integer operators
@@ -3790,6 +4853,7 @@ the unsigned integer's type.
Loosely speaking, these unsigned integer operations
discard high bits upon overflow, and programs may rely on "wrap around".
+
For signed integers, the operations +,
-, *, /, and << may legally
@@ -4123,7 +5187,9 @@ as for non-constant x.
-Converting a constant yields a typed constant as result.
+Converting a constant to a type that is not a type parameter
+yields a typed constant.
+Converting a constant to a type parameter yields a non-constant value of that type.
@@ -4153,14 +5219,16 @@ in any of these cases:
ignoring struct tags (see below),
- x's type and T have identical
- underlying types.
+ x's type and T are not
+ type parameters but have
+ identical underlying types.
ignoring struct tags (see below),
x's type and T are pointer types
- that are not defined types,
- and their pointer base types have identical underlying types.
+ that are not named types,
+ and their pointer base types are not type parameters but
+ have identical underlying types.
x's type and T are both integer or floating
@@ -4182,6 +5250,28 @@ in any of these cases:
+
+Additionally, if T or
x's type V are type
+parameters with specific types, x
+can also be converted to type T if one of the following conditions applies:
+
+
+
+-
+Both
V and T are type parameters and a value of each
+specific type of V can be converted to each specific type
+of T.
+
+-
+Only
V is a type parameter and a value of each
+specific type of V can be converted to T.
+
+-
+Only
T is a type parameter and x can be converted to each
+specific type of T.
+
+
+
Struct tags are ignored when comparing struct types
for identity for the purpose of conversion:
@@ -5048,7 +6138,8 @@ switch x.(type) {
Cases then match actual types T against the dynamic type of the
expression x. As with type assertions, x must be of
-interface type, and each non-interface type
+interface type, but not a
+type parameter, and each non-interface type
T listed in a case must implement the type of x.
The types listed in the cases of a type switch must all be
different.
@@ -5059,7 +6150,6 @@ TypeSwitchStmt = "switch" [ SimpleStmt ";" ] TypeSwitchGuard "{" { TypeCaseClau
TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
TypeCaseClause = TypeSwitchCase ":" StatementList .
TypeSwitchCase = "case" TypeList | "default" .
-TypeList = Type { "," Type } .
@@ -5268,7 +6358,7 @@ For a string value, the "range" clause iterates over the Unicode code points
in the string starting at byte index 0. On successive iterations, the index value will be the
index of the first byte of successive UTF-8-encoded code points in the string,
and the second value, of type rune, will be the value of
-the corresponding code point. If the iteration encounters an invalid
+the corresponding code point. If the iteration encounters an invalid
UTF-8 sequence, the second value will be 0xFFFD,
the Unicode replacement character, and the next iteration will advance
a single byte in the string.
@@ -5819,7 +6909,6 @@ The multi-valued receive operation
returns a received value along with an indication of whether the channel is closed.
-
Length and capacity
@@ -5836,12 +6925,24 @@ len(s) string type string length in bytes
[]T slice length
map[K]T map length (number of defined keys)
chan T number of elements queued in channel buffer
+ type parameter see below
cap(s) [n]T, *[n]T array length (== n)
[]T slice capacity
chan T channel buffer capacity
+ type parameter see below
+
+If the argument type is a type parameter P,
+P must have specific types, and
+the call len(e) (or cap(e) respectively) must be valid for
+each specific type of P.
+The result is the length (or capacity, respectively) of the argument whose type
+corresponds to the type argument with which P was
+instantiated.
+
+
The capacity of a slice is the number of elements for which there is
space allocated in the underlying array.
@@ -6783,8 +7884,14 @@ uintptr(unsafe.Pointer(&x)) % unsafe.Alignof(x) == 0
-Calls to Alignof, Offsetof, and
-Sizeof are compile-time constant expressions of type uintptr.
+A (variable of) type T has variable size if T
+is a type parameter, or if it is an array or struct type containing elements
+or fields of variable size. Otherwise the size is constant.
+Calls to Alignof, Offsetof, and Sizeof
+are compile-time constant expressions of
+type uintptr if their arguments (or the struct s in
+the selector expression s.f for Offsetof) are types
+of constant size.
diff --git a/lib/time/update.bash b/lib/time/update.bash
index e088ea6b90..feb95e2e53 100755
--- a/lib/time/update.bash
+++ b/lib/time/update.bash
@@ -8,8 +8,8 @@
# Consult https://www.iana.org/time-zones for the latest versions.
# Versions to use.
-CODE=2021a
-DATA=2021a
+CODE=2021e
+DATA=2021e
set -e
rm -rf work
diff --git a/lib/time/zoneinfo.zip b/lib/time/zoneinfo.zip
index d32fbba517..a859b4113b 100644
Binary files a/lib/time/zoneinfo.zip and b/lib/time/zoneinfo.zip differ
diff --git a/misc/cgo/test/cgo_test.go b/misc/cgo/test/cgo_test.go
index fe99e251e9..774277e10d 100644
--- a/misc/cgo/test/cgo_test.go
+++ b/misc/cgo/test/cgo_test.go
@@ -61,6 +61,7 @@ func Test32579(t *testing.T) { test32579(t) }
func Test31891(t *testing.T) { test31891(t) }
func Test42018(t *testing.T) { test42018(t) }
func Test45451(t *testing.T) { test45451(t) }
+func Test49633(t *testing.T) { test49633(t) }
func TestAlign(t *testing.T) { testAlign(t) }
func TestAtol(t *testing.T) { testAtol(t) }
func TestBlocking(t *testing.T) { testBlocking(t) }
diff --git a/misc/cgo/test/test.go b/misc/cgo/test/test.go
index 3b8f548b13..dd81f770a2 100644
--- a/misc/cgo/test/test.go
+++ b/misc/cgo/test/test.go
@@ -915,6 +915,11 @@ void issue40494(enum Enum40494 e, union Union40494* up) {}
// Issue 45451, bad handling of go:notinheap types.
typedef struct issue45451Undefined issue45451;
+
+// Issue 49633, example of cgo.Handle with void*.
+extern void GoFunc49633(void*);
+void cfunc49633(void *context) { GoFunc49633(context); }
+
*/
import "C"
diff --git a/misc/cgo/test/testdata/issue9400_linux.go b/misc/cgo/test/testdata/issue9400_linux.go
index e94a9bb45f..051b9ab0bb 100644
--- a/misc/cgo/test/testdata/issue9400_linux.go
+++ b/misc/cgo/test/testdata/issue9400_linux.go
@@ -15,6 +15,7 @@ import "C"
import (
"runtime"
+ "runtime/debug"
"sync/atomic"
"testing"
@@ -46,6 +47,14 @@ func test9400(t *testing.T) {
big[i] = pattern
}
+ // Disable GC for the duration of the test.
+	// This avoids a potential GC deadlock when spinning in uninterruptible ASM below #49695.
+ defer debug.SetGCPercent(debug.SetGCPercent(-1))
+ // SetGCPercent waits until the mark phase is over, but the runtime
+ // also preempts at the start of the sweep phase, so make sure that's
+ // done too. See #49695.
+ runtime.GC()
+
// Temporarily rewind the stack and trigger SIGSETXID
issue9400.RewindAndSetgid()
diff --git a/misc/cgo/test/testx.go b/misc/cgo/test/testx.go
index 823c3e13d2..8ec84a8b22 100644
--- a/misc/cgo/test/testx.go
+++ b/misc/cgo/test/testx.go
@@ -113,6 +113,7 @@ typedef struct {
int i;
} Issue38408, *PIssue38408;
+extern void cfunc49633(void*); // definition is in test.go
*/
import "C"
@@ -554,3 +555,26 @@ func GoFunc37033(handle C.uintptr_t) {
// A typedef pointer can be used as the element type.
// No runtime test; just make sure it compiles.
var _ C.PIssue38408 = &C.Issue38408{i: 1}
+
+// issue 49633, example use of cgo.Handle with void*
+
+type data49633 struct {
+ msg string
+}
+
+//export GoFunc49633
+func GoFunc49633(context unsafe.Pointer) {
+ h := *(*cgo.Handle)(context)
+ v := h.Value().(*data49633)
+ v.msg = "hello"
+}
+
+func test49633(t *testing.T) {
+ v := &data49633{}
+ h := cgo.NewHandle(v)
+ defer h.Delete()
+ C.cfunc49633(unsafe.Pointer(&h))
+ if v.msg != "hello" {
+ t.Errorf("msg = %q, want 'hello'", v.msg)
+ }
+}
diff --git a/misc/cgo/testcarchive/carchive_test.go b/misc/cgo/testcarchive/carchive_test.go
index a2b43bb72d..a821396c77 100644
--- a/misc/cgo/testcarchive/carchive_test.go
+++ b/misc/cgo/testcarchive/carchive_test.go
@@ -10,12 +10,15 @@ import (
"debug/elf"
"flag"
"fmt"
+ "io"
+ "io/fs"
"log"
"os"
"os/exec"
"path/filepath"
"regexp"
"runtime"
+ "strconv"
"strings"
"syscall"
"testing"
@@ -245,6 +248,29 @@ func testInstall(t *testing.T, exe, libgoa, libgoh string, buildcmd ...string) {
var badLineRegexp = regexp.MustCompile(`(?m)^#line [0-9]+ "/.*$`)
+// checkIsExecutable verifies that exe exists and has execute permission.
+//
+// (https://golang.org/issue/49693 notes failures with "no such file or
+// directory", so we want to double-check that the executable actually exists
+// immediately after we build it in order to better understand that failure
+// mode.)
+func checkIsExecutable(t *testing.T, exe string) {
+ t.Helper()
+ fi, err := os.Stat(exe)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if runtime.GOOS == "windows" {
+ // os.File doesn't check the "execute" permission on Windows files
+ // and as a result doesn't set that bit in a file's permissions.
+ // Assume that if the file exists it is “executable enough”.
+ return
+ }
+ if fi.Mode()&0111 == 0 {
+ t.Fatalf("%s is not executable: %0o", exe, fi.Mode()&fs.ModePerm)
+ }
+}
+
// checkLineComments checks that the export header generated by
// -buildmode=c-archive doesn't have any absolute paths in the #line
// comments. We don't want those paths because they are unhelpful for
@@ -263,6 +289,173 @@ func checkLineComments(t *testing.T, hdrname string) {
}
}
+// checkArchive verifies that the created library looks OK.
+// We just check a couple of things now, we can add more checks as needed.
+func checkArchive(t *testing.T, arname string) {
+ t.Helper()
+
+ switch GOOS {
+ case "aix", "darwin", "ios", "windows":
+ // We don't have any checks for non-ELF libraries yet.
+ if _, err := os.Stat(arname); err != nil {
+ t.Errorf("archive %s does not exist: %v", arname, err)
+ }
+ default:
+ checkELFArchive(t, arname)
+ }
+}
+
+// checkELFArchive checks an ELF archive.
+func checkELFArchive(t *testing.T, arname string) {
+ t.Helper()
+
+ f, err := os.Open(arname)
+ if err != nil {
+ t.Errorf("archive %s does not exist: %v", arname, err)
+ return
+ }
+ defer f.Close()
+
+ // TODO(iant): put these in a shared package? But where?
+ const (
+		magic = "!<arch>\n"
+ fmag = "`\n"
+
+ namelen = 16
+ datelen = 12
+ uidlen = 6
+ gidlen = 6
+ modelen = 8
+ sizelen = 10
+ fmaglen = 2
+ hdrlen = namelen + datelen + uidlen + gidlen + modelen + sizelen + fmaglen
+ )
+
+ type arhdr struct {
+ name string
+ date string
+ uid string
+ gid string
+ mode string
+ size string
+ fmag string
+ }
+
+ var magbuf [len(magic)]byte
+ if _, err := io.ReadFull(f, magbuf[:]); err != nil {
+ t.Errorf("%s: archive too short", arname)
+ return
+ }
+ if string(magbuf[:]) != magic {
+ t.Errorf("%s: incorrect archive magic string %q", arname, magbuf)
+ }
+
+ off := int64(len(magic))
+ for {
+ if off&1 != 0 {
+ var b [1]byte
+ if _, err := f.Read(b[:]); err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Errorf("%s: error skipping alignment byte at %d: %v", arname, off, err)
+ }
+ off++
+ }
+
+ var hdrbuf [hdrlen]byte
+ if _, err := io.ReadFull(f, hdrbuf[:]); err != nil {
+ if err == io.EOF {
+ break
+ }
+ t.Errorf("%s: error reading archive header at %d: %v", arname, off, err)
+ return
+ }
+
+ var hdr arhdr
+ hdrslice := hdrbuf[:]
+ set := func(len int, ps *string) {
+ *ps = string(bytes.TrimSpace(hdrslice[:len]))
+ hdrslice = hdrslice[len:]
+ }
+ set(namelen, &hdr.name)
+ set(datelen, &hdr.date)
+ set(uidlen, &hdr.uid)
+ set(gidlen, &hdr.gid)
+ set(modelen, &hdr.mode)
+ set(sizelen, &hdr.size)
+ hdr.fmag = string(hdrslice[:fmaglen])
+ hdrslice = hdrslice[fmaglen:]
+ if len(hdrslice) != 0 {
+ t.Fatalf("internal error: len(hdrslice) == %d", len(hdrslice))
+ }
+
+ if hdr.fmag != fmag {
+ t.Errorf("%s: invalid fmagic value %q at %d", arname, hdr.fmag, off)
+ return
+ }
+
+ size, err := strconv.ParseInt(hdr.size, 10, 64)
+ if err != nil {
+ t.Errorf("%s: error parsing size %q at %d: %v", arname, hdr.size, off, err)
+ return
+ }
+
+ off += hdrlen
+
+ switch hdr.name {
+ case "__.SYMDEF", "/", "/SYM64/":
+ // The archive symbol map.
+ case "//", "ARFILENAMES/":
+ // The extended name table.
+ default:
+ // This should be an ELF object.
+ checkELFArchiveObject(t, arname, off, io.NewSectionReader(f, off, size))
+ }
+
+ off += size
+ if _, err := f.Seek(off, os.SEEK_SET); err != nil {
+ t.Errorf("%s: failed to seek to %d: %v", arname, off, err)
+ }
+ }
+}
+
+// checkELFArchiveObject checks an object in an ELF archive.
+func checkELFArchiveObject(t *testing.T, arname string, off int64, obj io.ReaderAt) {
+ t.Helper()
+
+ ef, err := elf.NewFile(obj)
+ if err != nil {
+ t.Errorf("%s: failed to open ELF file at %d: %v", arname, off, err)
+ return
+ }
+ defer ef.Close()
+
+ // Verify section types.
+ for _, sec := range ef.Sections {
+ want := elf.SHT_NULL
+ switch sec.Name {
+ case ".text", ".data":
+ want = elf.SHT_PROGBITS
+ case ".bss":
+ want = elf.SHT_NOBITS
+ case ".symtab":
+ want = elf.SHT_SYMTAB
+ case ".strtab":
+ want = elf.SHT_STRTAB
+ case ".init_array":
+ want = elf.SHT_INIT_ARRAY
+ case ".fini_array":
+ want = elf.SHT_FINI_ARRAY
+ case ".preinit_array":
+ want = elf.SHT_PREINIT_ARRAY
+ }
+ if want != elf.SHT_NULL && sec.Type != want {
+ t.Errorf("%s: incorrect section type in elf file at %d for section %q: got %v want %v", arname, off, sec.Name, sec.Type, want)
+ }
+ }
+}
+
func TestInstall(t *testing.T) {
if !testWork {
defer os.RemoveAll(filepath.Join(GOPATH, "pkg"))
@@ -310,7 +503,7 @@ func TestEarlySignalHandler(t *testing.T) {
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
- os.Remove("testp")
+ os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -321,6 +514,7 @@ func TestEarlySignalHandler(t *testing.T) {
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
+ checkArchive(t, "libgo2.a")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main2.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
@@ -350,7 +544,7 @@ func TestSignalForwarding(t *testing.T) {
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
- os.Remove("testp")
+ os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -361,6 +555,7 @@ func TestSignalForwarding(t *testing.T) {
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
+ checkArchive(t, "libgo2.a")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
@@ -374,7 +569,7 @@ func TestSignalForwarding(t *testing.T) {
cmd = exec.Command(bin[0], append(bin[1:], "1")...)
out, err := cmd.CombinedOutput()
- t.Logf("%s", out)
+ t.Logf("%v\n%s", cmd.Args, out)
expectSignal(t, err, syscall.SIGSEGV)
// SIGPIPE is never forwarded on darwin. See golang.org/issue/33384.
@@ -383,7 +578,9 @@ func TestSignalForwarding(t *testing.T) {
cmd = exec.Command(bin[0], append(bin[1:], "3")...)
out, err = cmd.CombinedOutput()
- t.Logf("%s", out)
+ if len(out) > 0 {
+ t.Logf("%s", out)
+ }
expectSignal(t, err, syscall.SIGPIPE)
}
}
@@ -400,7 +597,7 @@ func TestSignalForwardingExternal(t *testing.T) {
defer func() {
os.Remove("libgo2.a")
os.Remove("libgo2.h")
- os.Remove("testp")
+ os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -411,6 +608,7 @@ func TestSignalForwardingExternal(t *testing.T) {
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
+ checkArchive(t, "libgo2.a")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main5.c", "libgo2.a")
if runtime.Compiler == "gccgo" {
@@ -517,7 +715,7 @@ func TestOsSignal(t *testing.T) {
defer func() {
os.Remove("libgo3.a")
os.Remove("libgo3.h")
- os.Remove("testp")
+ os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -528,6 +726,7 @@ func TestOsSignal(t *testing.T) {
t.Fatal(err)
}
checkLineComments(t, "libgo3.h")
+ checkArchive(t, "libgo3.a")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main3.c", "libgo3.a")
if runtime.Compiler == "gccgo" {
@@ -554,7 +753,7 @@ func TestSigaltstack(t *testing.T) {
defer func() {
os.Remove("libgo4.a")
os.Remove("libgo4.h")
- os.Remove("testp")
+ os.Remove("testp" + exeSuffix)
os.RemoveAll(filepath.Join(GOPATH, "pkg"))
}()
}
@@ -565,6 +764,7 @@ func TestSigaltstack(t *testing.T) {
t.Fatal(err)
}
checkLineComments(t, "libgo4.h")
+ checkArchive(t, "libgo4.a")
ccArgs := append(cc, "-o", "testp"+exeSuffix, "main4.c", "libgo4.a")
if runtime.Compiler == "gccgo" {
@@ -747,25 +947,30 @@ func TestSIGPROF(t *testing.T) {
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo6.a", "./libgo6")
- if out, err := cmd.CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v\n%s", cmd.Args, out)
+ if err != nil {
t.Fatal(err)
}
checkLineComments(t, "libgo6.h")
+ checkArchive(t, "libgo6.a")
ccArgs := append(cc, "-o", "testp6"+exeSuffix, "main6.c", "libgo6.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
- if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
+ if err != nil {
t.Fatal(err)
}
+ checkIsExecutable(t, "./testp6"+exeSuffix)
argv := cmdToRun("./testp6")
cmd = exec.Command(argv[0], argv[1:]...)
- if out, err := cmd.CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err = cmd.CombinedOutput()
+ t.Logf("%v\n%s", argv, out)
+ if err != nil {
t.Fatal(err)
}
}
@@ -788,13 +993,13 @@ func TestCompileWithoutShared(t *testing.T) {
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-gcflags=-shared=false", "-o", "libgo2.a", "./libgo2")
- t.Log(cmd.Args)
out, err := cmd.CombinedOutput()
- t.Logf("%s", out)
+ t.Logf("%v\n%s", cmd.Args, out)
if err != nil {
t.Fatal(err)
}
checkLineComments(t, "libgo2.h")
+ checkArchive(t, "libgo2.a")
exe := "./testnoshared" + exeSuffix
@@ -804,23 +1009,22 @@ func TestCompileWithoutShared(t *testing.T) {
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
- t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
// If -no-pie unrecognized, try -nopie if this is possibly clang
if err != nil && bytes.Contains(out, []byte("unknown")) && !strings.Contains(cc[0], "gcc") {
ccArgs = append(cc, "-o", exe, "-nopie", "main5.c", "libgo2.a")
- t.Log(ccArgs)
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
}
// Don't use either -no-pie or -nopie
if err != nil && bytes.Contains(out, []byte("unrecognized")) {
- ccArgs := append(cc, "-o", exe, "main5.c", "libgo2.a")
- t.Log(ccArgs)
+ ccArgs = append(cc, "-o", exe, "main5.c", "libgo2.a")
out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
}
- t.Logf("%s", out)
if err != nil {
t.Fatal(err)
}
@@ -829,17 +1033,15 @@ func TestCompileWithoutShared(t *testing.T) {
}
binArgs := append(cmdToRun(exe), "1")
- t.Log(binArgs)
out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput()
- t.Logf("%s", out)
+ t.Logf("%v\n%s", binArgs, out)
expectSignal(t, err, syscall.SIGSEGV)
// SIGPIPE is never forwarded on darwin. See golang.org/issue/33384.
if runtime.GOOS != "darwin" && runtime.GOOS != "ios" {
binArgs := append(cmdToRun(exe), "3")
- t.Log(binArgs)
out, err = exec.Command(binArgs[0], binArgs[1:]...).CombinedOutput()
- t.Logf("%s", out)
+ t.Logf("%v\n%s", binArgs, out)
expectSignal(t, err, syscall.SIGPIPE)
}
}
@@ -894,26 +1096,30 @@ func TestManyCalls(t *testing.T) {
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo7.a", "./libgo7")
- if out, err := cmd.CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v\n%s", cmd.Args, out)
+ if err != nil {
t.Fatal(err)
}
checkLineComments(t, "libgo7.h")
+ checkArchive(t, "libgo7.a")
ccArgs := append(cc, "-o", "testp7"+exeSuffix, "main7.c", "libgo7.a")
if runtime.Compiler == "gccgo" {
ccArgs = append(ccArgs, "-lgo")
}
- if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
+ if err != nil {
t.Fatal(err)
}
+ checkIsExecutable(t, "./testp7"+exeSuffix)
argv := cmdToRun("./testp7")
cmd = exec.Command(argv[0], argv[1:]...)
- var sb strings.Builder
- cmd.Stdout = &sb
- cmd.Stderr = &sb
+ sb := new(strings.Builder)
+ cmd.Stdout = sb
+ cmd.Stderr = sb
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
@@ -926,8 +1132,9 @@ func TestManyCalls(t *testing.T) {
)
defer timer.Stop()
- if err := cmd.Wait(); err != nil {
- t.Log(sb.String())
+ err = cmd.Wait()
+ t.Logf("%v\n%s", cmd.Args, sb)
+ if err != nil {
t.Error(err)
}
}
@@ -949,23 +1156,27 @@ func TestPreemption(t *testing.T) {
}
cmd := exec.Command("go", "build", "-buildmode=c-archive", "-o", "libgo8.a", "./libgo8")
- if out, err := cmd.CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%v\n%s", cmd.Args, out)
+ if err != nil {
t.Fatal(err)
}
checkLineComments(t, "libgo8.h")
+ checkArchive(t, "libgo8.a")
ccArgs := append(cc, "-o", "testp8"+exeSuffix, "main8.c", "libgo8.a")
- if out, err := exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput(); err != nil {
- t.Logf("%s", out)
+ out, err = exec.Command(ccArgs[0], ccArgs[1:]...).CombinedOutput()
+ t.Logf("%v\n%s", ccArgs, out)
+ if err != nil {
t.Fatal(err)
}
+ checkIsExecutable(t, "./testp8"+exeSuffix)
argv := cmdToRun("./testp8")
cmd = exec.Command(argv[0], argv[1:]...)
- var sb strings.Builder
- cmd.Stdout = &sb
- cmd.Stderr = &sb
+ sb := new(strings.Builder)
+ cmd.Stdout = sb
+ cmd.Stderr = sb
if err := cmd.Start(); err != nil {
t.Fatal(err)
}
@@ -978,8 +1189,9 @@ func TestPreemption(t *testing.T) {
)
defer timer.Stop()
- if err := cmd.Wait(); err != nil {
- t.Log(sb.String())
+ err = cmd.Wait()
+ t.Logf("%v\n%s", cmd.Args, sb)
+ if err != nil {
t.Error(err)
}
}
diff --git a/misc/cgo/testcshared/cshared_test.go b/misc/cgo/testcshared/cshared_test.go
index 84b92d502f..13ec8761e8 100644
--- a/misc/cgo/testcshared/cshared_test.go
+++ b/misc/cgo/testcshared/cshared_test.go
@@ -781,10 +781,10 @@ func copyFile(t *testing.T, dst, src string) {
func TestGo2C2Go(t *testing.T) {
switch GOOS {
- case "darwin", "ios":
- // Darwin shared libraries don't support the multiple
+ case "darwin", "ios", "windows":
+ // Non-ELF shared libraries don't support the multiple
// copies of the runtime package implied by this test.
- t.Skip("linking c-shared into Go programs not supported on Darwin; issue 29061")
+ t.Skipf("linking c-shared into Go programs not supported on %s; issue 29061, 49457", GOOS)
case "android":
t.Skip("test fails on android; issue 29087")
}
diff --git a/misc/cgo/testplugin/plugin_test.go b/misc/cgo/testplugin/plugin_test.go
index 9697dbf7a7..10c5db2646 100644
--- a/misc/cgo/testplugin/plugin_test.go
+++ b/misc/cgo/testplugin/plugin_test.go
@@ -265,10 +265,6 @@ func TestIssue25756(t *testing.T) {
// Test with main using -buildmode=pie with plugin for issue #43228
func TestIssue25756pie(t *testing.T) {
- if os.Getenv("GO_BUILDER_NAME") == "darwin-arm64-11_0-toothrot" {
- t.Skip("broken on darwin/arm64 builder in sharded mode; see issue 46239")
- }
-
goCmd(t, "build", "-buildmode=plugin", "-o", "life.so", "./issue25756/plugin")
goCmd(t, "build", "-buildmode=pie", "-o", "issue25756pie.exe", "./issue25756/main.go")
run(t, "./issue25756pie.exe")
@@ -293,3 +289,31 @@ func TestIssue44956(t *testing.T) {
goCmd(t, "build", "-o", "issue44956.exe", "./issue44956/main.go")
run(t, "./issue44956.exe")
}
+
+func TestForkExec(t *testing.T) {
+ // Issue 38824: importing the plugin package causes it hang in forkExec on darwin.
+
+ t.Parallel()
+ goCmd(t, "build", "-o", "forkexec.exe", "./forkexec/main.go")
+
+ var cmd *exec.Cmd
+ done := make(chan int, 1)
+
+ go func() {
+ for i := 0; i < 100; i++ {
+ cmd = exec.Command("./forkexec.exe", "1")
+ err := cmd.Run()
+ if err != nil {
+ t.Errorf("running command failed: %v", err)
+ break
+ }
+ }
+ done <- 1
+ }()
+ select {
+ case <-done:
+ case <-time.After(5 * time.Minute):
+ cmd.Process.Kill()
+ t.Fatalf("subprocess hang")
+ }
+}
diff --git a/misc/cgo/testplugin/testdata/forkexec/main.go b/misc/cgo/testplugin/testdata/forkexec/main.go
new file mode 100644
index 0000000000..3169ff5f04
--- /dev/null
+++ b/misc/cgo/testplugin/testdata/forkexec/main.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "os"
+ "os/exec"
+ _ "plugin"
+ "sync"
+)
+
+func main() {
+ if os.Args[1] != "1" {
+ return
+ }
+
+ var wg sync.WaitGroup
+ for i := 0; i < 8; i++ {
+ wg.Add(1)
+ go func() {
+ defer wg.Done()
+ // does not matter what we exec, just exec itself
+ cmd := exec.Command("./forkexec.exe", "0")
+ cmd.Run()
+ }()
+ }
+ wg.Wait()
+}
diff --git a/misc/cgo/testsanitizers/asan_test.go b/misc/cgo/testsanitizers/asan_test.go
index dbcce2fe28..1b70bce3d1 100644
--- a/misc/cgo/testsanitizers/asan_test.go
+++ b/misc/cgo/testsanitizers/asan_test.go
@@ -33,11 +33,13 @@ func TestASAN(t *testing.T) {
cases := []struct {
src string
memoryAccessError string
+ errorLocation string
}{
- {src: "asan1_fail.go", memoryAccessError: "heap-use-after-free"},
- {src: "asan2_fail.go", memoryAccessError: "heap-buffer-overflow"},
- {src: "asan3_fail.go", memoryAccessError: "use-after-poison"},
- {src: "asan4_fail.go", memoryAccessError: "use-after-poison"},
+ {src: "asan1_fail.go", memoryAccessError: "heap-use-after-free", errorLocation: "asan1_fail.go:25"},
+ {src: "asan2_fail.go", memoryAccessError: "heap-buffer-overflow", errorLocation: "asan2_fail.go:31"},
+ {src: "asan3_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan3_fail.go:13"},
+ {src: "asan4_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan4_fail.go:13"},
+ {src: "asan5_fail.go", memoryAccessError: "use-after-poison", errorLocation: "asan5_fail.go:18"},
{src: "asan_useAfterReturn.go"},
}
for _, tc := range cases {
@@ -54,8 +56,21 @@ func TestASAN(t *testing.T) {
cmd := hangProneCmd(outPath)
if tc.memoryAccessError != "" {
- out, err := cmd.CombinedOutput()
- if err != nil && strings.Contains(string(out), tc.memoryAccessError) {
+ outb, err := cmd.CombinedOutput()
+ out := string(outb)
+ if err != nil && strings.Contains(out, tc.memoryAccessError) {
+ // This string is output if the
+ // sanitizer library needs a
+ // symbolizer program and can't find it.
+ const noSymbolizer = "external symbolizer"
+				// Check if -asan option can correctly print where the error occurred.
+ if tc.errorLocation != "" &&
+ !strings.Contains(out, tc.errorLocation) &&
+ !strings.Contains(out, noSymbolizer) &&
+ compilerSupportsLocation() {
+
+ t.Errorf("%#q exited without expected location of the error\n%s; got failure\n%s", strings.Join(cmd.Args, " "), tc.errorLocation, out)
+ }
return
}
t.Fatalf("%#q exited without expected memory access error\n%s; got failure\n%s", strings.Join(cmd.Args, " "), tc.memoryAccessError, out)
diff --git a/misc/cgo/testsanitizers/cc_test.go b/misc/cgo/testsanitizers/cc_test.go
index b776afa3e6..05b77932b4 100644
--- a/misc/cgo/testsanitizers/cc_test.go
+++ b/misc/cgo/testsanitizers/cc_test.go
@@ -218,6 +218,23 @@ func compilerVersion() (version, error) {
return compiler.version, compiler.err
}
+// compilerSupportsLocation reports whether the compiler should be
+// able to provide file/line information in backtraces.
+func compilerSupportsLocation() bool {
+ compiler, err := compilerVersion()
+ if err != nil {
+ return false
+ }
+ switch compiler.name {
+ case "gcc":
+ return compiler.major >= 10
+ case "clang":
+ return true
+ default:
+ return false
+ }
+}
+
type compilerCheck struct {
once sync.Once
err error
@@ -269,6 +286,8 @@ func configure(sanitizer string) *config {
case "address":
c.goFlags = append(c.goFlags, "-asan")
+ // Set the debug mode to print the C stack trace.
+ c.cFlags = append(c.cFlags, "-g")
default:
panic(fmt.Sprintf("unrecognized sanitizer: %q", sanitizer))
diff --git a/misc/cgo/testsanitizers/testdata/asan1_fail.go b/misc/cgo/testsanitizers/testdata/asan1_fail.go
index e60db76981..80289e5c30 100644
--- a/misc/cgo/testsanitizers/testdata/asan1_fail.go
+++ b/misc/cgo/testsanitizers/testdata/asan1_fail.go
@@ -22,7 +22,7 @@ func main() {
// C passes Go an invalid pointer.
a := C.test()
// Use after free
- *a = 2
+ *a = 2 // BOOM
// We shouldn't get here; asan should stop us first.
fmt.Println(*a)
}
diff --git a/misc/cgo/testsanitizers/testdata/asan2_fail.go b/misc/cgo/testsanitizers/testdata/asan2_fail.go
index e35670c440..3ab0608571 100644
--- a/misc/cgo/testsanitizers/testdata/asan2_fail.go
+++ b/misc/cgo/testsanitizers/testdata/asan2_fail.go
@@ -28,7 +28,7 @@ func main() {
a := C.f()
q5 := (*C.int)(unsafe.Add(unsafe.Pointer(a), 4*5))
// Access to C pointer out of bounds.
- *q5 = 100
+ *q5 = 100 // BOOM
// We shouldn't get here; asan should stop us first.
fmt.Printf("q5: %d, %x\n", *q5, q5)
}
diff --git a/misc/cgo/testsanitizers/testdata/asan5_fail.go b/misc/cgo/testsanitizers/testdata/asan5_fail.go
new file mode 100644
index 0000000000..d6853eab73
--- /dev/null
+++ b/misc/cgo/testsanitizers/testdata/asan5_fail.go
@@ -0,0 +1,21 @@
+package main
+
+import (
+ "fmt"
+ "runtime"
+ "unsafe"
+)
+
+func main() {
+ p := new([1024 * 1000]int)
+ p[0] = 10
+ r := bar(&p[1024*1000-1])
+ fmt.Printf("r value is %d", r)
+}
+
+func bar(a *int) int {
+ p := unsafe.Add(unsafe.Pointer(a), 2*unsafe.Sizeof(int(1)))
+ runtime.ASanWrite(p, 8) // BOOM
+ *((*int)(p)) = 10
+ return *((*int)(p))
+}
diff --git a/misc/cgo/testshared/shared_test.go b/misc/cgo/testshared/shared_test.go
index 672811fe0e..b78083bc80 100644
--- a/misc/cgo/testshared/shared_test.go
+++ b/misc/cgo/testshared/shared_test.go
@@ -20,6 +20,7 @@ import (
"regexp"
"runtime"
"sort"
+ "strconv"
"strings"
"testing"
"time"
@@ -55,7 +56,7 @@ func runWithEnv(t *testing.T, msg string, env []string, args ...string) {
// t.Fatalf if the command fails.
func goCmd(t *testing.T, args ...string) string {
newargs := []string{args[0]}
- if *testX {
+ if *testX && args[0] != "env" {
newargs = append(newargs, "-x")
}
newargs = append(newargs, args[1:]...)
@@ -461,7 +462,9 @@ func TestTrivialExecutable(t *testing.T) {
run(t, "trivial executable", "../../bin/trivial")
AssertIsLinkedTo(t, "../../bin/trivial", soname)
AssertHasRPath(t, "../../bin/trivial", gorootInstallDir)
- checkSize(t, "../../bin/trivial", 100000) // it is 19K on linux/amd64, 100K should be enough
+ // It is 19K on linux/amd64, with separate-code in binutils ld and 64k being most common alignment
+ // 4*64k should be enough, but this might need revision eventually.
+ checkSize(t, "../../bin/trivial", 256000)
}
// Build a trivial program in PIE mode that links against the shared runtime and check it runs.
@@ -470,7 +473,9 @@ func TestTrivialExecutablePIE(t *testing.T) {
run(t, "trivial executable", "./trivial.pie")
AssertIsLinkedTo(t, "./trivial.pie", soname)
AssertHasRPath(t, "./trivial.pie", gorootInstallDir)
- checkSize(t, "./trivial.pie", 100000) // it is 19K on linux/amd64, 100K should be enough
+ // It is 19K on linux/amd64, with separate-code in binutils ld and 64k being most common alignment
+ // 4*64k should be enough, but this might need revision eventually.
+ checkSize(t, "./trivial.pie", 256000)
}
// Check that the file size does not exceed a limit.
@@ -694,7 +699,15 @@ func requireGccgo(t *testing.T) {
if err != nil {
t.Fatalf("%s -dumpversion failed: %v\n%s", gccgoPath, err, output)
}
- if string(output) < "5" {
+ dot := bytes.Index(output, []byte{'.'})
+ if dot > 0 {
+ output = output[:dot]
+ }
+ major, err := strconv.Atoi(string(output))
+ if err != nil {
+ t.Skipf("can't parse gccgo version number %s", output)
+ }
+ if major < 5 {
t.Skipf("gccgo too old (%s)", strings.TrimSpace(string(output)))
}
diff --git a/misc/ios/clangwrap.sh b/misc/ios/clangwrap.sh
index dca3fcc904..8f7b439315 100755
--- a/misc/ios/clangwrap.sh
+++ b/misc/ios/clangwrap.sh
@@ -17,4 +17,4 @@ export IPHONEOS_DEPLOYMENT_TARGET=5.1
# cmd/cgo doesn't support llvm-gcc-4.2, so we have to use clang.
CLANG=`xcrun --sdk $SDK --find clang`
-exec "$CLANG" -arch $CLANGARCH -isysroot "$SDK_PATH" -m${PLATFORM}-version-min=10.0 "$@"
+exec "$CLANG" -arch $CLANGARCH -isysroot "$SDK_PATH" -m${PLATFORM}-version-min=12.0 "$@"
diff --git a/misc/reboot/reboot_test.go b/misc/reboot/reboot_test.go
index 6bafc608b5..ef164d3232 100644
--- a/misc/reboot/reboot_test.go
+++ b/misc/reboot/reboot_test.go
@@ -15,6 +15,10 @@ import (
)
func TestRepeatBootstrap(t *testing.T) {
+ if testing.Short() {
+ t.Skipf("skipping test that rebuilds the entire toolchain")
+ }
+
goroot, err := os.MkdirTemp("", "reboot-goroot")
if err != nil {
t.Fatal(err)
diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go
index 595de64725..c99b5c1920 100644
--- a/src/archive/tar/common.go
+++ b/src/archive/tar/common.go
@@ -538,7 +538,7 @@ type headerFileInfo struct {
func (fi headerFileInfo) Size() int64 { return fi.h.Size }
func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() }
func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime }
-func (fi headerFileInfo) Sys() interface{} { return fi.h }
+func (fi headerFileInfo) Sys() any { return fi.h }
// Name returns the base name of the file.
func (fi headerFileInfo) Name() string {
diff --git a/src/archive/tar/fuzz_test.go b/src/archive/tar/fuzz_test.go
new file mode 100644
index 0000000000..e73e0d2609
--- /dev/null
+++ b/src/archive/tar/fuzz_test.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package tar
+
+import (
+ "bytes"
+ "io"
+ "testing"
+)
+
+func FuzzReader(f *testing.F) {
+ b := bytes.NewBuffer(nil)
+ w := NewWriter(b)
+ inp := []byte("Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.")
+ err := w.WriteHeader(&Header{
+ Name: "lorem.txt",
+ Mode: 0600,
+ Size: int64(len(inp)),
+ })
+ if err != nil {
+ f.Fatalf("failed to create writer: %s", err)
+ }
+ _, err = w.Write(inp)
+ if err != nil {
+ f.Fatalf("failed to write file to archive: %s", err)
+ }
+ if err := w.Close(); err != nil {
+ f.Fatalf("failed to write archive: %s", err)
+ }
+ f.Add(b.Bytes())
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ r := NewReader(bytes.NewReader(b))
+ type file struct {
+ header *Header
+ content []byte
+ }
+ files := []file{}
+ for {
+ hdr, err := r.Next()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ return
+ }
+ buf := bytes.NewBuffer(nil)
+ if _, err := io.Copy(buf, r); err != nil {
+ continue
+ }
+ files = append(files, file{header: hdr, content: buf.Bytes()})
+ }
+
+ // If we were unable to read anything out of the archive don't
+ // bother trying to roundtrip it.
+ if len(files) == 0 {
+ return
+ }
+
+ out := bytes.NewBuffer(nil)
+ w := NewWriter(out)
+ for _, f := range files {
+ if err := w.WriteHeader(f.header); err != nil {
+ t.Fatalf("unable to write previously parsed header: %s", err)
+ }
+ if _, err := w.Write(f.content); err != nil {
+ t.Fatalf("unable to write previously parsed content: %s", err)
+ }
+ }
+ if err := w.Close(); err != nil {
+ t.Fatalf("Unable to write archive: %s", err)
+ }
+
+ // TODO: We may want to check if the archive roundtrips. This would require
+ // taking into account addition of the two zero trailer blocks that Writer.Close
+ // appends.
+ })
+}
diff --git a/src/archive/tar/reader_test.go b/src/archive/tar/reader_test.go
index c31a847ec3..f21a6065b4 100644
--- a/src/archive/tar/reader_test.go
+++ b/src/archive/tar/reader_test.go
@@ -1363,7 +1363,7 @@ func TestFileReader(t *testing.T) {
wantLCnt int64
wantPCnt int64
}
- testFnc interface{} // testRead | testWriteTo | testRemaining
+ testFnc any // testRead | testWriteTo | testRemaining
)
type (
@@ -1376,7 +1376,7 @@ func TestFileReader(t *testing.T) {
spd sparseDatas
size int64
}
- fileMaker interface{} // makeReg | makeSparse
+ fileMaker any // makeReg | makeSparse
)
vectors := []struct {
diff --git a/src/archive/tar/tar_test.go b/src/archive/tar/tar_test.go
index e9fafc7cc7..a476f5eb01 100644
--- a/src/archive/tar/tar_test.go
+++ b/src/archive/tar/tar_test.go
@@ -23,7 +23,7 @@ import (
type testError struct{ error }
-type fileOps []interface{} // []T where T is (string | int64)
+type fileOps []any // []T where T is (string | int64)
// testFile is an io.ReadWriteSeeker where the IO operations performed
// on it must match the list of operations in ops.
diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go
index 95ce99a3ed..da3fb89e65 100644
--- a/src/archive/tar/writer_test.go
+++ b/src/archive/tar/writer_test.go
@@ -67,7 +67,7 @@ func TestWriter(t *testing.T) {
testClose struct { // Close() == wantErr
wantErr error
}
- testFnc interface{} // testHeader | testWrite | testReadFrom | testClose
+ testFnc any // testHeader | testWrite | testReadFrom | testClose
)
vectors := []struct {
@@ -1031,7 +1031,7 @@ func TestFileWriter(t *testing.T) {
wantLCnt int64
wantPCnt int64
}
- testFnc interface{} // testWrite | testReadFrom | testRemaining
+ testFnc any // testWrite | testReadFrom | testRemaining
)
type (
@@ -1044,7 +1044,7 @@ func TestFileWriter(t *testing.T) {
sph sparseHoles
size int64
}
- fileMaker interface{} // makeReg | makeSparse
+ fileMaker any // makeReg | makeSparse
)
vectors := []struct {
diff --git a/src/archive/zip/fuzz_test.go b/src/archive/zip/fuzz_test.go
new file mode 100644
index 0000000000..7dffde69bf
--- /dev/null
+++ b/src/archive/zip/fuzz_test.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package zip
+
+import (
+ "bytes"
+ "io"
+ "os"
+ "path/filepath"
+ "testing"
+)
+
+func FuzzReader(f *testing.F) {
+ testdata, err := os.ReadDir("testdata")
+ if err != nil {
+ f.Fatalf("failed to read testdata directory: %s", err)
+ }
+ for _, de := range testdata {
+ if de.IsDir() {
+ continue
+ }
+ b, err := os.ReadFile(filepath.Join("testdata", de.Name()))
+ if err != nil {
+ f.Fatalf("failed to read testdata: %s", err)
+ }
+ f.Add(b)
+ }
+
+ f.Fuzz(func(t *testing.T, b []byte) {
+ r, err := NewReader(bytes.NewReader(b), int64(len(b)))
+ if err != nil {
+ return
+ }
+
+ type file struct {
+ header *FileHeader
+ content []byte
+ }
+ files := []file{}
+
+ for _, f := range r.File {
+ fr, err := f.Open()
+ if err != nil {
+ continue
+ }
+ content, err := io.ReadAll(fr)
+ if err != nil {
+ continue
+ }
+ files = append(files, file{header: &f.FileHeader, content: content})
+ if _, err := r.Open(f.Name); err != nil {
+ continue
+ }
+ }
+
+ // If we were unable to read anything out of the archive, don't
+ // bother trying to roundtrip it.
+ if len(files) == 0 {
+ return
+ }
+
+ w := NewWriter(io.Discard)
+ for _, f := range files {
+ ww, err := w.CreateHeader(f.header)
+ if err != nil {
+ t.Fatalf("unable to write previously parsed header: %s", err)
+ }
+ if _, err := ww.Write(f.content); err != nil {
+ t.Fatalf("unable to write previously parsed content: %s", err)
+ }
+ }
+
+ if err := w.Close(); err != nil {
+ t.Fatalf("Unable to write archive: %s", err)
+ }
+
+ // TODO: We may want to check if the archive roundtrips.
+ })
+}
diff --git a/src/archive/zip/reader.go b/src/archive/zip/reader.go
index e40a2c656b..92fd6f6a92 100644
--- a/src/archive/zip/reader.go
+++ b/src/archive/zip/reader.go
@@ -125,7 +125,6 @@ func (z *Reader) init(r io.ReaderAt, size int64) error {
if err != nil {
return err
}
- f.readDataDescriptor()
z.File = append(z.File, f)
}
if uint16(len(z.File)) != uint16(end.directoryRecords) { // only compare 16 bits here
@@ -186,10 +185,15 @@ func (f *File) Open() (io.ReadCloser, error) {
return nil, ErrAlgorithm
}
var rc io.ReadCloser = dcomp(r)
+ var desr io.Reader
+ if f.hasDataDescriptor() {
+ desr = io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, dataDescriptorLen)
+ }
rc = &checksumReader{
rc: rc,
hash: crc32.NewIEEE(),
f: f,
+ desr: desr,
}
return rc, nil
}
@@ -205,49 +209,13 @@ func (f *File) OpenRaw() (io.Reader, error) {
return r, nil
}
-func (f *File) readDataDescriptor() {
- if !f.hasDataDescriptor() {
- return
- }
-
- bodyOffset, err := f.findBodyOffset()
- if err != nil {
- f.descErr = err
- return
- }
-
- // In section 4.3.9.2 of the spec: "However ZIP64 format MAY be used
- // regardless of the size of a file. When extracting, if the zip64
- // extended information extra field is present for the file the
- // compressed and uncompressed sizes will be 8 byte values."
- //
- // Historically, this package has used the compressed and uncompressed
- // sizes from the central directory to determine if the package is
- // zip64.
- //
- // For this case we allow either the extra field or sizes to determine
- // the data descriptor length.
- zip64 := f.zip64 || f.isZip64()
- n := int64(dataDescriptorLen)
- if zip64 {
- n = dataDescriptor64Len
- }
- size := int64(f.CompressedSize64)
- r := io.NewSectionReader(f.zipr, f.headerOffset+bodyOffset+size, n)
- dd, err := readDataDescriptor(r, zip64)
- if err != nil {
- f.descErr = err
- return
- }
- f.CRC32 = dd.crc32
-}
-
type checksumReader struct {
rc io.ReadCloser
hash hash.Hash32
nread uint64 // number of bytes read so far
f *File
- err error // sticky error
+ desr io.Reader // if non-nil, where to read the data descriptor
+ err error // sticky error
}
func (r *checksumReader) Stat() (fs.FileInfo, error) {
@@ -268,12 +236,12 @@ func (r *checksumReader) Read(b []byte) (n int, err error) {
if r.nread != r.f.UncompressedSize64 {
return 0, io.ErrUnexpectedEOF
}
- if r.f.hasDataDescriptor() {
- if r.f.descErr != nil {
- if r.f.descErr == io.EOF {
+ if r.desr != nil {
+ if err1 := readDataDescriptor(r.desr, r.f); err1 != nil {
+ if err1 == io.EOF {
err = io.ErrUnexpectedEOF
} else {
- err = r.f.descErr
+ err = err1
}
} else if r.hash.Sum32() != r.f.CRC32 {
err = ErrChecksum
@@ -485,10 +453,8 @@ parseExtras:
return nil
}
-func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) {
- // Create enough space for the largest possible size
- var buf [dataDescriptor64Len]byte
-
+func readDataDescriptor(r io.Reader, f *File) error {
+ var buf [dataDescriptorLen]byte
// The spec says: "Although not originally assigned a
// signature, the value 0x08074b50 has commonly been adopted
// as a signature value for the data descriptor record.
@@ -497,9 +463,10 @@ func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) {
// descriptors and should account for either case when reading
// ZIP files to ensure compatibility."
//
- // First read just those 4 bytes to see if the signature exists.
+ // dataDescriptorLen includes the size of the signature but
+ // first read just those 4 bytes to see if it exists.
if _, err := io.ReadFull(r, buf[:4]); err != nil {
- return nil, err
+ return err
}
off := 0
maybeSig := readBuf(buf[:4])
@@ -508,28 +475,21 @@ func readDataDescriptor(r io.Reader, zip64 bool) (*dataDescriptor, error) {
// bytes.
off += 4
}
-
- end := dataDescriptorLen - 4
- if zip64 {
- end = dataDescriptor64Len - 4
+ if _, err := io.ReadFull(r, buf[off:12]); err != nil {
+ return err
}
- if _, err := io.ReadFull(r, buf[off:end]); err != nil {
- return nil, err
- }
- b := readBuf(buf[:end])
-
- out := &dataDescriptor{
- crc32: b.uint32(),
+ b := readBuf(buf[:12])
+ if b.uint32() != f.CRC32 {
+ return ErrChecksum
}
- if zip64 {
- out.compressedSize = b.uint64()
- out.uncompressedSize = b.uint64()
- } else {
- out.compressedSize = uint64(b.uint32())
- out.uncompressedSize = uint64(b.uint32())
- }
- return out, nil
+ // The two sizes that follow here can be either 32 bits or 64 bits
+ // but the spec is not very clear on this and different
+ // interpretations have been made, causing incompatibilities. We
+ // already have the sizes from the central directory so we can
+ // just ignore these.
+
+ return nil
}
func readDirectoryEnd(r io.ReaderAt, size int64) (dir *directoryEnd, err error) {
@@ -710,7 +670,7 @@ func (f *fileListEntry) Size() int64 { return 0 }
func (f *fileListEntry) Mode() fs.FileMode { return fs.ModeDir | 0555 }
func (f *fileListEntry) Type() fs.FileMode { return fs.ModeDir }
func (f *fileListEntry) IsDir() bool { return true }
-func (f *fileListEntry) Sys() interface{} { return nil }
+func (f *fileListEntry) Sys() any { return nil }
func (f *fileListEntry) ModTime() time.Time {
if f.file == nil {
diff --git a/src/archive/zip/reader_test.go b/src/archive/zip/reader_test.go
index a54915316c..d1a9bdd334 100644
--- a/src/archive/zip/reader_test.go
+++ b/src/archive/zip/reader_test.go
@@ -1214,128 +1214,6 @@ func TestCVE202127919(t *testing.T) {
}
}
-func TestReadDataDescriptor(t *testing.T) {
- tests := []struct {
- desc string
- in []byte
- zip64 bool
- want *dataDescriptor
- wantErr error
- }{{
- desc: "valid 32 bit with signature",
- in: []byte{
- 0x50, 0x4b, 0x07, 0x08, // signature
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, // compressed size
- 0x08, 0x09, 0x0a, 0x0b, // uncompressed size
- },
- want: &dataDescriptor{
- crc32: 0x03020100,
- compressedSize: 0x07060504,
- uncompressedSize: 0x0b0a0908,
- },
- }, {
- desc: "valid 32 bit without signature",
- in: []byte{
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, // compressed size
- 0x08, 0x09, 0x0a, 0x0b, // uncompressed size
- },
- want: &dataDescriptor{
- crc32: 0x03020100,
- compressedSize: 0x07060504,
- uncompressedSize: 0x0b0a0908,
- },
- }, {
- desc: "valid 64 bit with signature",
- in: []byte{
- 0x50, 0x4b, 0x07, 0x08, // signature
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
- },
- zip64: true,
- want: &dataDescriptor{
- crc32: 0x03020100,
- compressedSize: 0x0b0a090807060504,
- uncompressedSize: 0x131211100f0e0d0c,
- },
- }, {
- desc: "valid 64 bit without signature",
- in: []byte{
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, // uncompressed size
- },
- zip64: true,
- want: &dataDescriptor{
- crc32: 0x03020100,
- compressedSize: 0x0b0a090807060504,
- uncompressedSize: 0x131211100f0e0d0c,
- },
- }, {
- desc: "invalid 32 bit with signature",
- in: []byte{
- 0x50, 0x4b, 0x07, 0x08, // signature
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, // unexpected end
- },
- wantErr: io.ErrUnexpectedEOF,
- }, {
- desc: "invalid 32 bit without signature",
- in: []byte{
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, // unexpected end
- },
- wantErr: io.ErrUnexpectedEOF,
- }, {
- desc: "invalid 64 bit with signature",
- in: []byte{
- 0x50, 0x4b, 0x07, 0x08, // signature
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
- },
- zip64: true,
- wantErr: io.ErrUnexpectedEOF,
- }, {
- desc: "invalid 64 bit without signature",
- in: []byte{
- 0x00, 0x01, 0x02, 0x03, // crc32
- 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, // compressed size
- 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, // unexpected end
- },
- zip64: true,
- wantErr: io.ErrUnexpectedEOF,
- }}
-
- for _, test := range tests {
- t.Run(test.desc, func(t *testing.T) {
- r := bytes.NewReader(test.in)
-
- desc, err := readDataDescriptor(r, test.zip64)
- if err != test.wantErr {
- t.Fatalf("got err %v; want nil", err)
- }
- if test.want == nil {
- return
- }
- if desc == nil {
- t.Fatalf("got nil DataDescriptor; want non-nil")
- }
- if desc.crc32 != test.want.crc32 {
- t.Errorf("got CRC32 %#x; want %#x", desc.crc32, test.want.crc32)
- }
- if desc.compressedSize != test.want.compressedSize {
- t.Errorf("got CompressedSize %#x; want %#x", desc.compressedSize, test.want.compressedSize)
- }
- if desc.uncompressedSize != test.want.uncompressedSize {
- t.Errorf("got UncompressedSize %#x; want %#x", desc.uncompressedSize, test.want.uncompressedSize)
- }
- })
- }
-}
-
func TestCVE202133196(t *testing.T) {
// Archive that indicates it has 1 << 128 -1 files,
// this would previously cause a panic due to attempting
diff --git a/src/archive/zip/struct.go b/src/archive/zip/struct.go
index ff9f605eb6..6f73fb8376 100644
--- a/src/archive/zip/struct.go
+++ b/src/archive/zip/struct.go
@@ -163,7 +163,7 @@ func (fi headerFileInfo) ModTime() time.Time {
}
func (fi headerFileInfo) Mode() fs.FileMode { return fi.fh.Mode() }
func (fi headerFileInfo) Type() fs.FileMode { return fi.fh.Mode().Type() }
-func (fi headerFileInfo) Sys() interface{} { return fi.fh }
+func (fi headerFileInfo) Sys() any { return fi.fh }
func (fi headerFileInfo) Info() (fs.FileInfo, error) { return fi, nil }
@@ -390,11 +390,3 @@ func unixModeToFileMode(m uint32) fs.FileMode {
}
return mode
}
-
-// dataDescriptor holds the data descriptor that optionally follows the file
-// contents in the zip file.
-type dataDescriptor struct {
- crc32 uint32
- compressedSize uint64
- uncompressedSize uint64
-}
diff --git a/src/bufio/bufio.go b/src/bufio/bufio.go
index 063a7785f3..7483946fc0 100644
--- a/src/bufio/bufio.go
+++ b/src/bufio/bufio.go
@@ -244,6 +244,8 @@ func (b *Reader) Read(p []byte) (n int, err error) {
}
// copy as much as we can
+ // Note: if the slice panics here, it is probably because
+ // the underlying reader returned a bad count. See issue 49795.
n = copy(p, b.buf[b.r:b.w])
b.r += n
b.lastByte = int(b.buf[b.r-1])
@@ -593,6 +595,8 @@ func NewWriterSize(w io.Writer, size int) *Writer {
}
// NewWriter returns a new Writer whose buffer has the default size.
+// If the argument io.Writer is already a Writer with large enough buffer size,
+// it returns the underlying Writer.
func NewWriter(w io.Writer) *Writer {
return NewWriterSize(w, defaultBufSize)
}
diff --git a/src/bufio/bufio_test.go b/src/bufio/bufio_test.go
index 4dddfa9085..ff3396e946 100644
--- a/src/bufio/bufio_test.go
+++ b/src/bufio/bufio_test.go
@@ -657,7 +657,7 @@ func TestWriterAppend(t *testing.T) {
}
// While not recommended, it is valid to append to a shifted buffer.
- // This forces Write to copy the the input.
+ // This forces Write to copy the input.
if rn.Intn(8) == 0 && cap(b) > 0 {
b = b[1:1:cap(b)]
}
diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go
index 01190e9900..08ae7ed313 100644
--- a/src/builtin/builtin.go
+++ b/src/builtin/builtin.go
@@ -91,6 +91,16 @@ type byte = uint8
// used, by convention, to distinguish character values from integer values.
type rune = int32
+// any is an alias for interface{} and is equivalent to interface{} in all ways.
+type any = interface{}
+
+// comparable is an interface that is implemented by all comparable types
+// (booleans, numbers, strings, pointers, channels, interfaces,
+// arrays of comparable types, structs whose fields are all comparable types).
+// The comparable interface may only be used as a type parameter constraint,
+// not as the type of a variable.
+type comparable comparable
+
// iota is a predeclared identifier representing the untyped integer ordinal
// number of the current const specification in a (usually parenthesized)
// const declaration. It is zero-indexed.
@@ -229,7 +239,7 @@ func close(c chan<- Type)
// that point, the program is terminated with a non-zero exit code. This
// termination sequence is called panicking and can be controlled by the
// built-in function recover.
-func panic(v interface{})
+func panic(v any)
// The recover built-in function allows a program to manage behavior of a
// panicking goroutine. Executing a call to recover inside a deferred
@@ -240,7 +250,7 @@ func panic(v interface{})
// panicking, or if the argument supplied to panic was nil, recover returns
// nil. Thus the return value from recover reports whether the goroutine is
// panicking.
-func recover() interface{}
+func recover() any
// The print built-in function formats its arguments in an
// implementation-specific way and writes the result to standard error.
diff --git a/src/bytes/reader_test.go b/src/bytes/reader_test.go
index 8baac5046c..9119c944ac 100644
--- a/src/bytes/reader_test.go
+++ b/src/bytes/reader_test.go
@@ -76,7 +76,7 @@ func TestReaderAt(t *testing.T) {
off int64
n int
want string
- wanterr interface{}
+ wanterr any
}{
{0, 10, "0123456789", nil},
{1, 10, "123456789", io.EOF},
diff --git a/src/cmd/api/goapi.go b/src/cmd/api/goapi.go
index 0c61b1b489..5ae059e4ce 100644
--- a/src/cmd/api/goapi.go
+++ b/src/cmd/api/goapi.go
@@ -459,8 +459,11 @@ type listImports struct {
var listCache sync.Map // map[string]listImports, keyed by contextName
-// listSem is a semaphore restricting concurrent invocations of 'go list'.
-var listSem = make(chan semToken, runtime.GOMAXPROCS(0))
+// listSem is a semaphore restricting concurrent invocations of 'go list'. 'go
+// list' has its own internal concurrency, so we use a hard-coded constant (to
+// allow the I/O-intensive phases of 'go list' to overlap) instead of scaling
+// all the way up to GOMAXPROCS.
+var listSem = make(chan semToken, 2)
type semToken struct{}
@@ -1071,7 +1074,7 @@ func (w *Walker) emitMethod(m *types.Selection) {
w.emitf("method (%s%s) %s%s", w.typeString(recv), tps, m.Obj().Name(), w.signatureString(sig))
}
-func (w *Walker) emitf(format string, args ...interface{}) {
+func (w *Walker) emitf(format string, args ...any) {
f := strings.Join(w.scope, ", ") + ", " + fmt.Sprintf(format, args...)
if strings.Contains(f, "\n") {
panic("feature contains newlines: " + f)
diff --git a/src/cmd/api/testdata/src/issue21181/p/p_generic.go b/src/cmd/api/testdata/src/issue21181/p/p_generic.go
index 4d75809676..ad6df20187 100644
--- a/src/cmd/api/testdata/src/issue21181/p/p_generic.go
+++ b/src/cmd/api/testdata/src/issue21181/p/p_generic.go
@@ -1,3 +1,4 @@
+//go:build !amd64
// +build !amd64
package p
diff --git a/src/cmd/api/testdata/src/pkg/p1/p1.go b/src/cmd/api/testdata/src/pkg/p1/p1.go
index 65181b248a..81826d768b 100644
--- a/src/cmd/api/testdata/src/pkg/p1/p1.go
+++ b/src/cmd/api/testdata/src/pkg/p1/p1.go
@@ -197,7 +197,7 @@ var m map[string]int
var chanVar chan int
-var ifaceVar interface{} = 5
+var ifaceVar any = 5
var assertVar = ifaceVar.(int)
diff --git a/src/cmd/api/testdata/src/pkg/p4/p4.go b/src/cmd/api/testdata/src/pkg/p4/p4.go
index 462a75be1a..1f90e779dd 100644
--- a/src/cmd/api/testdata/src/pkg/p4/p4.go
+++ b/src/cmd/api/testdata/src/pkg/p4/p4.go
@@ -4,12 +4,12 @@
package p4
-type Pair[T1 interface { M() }, T2 ~int] struct {
+type Pair[T1 interface{ M() }, T2 ~int] struct {
f1 T1
f2 T2
}
-func NewPair[T1 interface { M() }, T2 ~int](v1 T1, v2 T2) Pair[T1, T2] {
+func NewPair[T1 interface{ M() }, T2 ~int](v1 T1, v2 T2) Pair[T1, T2] {
return Pair[T1, T2]{f1: v1, f2: v2}
}
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
index babc3f7679..25fa8187bb 100644
--- a/src/cmd/compile/README.md
+++ b/src/cmd/compile/README.md
@@ -40,12 +40,12 @@ which is used for error reporting and the creation of debugging information.
* `cmd/compile/internal/gc` (create compiler AST, type checking, AST transformations)
-The gc package includes an AST definition carried over from when it was written
-in C. All of its code is written in terms of it, so the first thing that the gc
+The gc package includes its own AST definition carried over from when it was written
+in C. All of its code is written in terms of this AST, so the first thing that the gc
package must do is convert the syntax package's syntax tree to the compiler's
AST representation. This extra step may be refactored away in the future.
-The AST is then type-checked. The first steps are name resolution and type
+The gc AST is then type-checked. The first steps are name resolution and type
inference, which determine which object belongs to which identifier, and what
type each expression has. Type-checking includes certain extra checks, such as
"declared and not used" as well as determining whether or not a function
@@ -79,8 +79,7 @@ historical reasons, but the long-term plan is to move all of them here.
Then, a series of machine-independent passes and rules are applied. These do not
concern any single computer architecture, and thus run on all `GOARCH` variants.
-
-Some examples of these generic passes include dead code elimination, removal of
+These passes include dead code elimination, removal of
unneeded nil checks, and removal of unused branches. The generic rewrite rules
mainly concern expressions, such as replacing some expressions with constant
values, and optimizing multiplications and float operations.
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
index 50d8ed9159..7fe4463665 100644
--- a/src/cmd/compile/abi-internal.md
+++ b/src/cmd/compile/abi-internal.md
@@ -410,7 +410,11 @@ Special-purpose registers are as follows:
| R13 | Scratch | Scratch | Scratch |
| R14 | Current goroutine | Same | Same |
| R15 | GOT reference temporary if dynlink | Same | Same |
-| X15 | Zero value | Same | Scratch |
+| X15 | Zero value (*) | Same | Scratch |
+
+(*) Except on Plan 9, where X15 is a scratch register because SSE
+registers cannot be used in note handlers (so the compiler avoids
+using them except when absolutely necessary).
*Rationale*: These register meanings are compatible with Go’s
stack-based calling convention except for R14 and X15, which will have
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
index 74c8707b29..529150a390 100644
--- a/src/cmd/compile/internal/abi/abiutils.go
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -715,19 +715,20 @@ func setup() {
synthOnce.Do(func() {
fname := types.BuiltinPkg.Lookup
nxp := src.NoXPos
- unsp := types.Types[types.TUNSAFEPTR]
- ui := types.Types[types.TUINTPTR]
+ bp := types.NewPtr(types.Types[types.TUINT8])
+ it := types.Types[types.TINT]
synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
- types.NewField(nxp, fname("ptr"), unsp),
- types.NewField(nxp, fname("len"), ui),
- types.NewField(nxp, fname("cap"), ui),
+ types.NewField(nxp, fname("ptr"), bp),
+ types.NewField(nxp, fname("len"), it),
+ types.NewField(nxp, fname("cap"), it),
})
types.CalcStructSize(synthSlice)
synthString = types.NewStruct(types.NoPkg, []*types.Field{
- types.NewField(nxp, fname("data"), unsp),
- types.NewField(nxp, fname("len"), ui),
+ types.NewField(nxp, fname("data"), bp),
+ types.NewField(nxp, fname("len"), it),
})
types.CalcStructSize(synthString)
+ unsp := types.Types[types.TUNSAFEPTR]
synthIface = types.NewStruct(types.NoPkg, []*types.Field{
types.NewField(nxp, fname("f1"), unsp),
types.NewField(nxp, fname("f2"), unsp),
diff --git a/src/cmd/compile/internal/amd64/versions_test.go b/src/cmd/compile/internal/amd64/versions_test.go
index e8bda78291..a21e5f2e6f 100644
--- a/src/cmd/compile/internal/amd64/versions_test.go
+++ b/src/cmd/compile/internal/amd64/versions_test.go
@@ -76,8 +76,18 @@ func TestGoAMD64v1(t *testing.T) {
if err != nil {
t.Fatalf("couldn't execute test: %s", err)
}
- if string(out) != "PASS\n" {
- t.Fatalf("test reported error: %s", string(out))
+ // Expect to see output of the form "PASS\n", unless the test binary
+ // was compiled for coverage (in which case there will be an extra line).
+ success := false
+ lines := strings.Split(string(out), "\n")
+ if len(lines) == 2 {
+ success = lines[0] == "PASS" && lines[1] == ""
+ } else if len(lines) == 3 {
+ success = lines[0] == "PASS" &&
+ strings.HasPrefix(lines[1], "coverage") && lines[2] == ""
+ }
+ if !success {
+ t.Fatalf("test reported error: %s lines=%+v", string(out), lines)
}
}
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
index 4afe2eb9ee..955f9d2077 100644
--- a/src/cmd/compile/internal/base/print.go
+++ b/src/cmd/compile/internal/base/print.go
@@ -217,10 +217,10 @@ func FatalfAt(pos src.XPos, format string, args ...interface{}) {
fmt.Printf("\n")
// If this is a released compiler version, ask for a bug report.
- if strings.HasPrefix(buildcfg.Version, "go") {
+ if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") {
fmt.Printf("\n")
fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
- fmt.Printf("https://golang.org/issue/new\n")
+ fmt.Printf("https://go.dev/issue/new\n")
} else {
// Not a release; dump a stack trace, too.
fmt.Println()
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
index 3007262db9..e249a52e57 100644
--- a/src/cmd/compile/internal/dwarfgen/dwarf.go
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -150,6 +150,19 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir
dcl := apDecls
if fnsym.WasInlined() {
dcl = preInliningDcls(fnsym)
+ } else {
+ // The backend's stackframe pass prunes away entries from the
+ // fn's Dcl list, including PARAMOUT nodes that correspond to
+ // output params passed in registers. Add back in these
+ // entries here so that we can process them properly during
+ // DWARF-gen. See issue 48573 for more details.
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+ for _, n := range debugInfo.RegOutputParams {
+ if n.Class != ir.PPARAMOUT || !n.IsOutputParamInRegisters() {
+ panic("invalid ir.Name on debugInfo.RegOutputParams list")
+ }
+ dcl = append(dcl, n)
+ }
}
// If optimization is enabled, the list above will typically be
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
index 61e0121a40..c2145bdf91 100644
--- a/src/cmd/compile/internal/escape/escape.go
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -293,6 +293,14 @@ func (b *batch) finish(fns []*ir.Func) {
// TODO(mdempsky): Update tests to expect this.
goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
+ if n.Op() == ir.OCONVIDATA && n.(*ir.ConvExpr).NonEscaping {
+ // The allocation for the data word of an interface is known to not escape.
+ // See issue 50182.
+ // (But we do still need to process that allocation, as pointers inside
+ // the data word may escape.)
+ loc.escapes = false
+ }
+
if loc.escapes {
if n.Op() == ir.ONAME {
if base.Flag.CompilingRuntime {
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
index eed438705a..c9acfc1710 100644
--- a/src/cmd/compile/internal/gc/export.go
+++ b/src/cmd/compile/internal/gc/export.go
@@ -31,7 +31,7 @@ func dumpasmhdr() {
if t == constant.Float || t == constant.Complex {
break
}
- fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val())
+ fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val().ExactString())
case ir.OTYPE:
t := n.Type()
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
index ed81ef7bc0..4c4a724cdf 100644
--- a/src/cmd/compile/internal/gc/main.go
+++ b/src/cmd/compile/internal/gc/main.go
@@ -35,18 +35,18 @@ import (
"sort"
)
-func hidePanic() {
- if base.Debug.Panic == 0 && base.Errors() > 0 {
- // If we've already complained about things
- // in the program, don't bother complaining
- // about a panic too; let the user clean up
- // the code and try again.
- if err := recover(); err != nil {
- if err == "-h" {
- panic(err)
- }
- base.ErrorExit()
+// handlePanic ensures that we print out an "internal compiler error" for any panic
+// or runtime exception during front-end compiler processing (unless there have
+// already been some compiler errors). It may also be invoked from the explicit panic in
+// hcrash(), in which case, we pass the panic on through.
+func handlePanic() {
+ if err := recover(); err != nil {
+ if err == "-h" {
+ // Force real panic now with -h option (hcrash) - the error
+ // information will have already been printed.
+ panic(err)
}
+ base.Fatalf("panic: %v", err)
}
}
@@ -56,7 +56,7 @@ func hidePanic() {
func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "init")
- defer hidePanic()
+ defer handlePanic()
archInit(&ssagen.Arch)
@@ -245,11 +245,6 @@ func Main(archInit func(*ssagen.ArchInfo)) {
base.Timer.Start("fe", "inlining")
if base.Flag.LowerL != 0 {
inline.InlinePackage()
- // If any new fully-instantiated types were referenced during
- // inlining, we need to create needed instantiations.
- if len(typecheck.GetInstTypeList()) > 0 {
- noder.BuildInstantiations(false)
- }
}
noder.MakeWrappers(typecheck.Target) // must happen after inlining
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
index 44c5e06cd6..5d80db244b 100644
--- a/src/cmd/compile/internal/importer/gcimporter_test.go
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -8,6 +8,7 @@ import (
"bytes"
"cmd/compile/internal/types2"
"fmt"
+ "internal/goexperiment"
"internal/testenv"
"os"
"os/exec"
@@ -107,25 +108,29 @@ func TestImportTestdata(t *testing.T) {
t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
}
- tmpdir := mktmpdir(t)
- defer os.RemoveAll(tmpdir)
+ testfiles := map[string][]string{
+ "exports.go": {"go/ast", "go/token"},
+ }
+ if !goexperiment.Unified {
+ testfiles["generics.go"] = nil
+ }
- compile(t, "testdata", "exports.go", filepath.Join(tmpdir, "testdata"))
+ for testfile, wantImports := range testfiles {
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
- if pkg := testPath(t, "./testdata/exports", tmpdir); pkg != nil {
- // The package's Imports list must include all packages
- // explicitly imported by exports.go, plus all packages
- // referenced indirectly via exported objects in exports.go.
- // With the textual export format, the list may also include
- // additional packages that are not strictly required for
- // import processing alone (they are exported to err "on
- // the safe side").
- // TODO(gri) update the want list to be precise, now that
- // the textual export data is gone.
- got := fmt.Sprint(pkg.Imports())
- for _, want := range []string{"go/ast", "go/token"} {
- if !strings.Contains(got, want) {
- t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"))
+ path := "./testdata/" + strings.TrimSuffix(testfile, ".go")
+
+ if pkg := testPath(t, path, tmpdir); pkg != nil {
+ // The package's Imports list must include all packages
+ // explicitly imported by testfile, plus all packages
+ // referenced indirectly via exported objects in testfile.
+ got := fmt.Sprint(pkg.Imports())
+ for _, want := range wantImports {
+ if !strings.Contains(got, want) {
+ t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ }
}
}
}
@@ -253,7 +258,7 @@ var importedObjectTests = []struct {
{"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
// interfaces
- {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key interface{}) interface{}}"},
+ {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
{"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
{"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
{"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
index d04ef5c34d..a827987a48 100644
--- a/src/cmd/compile/internal/importer/iimport.go
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -9,6 +9,7 @@ package importer
import (
"cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
"cmd/compile/internal/types2"
"encoding/binary"
"fmt"
@@ -45,7 +46,7 @@ func (r *intReader) uint64() uint64 {
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
- iexportVersionGenerics = 1 // probably change to 2 before release
+ iexportVersionGenerics = 2
iexportVersionGo1_18 = 2
iexportVersionCurrent = 2
@@ -126,7 +127,7 @@ func ImportData(imports map[string]*types2.Package, data, path string) (pkg *typ
typCache: make(map[uint64]types2.Type),
// Separate map for typeparams, keyed by their package and unique
// name (name with subscript).
- tparamIndex: make(map[ident]types2.Type),
+ tparamIndex: make(map[ident]*types2.TypeParam),
}
for i, pt := range predeclared {
@@ -202,7 +203,7 @@ type iimporter struct {
declData string
pkgIndex map[*types2.Package]map[string]uint64
typCache map[uint64]types2.Type
- tparamIndex map[ident]types2.Type
+ tparamIndex map[ident]*types2.TypeParam
interfaceList []*types2.Interface
}
@@ -259,7 +260,7 @@ func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
}
func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
- if t, ok := p.typCache[off]; ok && (base == nil || !isInterface(t)) {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
return t
}
@@ -274,12 +275,30 @@ func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
t := r.doType(base)
- if base == nil || !isInterface(t) {
+ if canReuse(base, t) {
p.typCache[off] = t
}
return t
}
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types2.Named, rhs types2.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := rhs.(*types2.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
type importReader struct {
p *iimporter
declReader strings.Reader
@@ -358,12 +377,12 @@ func (r *importReader) obj(name string) {
if r.p.exportVersion < iexportVersionGenerics {
errorf("unexpected type param type")
}
- // Remove the "path" from the type param name that makes it unique
- ix := strings.LastIndex(name, ".")
- if ix < 0 {
- errorf("missing path for type param")
+ name0 := typecheck.TparamName(name)
+ if name0 == "" {
+ errorf("malformed type parameter export name %s: missing prefix", name)
}
- tn := types2.NewTypeName(pos, r.currPkg, name[ix+1:], nil)
+
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
t := types2.NewTypeParam(tn, nil)
// To handle recursive references to the typeparam within its
// bound, save the partial type in tparamIndex before reading the bounds.
@@ -706,8 +725,7 @@ func (r *importReader) tparamList() []*types2.TypeParam {
}
xs := make([]*types2.TypeParam, n)
for i := range xs {
- typ := r.typ()
- xs[i] = types2.AsTypeParam(typ)
+ xs[i] = r.typ().(*types2.TypeParam)
}
return xs
}
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
index 6ceb413601..9377d99779 100644
--- a/src/cmd/compile/internal/importer/support.go
+++ b/src/cmd/compile/internal/importer/support.go
@@ -118,10 +118,14 @@ var predeclared = []types2.Type{
types2.Typ[types2.Invalid], // only appears in packages with errors
// used internally by gc; never used by this package or in .a files
+ // not to be confused with the universe any
anyType{},
// comparable
types2.Universe.Lookup("comparable").Type(),
+
+ // any
+ types2.Universe.Lookup("any").Type(),
}
type anyType struct{}
diff --git a/src/cmd/compile/internal/importer/testdata/exports.go b/src/cmd/compile/internal/importer/testdata/exports.go
index 8ba3242102..91598c03e3 100644
--- a/src/cmd/compile/internal/importer/testdata/exports.go
+++ b/src/cmd/compile/internal/importer/testdata/exports.go
@@ -15,14 +15,17 @@ const init1 = 0
func init() {}
const (
- C0 int = 0
- C1 = 3.14159265
- C2 = 2.718281828i
- C3 = -123.456e-789
- C4 = +123.456e+789
- C5 = 1234i
- C6 = "foo\n"
- C7 = `bar\n`
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456e+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+ C8 = 42
+ C9 int = 42
+ C10 float64 = 42
)
type (
diff --git a/src/cmd/compile/internal/importer/testdata/generics.go b/src/cmd/compile/internal/importer/testdata/generics.go
new file mode 100644
index 0000000000..00bf04000f
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/generics.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package generics
+
+type Any any
+
+var x any
+
+type T[A, B any] struct {
+ Left A
+ Right B
+}
+
+var X T[int, string] = T[int, string]{1, "hi"}
+
+func ToInt[P interface{ ~int }](p P) int { return int(p) }
+
+var IntID = ToInt[int]
+
+type G[C comparable] int
+
+func ImplicitFunc[T ~int]() {}
+
+type ImplicitType[T ~int] int
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
index 47b895f7e3..716a7fbcd9 100644
--- a/src/cmd/compile/internal/inline/inl.go
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -1108,11 +1108,15 @@ func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
// closure does the necessary substitions for a ClosureExpr n and returns the new
// closure node.
func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
- // Prior to the subst edit, set a flag in the inlsubst to
- // indicated that we don't want to update the source positions in
- // the new closure. If we do this, it will appear that the closure
- // itself has things inlined into it, which is not the case. See
- // issue #46234 for more details.
+ // Prior to the subst edit, set a flag in the inlsubst to indicate
+ // that we don't want to update the source positions in the new
+ // closure function. If we do this, it will appear that the
+ // closure itself has things inlined into it, which is not the
+ // case. See issue #46234 for more details. At the same time, we
+ // do want to update the position in the new ClosureExpr (which is
+ // part of the function we're working on). See #49171 for an
+ // example of what happens if we miss that update.
+ newClosurePos := subst.updatedPos(n.Pos())
defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
subst.noPosUpdate = true
@@ -1175,6 +1179,7 @@ func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
// Actually create the named function for the closure, now that
// the closure is inlined in a specific function.
newclo := newfn.OClosure
+ newclo.SetPos(newClosurePos)
newclo.SetInit(subst.list(n.Init()))
return typecheck.Expr(newclo)
}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
index f526d987a7..68303c0581 100644
--- a/src/cmd/compile/internal/ir/expr.go
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -250,7 +250,8 @@ func (n *ConstExpr) Val() constant.Value { return n.val }
// It may end up being a value or a type.
type ConvExpr struct {
miniExpr
- X Node
+ X Node
+ NonEscaping bool // The allocation needed for the conversion to interface is known not to escape
}
func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
index 8784f9ef99..5fdccf8927 100644
--- a/src/cmd/compile/internal/ir/node.go
+++ b/src/cmd/compile/internal/ir/node.go
@@ -471,7 +471,7 @@ const (
UintptrEscapes // pointers converted to uintptr escape
// Runtime-only func pragmas.
- // See ../../../../runtime/README.md for detailed descriptions.
+ // See ../../../../runtime/HACKING.md for detailed descriptions.
Systemstack // func must run on system stack
Nowritebarrier // emit compiler error instead of write barrier
Nowritebarrierrec // error on write barrier in this or recursive callees
@@ -584,7 +584,7 @@ func OuterValue(n Node) Node {
for {
switch nn := n; nn.Op() {
case OXDOT:
- base.FatalfAt(n.Pos(), "OXDOT in walk: %v", n)
+ base.FatalfAt(n.Pos(), "OXDOT in OuterValue: %v", n)
case ODOT:
nn := nn.(*SelectorExpr)
n = nn.X
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
index 990b9788f7..6e12c6cb94 100644
--- a/src/cmd/compile/internal/mips64/ssa.go
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -320,7 +320,10 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) {
for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
a = a.Args[0]
}
- if a.Op == ssa.OpLoadReg {
+ if a.Op == ssa.OpLoadReg && mips.REG_R0 <= a.Reg() && a.Reg() <= mips.REG_R31 {
+ // LoadReg from a narrower type does an extension, except loading
+ // to a floating point register. So only eliminate the extension
+ // if it is loaded to an integer register.
t := a.Type
switch {
case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
index 82455f7d4a..a9522d09af 100644
--- a/src/cmd/compile/internal/noder/decl.go
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -86,6 +86,7 @@ func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
}
func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
+ assert(g.curDecl == "")
// Set g.curDecl to the function name, as context for the type params declared
// during types2-to-types1 translation if this is a generic function.
g.curDecl = decl.Name.Value
@@ -94,7 +95,7 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
if recv != nil {
t2 := deref2(recv.Type())
// This is a method, so set g.curDecl to recvTypeName.methName instead.
- g.curDecl = types2.AsNamed(t2).Obj().Name() + "." + g.curDecl
+ g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl
}
fn := ir.NewFunc(g.pos(decl))
@@ -132,11 +133,20 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
g.target.Inits = append(g.target.Inits, fn)
}
- haveEmbed := g.haveEmbed
+ saveHaveEmbed := g.haveEmbed
+ saveCurDecl := g.curDecl
+ g.curDecl = ""
g.later(func() {
- defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
+ defer func(b bool, s string) {
+ // Revert haveEmbed and curDecl back to what they were before
+ // the "later" function.
+ g.haveEmbed = b
+ g.curDecl = s
+ }(g.haveEmbed, g.curDecl)
- g.haveEmbed = haveEmbed
+ // Set haveEmbed and curDecl to what they were for this funcDecl.
+ g.haveEmbed = saveHaveEmbed
+ g.curDecl = saveCurDecl
if fn.Type().HasTParam() {
g.topFuncIsGeneric = true
}
@@ -158,8 +168,12 @@ func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
}
func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
+ // Set the position for any error messages we might print (e.g. too large types).
+ base.Pos = g.pos(decl)
+ assert(ir.CurFunc != nil || g.curDecl == "")
// Set g.curDecl to the type name, as context for the type params declared
// during types2-to-types1 translation if this is a generic type.
+ saveCurDecl := g.curDecl
g.curDecl = decl.Name.Value
if decl.Alias {
name, _ := g.def(decl.Name)
@@ -167,6 +181,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
assert(name.Alias()) // should be set by irgen.obj
out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
+ g.curDecl = ""
return
}
@@ -219,6 +234,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
}
types.ResumeCheckSize()
+ g.curDecl = saveCurDecl
if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 {
methods := make([]*types.Field, otyp.NumMethods())
for i := range methods {
@@ -229,6 +245,7 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
meth := g.obj(m)
methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
methods[i].Nname = meth
+ g.curDecl = ""
}
ntyp.Methods().Set(methods)
}
@@ -238,6 +255,8 @@ func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
pos := g.pos(decl)
+ // Set the position for any error messages we might print (e.g. too large types).
+ base.Pos = pos
names := make([]*ir.Name, len(decl.NameList))
for i, name := range decl.NameList {
names[i], _ = g.def(name)
@@ -276,22 +295,26 @@ func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
} else if ir.CurFunc == nil {
name.Defn = as
}
- lhs := []ir.Node{as.X}
- rhs := []ir.Node{}
- if as.Y != nil {
- rhs = []ir.Node{as.Y}
- }
- transformAssign(as, lhs, rhs)
- as.X = lhs[0]
- if as.Y != nil {
- as.Y = rhs[0]
+ if !g.delayTransform() {
+ lhs := []ir.Node{as.X}
+ rhs := []ir.Node{}
+ if as.Y != nil {
+ rhs = []ir.Node{as.Y}
+ }
+ transformAssign(as, lhs, rhs)
+ as.X = lhs[0]
+ if as.Y != nil {
+ as.Y = rhs[0]
+ }
}
as.SetTypecheck(1)
out.Append(as)
}
}
if as2 != nil {
- transformAssign(as2, as2.Lhs, as2.Rhs)
+ if !g.delayTransform() {
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+ }
as2.SetTypecheck(1)
out.Append(as2)
}
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
index 24e6dbefe7..8a9afeb095 100644
--- a/src/cmd/compile/internal/noder/expr.go
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -266,7 +266,7 @@ func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.Selecto
if wantPtr {
recvType2Base = types2.AsPointer(recvType2).Elem()
}
- if types2.AsNamed(recvType2Base).TypeParams().Len() > 0 {
+ if recvType2Base.(*types2.Named).TypeParams().Len() > 0 {
// recvType2 is the original generic type that is
// instantiated for this method call.
// selinfo.Recv() is the instantiated type
@@ -332,13 +332,13 @@ func (g *irgen) exprs(exprs []syntax.Expr) []ir.Node {
}
func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
- if ptr, ok := typ.Underlying().(*types2.Pointer); ok {
+ if ptr, ok := types2.StructuralType(typ).(*types2.Pointer); ok {
n := ir.NewAddrExpr(g.pos(lit), g.compLit(ptr.Elem(), lit))
n.SetOp(ir.OPTRLIT)
return typed(g.typ(typ), n)
}
- _, isStruct := types2.Structure(typ).(*types2.Struct)
+ _, isStruct := types2.StructuralType(typ).(*types2.Struct)
exprs := make([]ir.Node, len(lit.ElemList))
for i, elem := range lit.ElemList {
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
index e20939de66..52224c4046 100644
--- a/src/cmd/compile/internal/noder/irgen.go
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -96,6 +96,17 @@ func check2(noders []*noder) {
}
}
+// Information about sub-dictionary entries in a dictionary
+type subDictInfo struct {
+ // Call or XDOT node that requires a dictionary.
+ callNode ir.Node
+ // Saved CallExpr.X node (*ir.SelectorExpr or *InstExpr node) for a generic
+ // method or function call, since this node will get dropped when the generic
+ // method/function call is transformed to a call on the instantiated shape
+ // function. Nil for other kinds of calls or XDOTs.
+ savedXNode ir.Node
+}
+
// dictInfo is the dictionary format for an instantiation of a generic function with
// particular shapes. shapeParams, derivedTypes, subDictCalls, and itabConvs describe
// the actual dictionary entries in order, and the remaining fields are other info
@@ -108,7 +119,7 @@ type dictInfo struct {
// Nodes in the instantiation that requires a subdictionary. Includes
// method and function calls (OCALL), function values (OFUNCINST), method
// values/expressions (OXDOT).
- subDictCalls []ir.Node
+ subDictCalls []subDictInfo
// Nodes in the instantiation that are a conversion from a typeparam/derived
// type to a specific interface.
itabConvs []ir.Node
@@ -317,7 +328,7 @@ Outer:
// Create any needed instantiations of generic functions and transform
// existing and new functions to use those instantiations.
- BuildInstantiations(true)
+ BuildInstantiations()
// Remove all generic functions from g.target.Decl, since they have been
// used for stenciling, but don't compile. Generic functions will already
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
index 0bc9135999..5d17c534c1 100644
--- a/src/cmd/compile/internal/noder/reader.go
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -2029,7 +2029,7 @@ func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExp
// Quirk: If deadcode elimination turned a non-empty function into
// an empty one, we need to set the position for the empty block
- // left behind to the the inlined position for src.NoXPos, so that
+ // left behind to the inlined position for src.NoXPos, so that
// an empty string gets added into the DWARF file name listing at
// the appropriate index.
if quirksMode() && len(body) == 1 {
diff --git a/src/cmd/compile/internal/noder/reader2.go b/src/cmd/compile/internal/noder/reader2.go
index 9396c0c87c..c028d21c67 100644
--- a/src/cmd/compile/internal/noder/reader2.go
+++ b/src/cmd/compile/internal/noder/reader2.go
@@ -250,7 +250,7 @@ func (r *reader2) doTyp() (res types2.Type) {
case typePointer:
return types2.NewPointer(r.typ())
case typeSignature:
- return r.signature(nil)
+ return r.signature(nil, nil, nil)
case typeSlice:
return types2.NewSlice(r.typ())
case typeStruct:
@@ -298,7 +298,7 @@ func (r *reader2) interfaceType() *types2.Interface {
for i := range methods {
pos := r.pos()
pkg, name := r.selector()
- mtyp := r.signature(nil)
+ mtyp := r.signature(nil, nil, nil)
methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
}
@@ -309,14 +309,14 @@ func (r *reader2) interfaceType() *types2.Interface {
return types2.NewInterfaceType(methods, embeddeds)
}
-func (r *reader2) signature(recv *types2.Var) *types2.Signature {
+func (r *reader2) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature {
r.sync(syncSignature)
params := r.params()
results := r.params()
variadic := r.bool()
- return types2.NewSignatureType(recv, nil, nil, params, results, variadic)
+ return types2.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
}
func (r *reader2) params() *types2.Tuple {
@@ -393,8 +393,7 @@ func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
case objFunc:
pos := r.pos()
tparams := r.typeParamNames()
- sig := r.signature(nil)
- sig.SetTypeParams(tparams)
+ sig := r.signature(nil, nil, tparams)
return types2.NewFunc(pos, objPkg, objName, sig)
case objType:
@@ -490,9 +489,8 @@ func (r *reader2) method() *types2.Func {
pos := r.pos()
pkg, name := r.selector()
- rparams := r.typeParamNames()
- sig := r.signature(r.param())
- sig.SetRecvTypeParams(rparams)
+ rtparams := r.typeParamNames()
+ sig := r.signature(r.param(), rtparams, nil)
_ = r.pos() // TODO(mdempsky): Remove; this is a hacker for linker.go.
return types2.NewFunc(pos, pkg, name, sig)
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
index 74281bc479..50b6c0efcd 100644
--- a/src/cmd/compile/internal/noder/stencil.go
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -9,7 +9,6 @@ package noder
import (
"cmd/compile/internal/base"
- "cmd/compile/internal/inline"
"cmd/compile/internal/ir"
"cmd/compile/internal/objw"
"cmd/compile/internal/reflectdata"
@@ -22,7 +21,7 @@ import (
)
// Enable extra consistency checks.
-const doubleCheck = true
+const doubleCheck = false
func assert(p bool) {
base.Assert(p)
@@ -40,34 +39,29 @@ func infoPrint(format string, a ...interface{}) {
var geninst genInst
-func BuildInstantiations(preinliningMainScan bool) {
- if geninst.instInfoMap == nil {
- geninst.instInfoMap = make(map[*types.Sym]*instInfo)
- }
- geninst.buildInstantiations(preinliningMainScan)
+func BuildInstantiations() {
+ geninst.instInfoMap = make(map[*types.Sym]*instInfo)
+ geninst.buildInstantiations()
+ geninst.instInfoMap = nil
}
// buildInstantiations scans functions for generic function calls and methods, and
// creates the required instantiations. It also creates instantiated methods for all
// fully-instantiated generic types that have been encountered already or new ones
-// that are encountered during the instantiation process. If preinliningMainScan is
-// true, it scans all declarations in typecheck.Target.Decls first, before scanning
-// any new instantiations created. If preinliningMainScan is false, we do not scan
-// any existing decls - we only scan method instantiations for any new
-// fully-instantiated types that we saw during inlining.
-func (g *genInst) buildInstantiations(preinliningMainScan bool) {
+// that are encountered during the instantiation process. It scans all declarations
+// in typecheck.Target.Decls first, before scanning any new instantiations created.
+func (g *genInst) buildInstantiations() {
// Instantiate the methods of instantiated generic types that we have seen so far.
g.instantiateMethods()
- if preinliningMainScan {
- n := len(typecheck.Target.Decls)
- for i := 0; i < n; i++ {
- g.scanForGenCalls(typecheck.Target.Decls[i])
- }
+	// Scan all current decls for calls to generic functions/methods.
+ n := len(typecheck.Target.Decls)
+ for i := 0; i < n; i++ {
+ g.scanForGenCalls(typecheck.Target.Decls[i])
}
// Scan all new instantiations created due to g.instantiateMethods() and the
- // scan of current decls (if done). This loop purposely runs until no new
+ // scan of current decls. This loop purposely runs until no new
// instantiations are created.
for i := 0; i < len(g.newInsts); i++ {
g.scanForGenCalls(g.newInsts[i])
@@ -82,10 +76,6 @@ func (g *genInst) buildInstantiations(preinliningMainScan bool) {
for _, fun := range g.newInsts {
info := g.instInfoMap[fun.Sym()]
g.dictPass(info)
- if !preinliningMainScan {
- // Prepare for the round of inlining below.
- inline.CanInline(fun.(*ir.Func))
- }
if doubleCheck {
ir.Visit(info.fun, func(n ir.Node) {
if n.Op() != ir.OCONVIFACE {
@@ -103,14 +93,6 @@ func (g *genInst) buildInstantiations(preinliningMainScan bool) {
ir.Dump(fmt.Sprintf("\ndictpass %v", info.fun), info.fun)
}
}
- if !preinliningMainScan {
- // Extra round of inlining for the new instantiations (only if
- // preinliningMainScan is false, which means we have already done the
- // main round of inlining)
- for _, fun := range g.newInsts {
- inline.InlineCalls(fun.(*ir.Func))
- }
- }
assert(l == len(g.newInsts))
g.newInsts = nil
}
@@ -500,7 +482,7 @@ func (g *genInst) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
// explicitly traverse any embedded fields in the receiver
// argument in order to call the method instantiation.
arg0 := formalParams[0].Nname.(ir.Node)
- arg0 = typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, arg0, x.(*ir.SelectorExpr).Sel)).X
+ arg0 = typecheck.AddImplicitDots(ir.NewSelectorExpr(x.Pos(), ir.OXDOT, arg0, x.(*ir.SelectorExpr).Sel)).X
if valueMethod && arg0.Type().IsPtr() {
// For handling the (*T).M case: if we have a pointer
// receiver after following all the embedded fields,
@@ -515,6 +497,7 @@ func (g *genInst) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
// Build call itself.
var innerCall ir.Node = ir.NewCallExpr(pos, ir.OCALL, target.Nname, args)
+ innerCall.(*ir.CallExpr).IsDDD = typ.IsVariadic()
if len(formalResults) > 0 {
innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
}
@@ -601,7 +584,7 @@ func (g *genInst) getDictOrSubdict(declInfo *instInfo, n ir.Node, nameNode *ir.N
if declInfo != nil {
entry := -1
for i, de := range declInfo.dictInfo.subDictCalls {
- if n == de {
+ if n == de.callNode {
entry = declInfo.dictInfo.startSubDict + i
break
}
@@ -615,7 +598,7 @@ func (g *genInst) getDictOrSubdict(declInfo *instInfo, n ir.Node, nameNode *ir.N
}
}
if !usingSubdict {
- dict = g.getDictionaryValue(nameNode, targs, isMeth)
+ dict = g.getDictionaryValue(n.Pos(), nameNode, targs, isMeth)
}
return dict, usingSubdict
}
@@ -651,17 +634,38 @@ func (g *genInst) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMe
checkFetchBody(nameNode)
}
+ var tparams []*types.Type
+ if isMeth {
+ // Get the type params from the method receiver (after skipping
+ // over any pointer)
+ recvType := nameNode.Type().Recv().Type
+ recvType = deref(recvType)
+ tparams = recvType.RParams()
+ } else {
+ fields := nameNode.Type().TParams().Fields().Slice()
+ tparams = make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams[i] = f.Type
+ }
+ }
+
// Convert any non-shape type arguments to their shape, so we can reduce the
// number of instantiations we have to generate. You can actually have a mix
// of shape and non-shape arguments, because of inferred or explicitly
// specified concrete type args.
s1 := make([]*types.Type, len(shapes))
for i, t := range shapes {
+ var tparam *types.Type
+ if tparams[i].Kind() == types.TTYPEPARAM {
+ // Shapes are grouped differently for structural types, so we
+ // pass the type param to Shapify(), so we can distinguish.
+ tparam = tparams[i]
+ }
if !t.IsShape() {
- s1[i] = typecheck.Shapify(t, i)
+ s1[i] = typecheck.Shapify(t, i, tparam)
} else {
// Already a shape, but make sure it has the correct index.
- s1[i] = typecheck.Shapify(shapes[i].Underlying(), i)
+ s1[i] = typecheck.Shapify(shapes[i].Underlying(), i, tparam)
}
}
shapes = s1
@@ -676,8 +680,23 @@ func (g *genInst) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMe
}
info.dictInfo.shapeToBound = make(map[*types.Type]*types.Type)
- // genericSubst fills in info.dictParam and info.tparamToBound.
- st := g.genericSubst(sym, nameNode, shapes, isMeth, info)
+ if sym.Def != nil {
+ // This instantiation must have been imported from another
+ // package (because it was needed for inlining), so we should
+ // not re-generate it and have conflicting definitions for the
+ // symbol (issue #50121). It will have already gone through the
+ // dictionary transformations of dictPass, so we don't actually
+ // need the info.dictParam and info.shapeToBound info filled in
+ // below. We just set the imported instantiation as info.fun.
+ assert(sym.Pkg != types.LocalPkg)
+ info.fun = sym.Def.(*ir.Name).Func
+ assert(info.fun != nil)
+ g.instInfoMap[sym] = info
+ return info
+ }
+
+ // genericSubst fills in info.dictParam and info.shapeToBound.
+ st := g.genericSubst(sym, nameNode, tparams, shapes, isMeth, info)
info.fun = st
g.instInfoMap[sym] = info
@@ -714,22 +733,8 @@ type subster struct {
// args shapes. For a method with a generic receiver, it returns an instantiated
// function type where the receiver becomes the first parameter. For either a generic
// method or function, a dictionary parameter is the added as the very first
-// parameter. genericSubst fills in info.dictParam and info.tparamToBound.
-func (g *genInst) genericSubst(newsym *types.Sym, nameNode *ir.Name, shapes []*types.Type, isMethod bool, info *instInfo) *ir.Func {
- var tparams []*types.Type
- if isMethod {
- // Get the type params from the method receiver (after skipping
- // over any pointer)
- recvType := nameNode.Type().Recv().Type
- recvType = deref(recvType)
- tparams = recvType.RParams()
- } else {
- fields := nameNode.Type().TParams().Fields().Slice()
- tparams = make([]*types.Type, len(fields))
- for i, f := range fields {
- tparams[i] = f.Type
- }
- }
+// parameter. genericSubst fills in info.dictParam and info.shapeToBound.
+func (g *genInst) genericSubst(newsym *types.Sym, nameNode *ir.Name, tparams []*types.Type, shapes []*types.Type, isMethod bool, info *instInfo) *ir.Func {
gf := nameNode.Func
// Pos of the instantiated function is same as the generic function
newf := ir.NewFunc(gf.Pos())
@@ -801,11 +806,12 @@ func (g *genInst) genericSubst(newsym *types.Sym, nameNode *ir.Name, shapes []*t
// Make sure name/type of newf is set before substituting the body.
newf.Body = subst.list(gf.Body)
-
- // Add code to check that the dictionary is correct.
- // TODO: must be adjusted to deal with shapes, but will go away soon when we move
- // to many->1 shape to concrete mapping.
- // newf.Body.Prepend(subst.checkDictionary(dictionaryName, shapes)...)
+ if len(newf.Body) == 0 {
+ // Ensure the body is nonempty, for issue 49524.
+ // TODO: have some other way to detect the difference between
+ // a function declared with no body, vs. one with an empty body?
+ newf.Body = append(newf.Body, ir.NewBlockStmt(gf.Pos(), nil))
+ }
if len(subst.defnMap) > 0 {
base.Fatalf("defnMap is not empty")
@@ -859,49 +865,6 @@ func (subst *subster) localvar(name *ir.Name) *ir.Name {
return m
}
-// checkDictionary returns code that does runtime consistency checks
-// between the dictionary and the types it should contain.
-func (subst *subster) checkDictionary(name *ir.Name, targs []*types.Type) (code []ir.Node) {
- if false {
- return // checking turned off
- }
- // TODO: when moving to GCshape, this test will become harder. Call into
- // runtime to check the expected shape is correct?
- pos := name.Pos()
- // Convert dictionary to *[N]uintptr
- d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], name)
- d.SetTypecheck(1)
- d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(len(targs))).PtrTo(), d)
- d.SetTypecheck(1)
- types.CheckSize(d.Type().Elem())
-
- // Check that each type entry in the dictionary is correct.
- for i, t := range targs {
- if t.HasShape() {
- // Check the concrete type, not the shape type.
- base.Fatalf("shape type in dictionary %s %+v\n", name.Sym().Name, t)
- }
- want := reflectdata.TypePtr(t)
- typed(types.Types[types.TUINTPTR], want)
- deref := ir.NewStarExpr(pos, d)
- typed(d.Type().Elem(), deref)
- idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), name) // TODO: what to set orig to?
- typed(types.Types[types.TUINTPTR], idx)
- got := ir.NewIndexExpr(pos, deref, idx)
- typed(types.Types[types.TUINTPTR], got)
- cond := ir.NewBinaryExpr(pos, ir.ONE, want, got)
- typed(types.Types[types.TBOOL], cond)
- panicArg := ir.NewNilExpr(pos)
- typed(types.NewInterface(types.LocalPkg, nil, false), panicArg)
- then := ir.NewUnaryExpr(pos, ir.OPANIC, panicArg)
- then.SetTypecheck(1)
- x := ir.NewIfStmt(pos, cond, []ir.Node{then}, nil)
- x.SetTypecheck(1)
- code = append(code, x)
- }
- return
-}
-
// getDictionaryEntry gets the i'th entry in the dictionary dict.
func getDictionaryEntry(pos src.XPos, dict *ir.Name, i int, size int) ir.Node {
// Convert dictionary to *[N]uintptr
@@ -946,6 +909,10 @@ func (subst *subster) node(n ir.Node) ir.Node {
// Use closure to capture all state needed by the ir.EditChildren argument.
var edit func(ir.Node) ir.Node
edit = func(x ir.Node) ir.Node {
+ // Analogous to ir.SetPos() at beginning of typecheck.typecheck() -
+ // allows using base.Pos during the transform functions, just like
+ // the tc*() functions.
+ ir.SetPos(x)
switch x.Op() {
case ir.OTYPE:
return ir.TypeNode(subst.ts.Typ(x.Type()))
@@ -1069,13 +1036,13 @@ func (subst *subster) node(n ir.Node) ir.Node {
}
case ir.OXDOT:
- // Finish the transformation of an OXDOT, unless this was a
- // bound call (a direct call on a type param). A bound call
- // will be transformed during the dictPass. Otherwise, m
- // will be transformed to an OMETHVALUE node. It will be
- // transformed to an ODOTMETH or ODOTINTER node if we find in
- // the OCALL case below that the method value is actually
- // called.
+ // Finish the transformation of an OXDOT, unless this is
+ // bound call or field access on a type param. A bound call
+ // or field access on a type param will be transformed during
+ // the dictPass. Otherwise, m will be transformed to an
+ // OMETHVALUE node. It will be transformed to an ODOTMETH or
+ // ODOTINTER node if we find in the OCALL case below that the
+ // method value is actually called.
mse := m.(*ir.SelectorExpr)
if src := mse.X.Type(); !src.IsShape() {
transformDot(mse, false)
@@ -1134,8 +1101,12 @@ func (subst *subster) node(n ir.Node) ir.Node {
transformEarlyCall(call)
case ir.OXDOT:
- // This is the case of a bound call on a typeparam,
- // which will be handled in the dictPass.
+ // This is the case of a bound call or a field access
+ // on a typeparam, which will be handled in the
+ // dictPass. As with OFUNCINST, we must transform the
+ // arguments of the call now, so any needed CONVIFACE
+ // nodes are exposed.
+ transformEarlyCall(call)
case ir.ODOTTYPE, ir.ODOTTYPE2:
// These are DOTTYPEs that could get transformed into
@@ -1245,39 +1216,53 @@ func (g *genInst) dictPass(info *instInfo) {
ir.CurFunc = info.fun
case ir.OXDOT:
+ // This is the case of a dot access on a type param. This is
+ // typically a bound call on the type param, but could be a
+ // field access, if the constraint has a single structural type.
mse := m.(*ir.SelectorExpr)
src := mse.X.Type()
assert(src.IsShape())
- // The only dot on a shape type value are methods.
if mse.X.Op() == ir.OTYPE {
// Method expression T.M
m = g.buildClosure2(info, m)
// No need for transformDot - buildClosure2 has already
// transformed to OCALLINTER/ODOTINTER.
} else {
- // Implement x.M as a conversion-to-bound-interface
- // 1) convert x to the bound interface
- // 2) call M on that interface
- dst := info.dictInfo.shapeToBound[m.(*ir.SelectorExpr).X.Type()]
- if src.IsInterface() {
- // If type arg is an interface (unusual case),
- // we do a type assert to the type bound.
- mse.X = assertToBound(info, info.dictParam, m.Pos(), mse.X, dst)
- } else {
- mse.X = convertUsingDictionary(info, info.dictParam, m.Pos(), mse.X, m, dst)
+ // If we can't find the selected method in the
+ // AllMethods of the bound, then this must be an access
+ // to a field of a structural type. If so, we skip the
+ // dictionary lookups - transformDot() will convert to
+ // the desired direct field access.
+ if isBoundMethod(info.dictInfo, mse) {
+ dst := info.dictInfo.shapeToBound[mse.X.Type()]
+ // Implement x.M as a conversion-to-bound-interface
+ // 1) convert x to the bound interface
+ // 2) call M on that interface
+ if src.IsInterface() {
+ // If type arg is an interface (unusual case),
+ // we do a type assert to the type bound.
+ mse.X = assertToBound(info, info.dictParam, m.Pos(), mse.X, dst)
+ } else {
+ mse.X = convertUsingDictionary(info, info.dictParam, m.Pos(), mse.X, m, dst, true)
+ // Note: we set nonEscaping==true, because we can assume the backing store for the
+ // interface conversion doesn't escape. The method call will immediately go to
+ // a wrapper function which copies all the data out of the interface value.
+ // (It only matters for non-pointer-shaped interface conversions. See issue 50182.)
+ }
}
transformDot(mse, false)
}
case ir.OCALL:
- op := m.(*ir.CallExpr).X.Op()
+ call := m.(*ir.CallExpr)
+ op := call.X.Op()
if op == ir.OMETHVALUE {
// Redo the transformation of OXDOT, now that we
// know the method value is being called.
- m.(*ir.CallExpr).X.(*ir.SelectorExpr).SetOp(ir.OXDOT)
- transformDot(m.(*ir.CallExpr).X.(*ir.SelectorExpr), true)
+ call.X.(*ir.SelectorExpr).SetOp(ir.OXDOT)
+ transformDot(call.X.(*ir.SelectorExpr), true)
}
- transformCall(m.(*ir.CallExpr))
+ transformCall(call)
case ir.OCONVIFACE:
if m.Type().IsEmptyInterface() && m.(*ir.ConvExpr).X.Type().IsEmptyInterface() {
@@ -1290,7 +1275,7 @@ func (g *genInst) dictPass(info *instInfo) {
// Note: x's argument is still typed as a type parameter.
// m's argument now has an instantiated type.
if mce.X.Type().HasShape() || (mce.X.Type().IsInterface() && m.Type().HasShape()) {
- m = convertUsingDictionary(info, info.dictParam, m.Pos(), m.(*ir.ConvExpr).X, m, m.Type())
+ m = convertUsingDictionary(info, info.dictParam, m.Pos(), m.(*ir.ConvExpr).X, m, m.Type(), false)
}
case ir.ODOTTYPE, ir.ODOTTYPE2:
if !m.Type().HasShape() {
@@ -1383,7 +1368,9 @@ func findDictType(info *instInfo, t *types.Type) int {
// type dst, by returning a new set of nodes that make use of a dictionary entry. in is the
// instantiated node of the CONVIFACE node or XDOT node (for a bound method call) that is causing the
// conversion.
-func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, in ir.Node, dst *types.Type) ir.Node {
+// If nonEscaping is true, the caller guarantees that the backing store needed for the interface data
+// word will not escape.
+func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, in ir.Node, dst *types.Type, nonEscaping bool) ir.Node {
assert(v.Type().HasShape() || v.Type().IsInterface() && in.Type().HasShape())
assert(dst.IsInterface())
@@ -1453,6 +1440,7 @@ func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v
// Figure out what the data field of the interface will be.
data := ir.NewConvExpr(pos, ir.OCONVIDATA, nil, v)
typed(types.Types[types.TUNSAFEPTR], data)
+ data.NonEscaping = nonEscaping
// Build an interface from the type and data parts.
var i ir.Node = ir.NewBinaryExpr(pos, ir.OEFACE, rt, data)
@@ -1581,8 +1569,9 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
markTypeUsed(ts, lsym)
}
// Emit an entry for each subdictionary (after substituting targs)
- for _, n := range info.subDictCalls {
+ for _, subDictInfo := range info.subDictCalls {
var sym *types.Sym
+ n := subDictInfo.callNode
switch n.Op() {
case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH:
call := n.(*ir.CallExpr)
@@ -1592,9 +1581,9 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
if se.X.Type().IsShape() {
// This is a method call enabled by a type bound.
- // We need this extra check for type expressions, which
- // don't add in the implicit XDOTs.
- tmpse := ir.NewSelectorExpr(base.Pos, ir.OXDOT, se.X, se.Sel)
+ // We need this extra check for method expressions,
+ // which don't add in the implicit XDOTs.
+ tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
tmpse = typecheck.AddImplicitDots(tmpse)
tparam := tmpse.X.Type()
if !tparam.IsShape() {
@@ -1629,31 +1618,31 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
} else {
// This is the case of a normal
// method call on a generic type.
- recvType := deref(call.X.(*ir.SelectorExpr).X.Type())
- genRecvType := recvType.OrigSym().Def.Type()
- nameNode = typecheck.Lookdot1(call.X, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
- subtargs := recvType.RParams()
- s2targs := make([]*types.Type, len(subtargs))
- for i, t := range subtargs {
- s2targs[i] = subst.Typ(t)
- }
- sym = g.getDictionarySym(nameNode, s2targs, true)
+ assert(subDictInfo.savedXNode == se)
+ sym = g.getSymForMethodCall(se, &subst)
}
} else {
- inst := call.X.(*ir.InstExpr)
- var nameNode *ir.Name
- var meth *ir.SelectorExpr
- var isMeth bool
- if meth, isMeth = inst.X.(*ir.SelectorExpr); isMeth {
- nameNode = meth.Selection.Nname.(*ir.Name)
+ inst, ok := call.X.(*ir.InstExpr)
+ if ok {
+ // Code hasn't been transformed yet
+ assert(subDictInfo.savedXNode == inst)
+ }
+ // If !ok, then the generic method/function call has
+ // already been transformed to a shape instantiation
+ // call. Either way, use the SelectorExpr/InstExpr
+ // node saved in info.
+ cex := subDictInfo.savedXNode
+ if se, ok := cex.(*ir.SelectorExpr); ok {
+ sym = g.getSymForMethodCall(se, &subst)
} else {
- nameNode = inst.X.(*ir.Name)
+ inst := cex.(*ir.InstExpr)
+ nameNode := inst.X.(*ir.Name)
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, false)
}
- subtargs := typecheck.TypesOf(inst.Targs)
- for i, t := range subtargs {
- subtargs[i] = subst.Typ(t)
- }
- sym = g.getDictionarySym(nameNode, subtargs, isMeth)
}
case ir.OFUNCINST:
@@ -1666,16 +1655,7 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
sym = g.getDictionarySym(nameNode, subtargs, false)
case ir.OXDOT, ir.OMETHEXPR, ir.OMETHVALUE:
- selExpr := n.(*ir.SelectorExpr)
- recvType := deref(selExpr.Selection.Type.Recv().Type)
- genRecvType := recvType.OrigSym().Def.Type()
- subtargs := recvType.RParams()
- s2targs := make([]*types.Type, len(subtargs))
- for i, t := range subtargs {
- s2targs[i] = subst.Typ(t)
- }
- nameNode := typecheck.Lookdot1(selExpr, selExpr.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
- sym = g.getDictionarySym(nameNode, s2targs, true)
+ sym = g.getSymForMethodCall(n.(*ir.SelectorExpr), &subst)
default:
assert(false)
@@ -1703,6 +1683,24 @@ func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool
return sym
}
+// getSymForMethodCall gets the dictionary sym for a method call, method value, or method
+// expression that has selector se. subst gives the substitution from shape types to
+// concrete types.
+func (g *genInst) getSymForMethodCall(se *ir.SelectorExpr, subst *typecheck.Tsubster) *types.Sym {
+ // For everything except method expressions, 'recvType = deref(se.X.Type)' would
+ // also give the receiver type. For method expressions with embedded types, we
+ // need to look at the type of the selection to get the final receiver type.
+ recvType := deref(se.Selection.Type.Recv().Type)
+ genRecvType := recvType.OrigSym().Def.Type()
+ nameNode := typecheck.Lookdot1(se, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
+ subtargs := recvType.RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ return g.getDictionarySym(nameNode, s2targs, true)
+}
+
// finalizeSyms finishes up all dictionaries on g.dictSymsToFinalize, by writing out
// any needed LSyms for itabs. The itab lsyms create wrappers which need various
// dictionaries and method instantiations to be complete, so, to avoid recursive
@@ -1762,7 +1760,7 @@ func (g *genInst) finalizeSyms() {
g.dictSymsToFinalize = nil
}
-func (g *genInst) getDictionaryValue(gf *ir.Name, targs []*types.Type, isMeth bool) ir.Node {
+func (g *genInst) getDictionaryValue(pos src.XPos, gf *ir.Name, targs []*types.Type, isMeth bool) ir.Node {
sym := g.getDictionarySym(gf, targs, isMeth)
// Make (or reuse) a node referencing the dictionary symbol.
@@ -1770,15 +1768,18 @@ func (g *genInst) getDictionaryValue(gf *ir.Name, targs []*types.Type, isMeth bo
if sym.Def != nil {
n = sym.Def.(*ir.Name)
} else {
- n = typecheck.NewName(sym)
+ // We set the position of a static dictionary to be the position of
+ // one of its uses.
+ n = ir.NewNameAt(pos, sym)
+ n.Curfn = ir.CurFunc
n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
n.SetTypecheck(1)
n.Class = ir.PEXTERN
sym.Def = n
}
- // Return the address of the dictionary.
- np := typecheck.NodAddr(n)
+ // Return the address of the dictionary. Addr node gets position that was passed in.
+ np := typecheck.NodAddrAt(pos, n)
// Note: treat dictionary pointers as uintptrs, so they aren't pointers
// with respect to GC. That saves on stack scanning work, write barriers, etc.
// We can get away with it because dictionaries are global variables.
@@ -1847,7 +1848,7 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
case ir.OFUNCINST:
if !callMap[n] && hasShapeNodes(n.(*ir.InstExpr).Targs) {
infoPrint(" Closure&subdictionary required at generic function value %v\n", n.(*ir.InstExpr).X)
- info.subDictCalls = append(info.subDictCalls, n)
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
}
case ir.OMETHEXPR, ir.OMETHVALUE:
if !callMap[n] && !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) &&
@@ -1858,7 +1859,7 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
} else {
infoPrint(" Closure&subdictionary required at generic meth value %v\n", n)
}
- info.subDictCalls = append(info.subDictCalls, n)
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
}
case ir.OCALL:
ce := n.(*ir.CallExpr)
@@ -1866,14 +1867,22 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
callMap[ce.X] = true
if hasShapeNodes(ce.X.(*ir.InstExpr).Targs) {
infoPrint(" Subdictionary at generic function/method call: %v - %v\n", ce.X.(*ir.InstExpr).X, n)
- info.subDictCalls = append(info.subDictCalls, n)
+ // Save the instExpr node for the function call,
+ // since we will lose this information when the
+ // generic function call is transformed to a call
+ // on the shape instantiation.
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: ce.X})
}
}
- if ce.X.Op() == ir.OXDOT &&
- isShapeDeref(ce.X.(*ir.SelectorExpr).X.Type()) {
+ // Note: this XDOT code is not actually needed as long as we
+ // continue to disable type parameters on RHS of type
+ // declarations (#45639).
+ if ce.X.Op() == ir.OXDOT {
callMap[ce.X] = true
- infoPrint(" Optional subdictionary at generic bound call: %v\n", n)
- info.subDictCalls = append(info.subDictCalls, n)
+ if isBoundMethod(info, ce.X.(*ir.SelectorExpr)) {
+ infoPrint(" Optional subdictionary at generic bound call: %v\n", n)
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
+ }
}
case ir.OCALLMETH:
ce := n.(*ir.CallExpr)
@@ -1882,7 +1891,11 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
callMap[ce.X] = true
if hasShapeTypes(deref(ce.X.(*ir.SelectorExpr).X.Type()).RParams()) {
infoPrint(" Subdictionary at generic method call: %v\n", n)
- info.subDictCalls = append(info.subDictCalls, n)
+ // Save the selector for the method call, since we
+ // will eventually lose this information when the
+ // generic method call is transformed into a
+ // function call on the method shape instantiation.
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: ce.X})
}
}
case ir.OCONVIFACE:
@@ -1892,7 +1905,8 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
info.itabConvs = append(info.itabConvs, n)
}
case ir.OXDOT:
- if n.(*ir.SelectorExpr).X.Type().IsShape() {
+ se := n.(*ir.SelectorExpr)
+ if isBoundMethod(info, se) {
infoPrint(" Itab for bound call: %v\n", n)
info.itabConvs = append(info.itabConvs, n)
}
@@ -1948,11 +1962,13 @@ func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instI
info.dictLen = len(info.shapeParams) + len(info.derivedTypes) + len(info.subDictCalls) + len(info.itabConvs)
}
-// isShapeDeref returns true if t is either a shape or a pointer to a shape. (We
-// can't just use deref(t).IsShape(), since a shape type is a complex type and may
-// have a pointer as part of its shape.)
-func isShapeDeref(t *types.Type) bool {
- return t.IsShape() || t.IsPtr() && t.Elem().IsShape()
+// isBoundMethod returns true if the selection indicated by se is a bound method of
+// se.X. se.X must be a shape type (i.e. substituted directly from a type param). If
+// isBoundMethod returns false, then the selection must be a field access of a
+// structural type.
+func isBoundMethod(info *dictInfo, se *ir.SelectorExpr) bool {
+ bound := info.shapeToBound[se.X.Type()]
+ return typecheck.Lookdot1(se, se.Sel, bound, bound.AllMethods(), 1) != nil
}
// addType adds t to info.derivedTypes if it is parameterized type (which is not
@@ -2085,6 +2101,7 @@ func startClosure(pos src.XPos, outer *ir.Func, typ *types.Type) (*ir.Func, []*t
fn.Dcl = append(fn.Dcl, arg)
f := types.NewField(pos, arg.Sym(), t)
f.Nname = arg
+ f.SetIsDDD(typ.Params().Field(i).IsDDD())
formalParams = append(formalParams, f)
}
for i := 0; i < typ.NumResults(); i++ {
@@ -2161,7 +2178,7 @@ func (g *genInst) buildClosure2(info *instInfo, m ir.Node) ir.Node {
// the type bound.
rcvr = assertToBound(info, dictVar, pos, rcvr, dst)
} else {
- rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, m, dst)
+ rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, m, dst, false)
}
dot := ir.NewSelectorExpr(pos, ir.ODOTINTER, rcvr, m.(*ir.SelectorExpr).Sel)
dot.Selection = typecheck.Lookdot1(dot, dot.Sel, dot.X.Type(), dot.X.Type().AllMethods(), 1)
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
index aedb09e21e..a349a7ef10 100644
--- a/src/cmd/compile/internal/noder/stmt.go
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -13,8 +13,10 @@ import (
"cmd/internal/src"
)
+// stmts creates nodes for a slice of statements that form a scope.
func (g *irgen) stmts(stmts []syntax.Stmt) []ir.Node {
var nodes []ir.Node
+ types.Markdcl()
for _, stmt := range stmts {
switch s := g.stmt(stmt).(type) {
case nil: // EmptyStmt
@@ -24,6 +26,7 @@ func (g *irgen) stmts(stmts []syntax.Stmt) []ir.Node {
nodes = append(nodes, s)
}
}
+ types.Popdcl()
return nodes
}
@@ -46,6 +49,12 @@ func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
n.SetTypecheck(1)
return n
case *syntax.DeclStmt:
+ if g.topFuncIsGeneric && len(stmt.DeclList) > 0 {
+ if _, ok := stmt.DeclList[0].(*syntax.TypeDecl); ok {
+ // TODO: remove this restriction. See issue 47631.
+ base.ErrorfAt(g.pos(stmt), "type declarations inside generic functions are not currently supported")
+ }
+ }
n := ir.NewBlockStmt(g.pos(stmt), nil)
g.decls(&n.List, stmt.DeclList)
return n
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
index 47e6397206..5f1f41163b 100644
--- a/src/cmd/compile/internal/noder/transform.go
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -115,6 +115,31 @@ func transformConv(n *ir.ConvExpr) ir.Node {
if n.X.Op() == ir.OLITERAL {
return stringtoruneslit(n)
}
+
+ case ir.OBYTES2STR:
+ assert(t.IsSlice())
+ assert(t.Elem().Kind() == types.TUINT8)
+ if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] {
+ // If t is a slice of a user-defined byte type B (not uint8
+ // or byte), then add an extra CONVNOP from []B to []byte, so
+ // that the call to slicebytetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ case ir.ORUNES2STR:
+ assert(t.IsSlice())
+ assert(t.Elem().Kind() == types.TINT32)
+ if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] {
+ // If t is a slice of a user-defined rune type B (not uint32
+ // or rune), then add an extra CONVNOP from []B to []rune, so
+ // that the call to slicerunetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
}
return n
}
@@ -133,6 +158,9 @@ func transformConvCall(n *ir.CallExpr) ir.Node {
// (non-conversion, non-builtin part) of typecheck.tcCall. This code should work even
// in the case of OCALL/OFUNCINST.
func transformCall(n *ir.CallExpr) {
+ // Set base.Pos, since transformArgs below may need it, but transformCall
+ // is called in some passes that don't set base.Pos.
+ ir.SetPos(n)
// n.Type() can be nil for calls with no return value
assert(n.Typecheck() == 1)
transformArgs(n)
@@ -328,6 +356,37 @@ assignOK:
}
checkLHS(0, r.Type())
checkLHS(1, types.UntypedBool)
+ t := lhs[0].Type()
+ if t != nil && rhs[0].Type().HasShape() && t.IsInterface() && !types.IdenticalStrict(t, rhs[0].Type()) {
+ // This is a multi-value assignment (map, channel, or dot-type)
+ // where the main result is converted to an interface during the
+ // assignment. Normally, the needed CONVIFACE is not created
+ // until (*orderState).as2ok(), because the AS2* ops and their
+ // sub-ops are so tightly intertwined. But we need to create the
+ // CONVIFACE now to enable dictionary lookups. So, assign the
+ // results first to temps, so that we can manifest the CONVIFACE
+ // in assigning the first temp to lhs[0]. If we added the
+ // CONVIFACE into rhs[0] directly, we would break a lot of later
+ // code that depends on the tight coupling between the AS2* ops
+ // and their sub-ops. (Issue #50642).
+ v := typecheck.Temp(rhs[0].Type())
+ ok := typecheck.Temp(types.Types[types.TBOOL])
+ as := ir.NewAssignListStmt(base.Pos, stmt.Op(), []ir.Node{v, ok}, []ir.Node{r})
+ as.Def = true
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, v))
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, ok))
+ as.SetTypecheck(1)
+ // Change stmt to be a normal assignment of the temps to the final
+ // left-hand-sides. We re-create the original multi-value assignment
+ // so that it assigns to the temps and add it as an init of stmt.
+ //
+ // TODO: fix the order of evaluation, so that the lval of lhs[0]
+ // is evaluated before rhs[0] (similar to problem in #50672).
+ stmt.SetOp(ir.OAS2)
+ stmt.PtrInit().Append(as)
+ // assignconvfn inserts the CONVIFACE.
+ stmt.Rhs = []ir.Node{assignconvfn(v, t), ok}
+ }
return
}
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
index f035e0da97..e7ce4c1089 100644
--- a/src/cmd/compile/internal/noder/types.go
+++ b/src/cmd/compile/internal/noder/types.go
@@ -26,6 +26,8 @@ func (g *irgen) pkg(pkg *types2.Package) *types.Pkg {
return types.NewPkg(pkg.Path(), pkg.Name())
}
+var universeAny = types2.Universe.Lookup("any").Type()
+
// typ converts a types2.Type to a types.Type, including caching of previously
// translated types.
func (g *irgen) typ(typ types2.Type) *types.Type {
@@ -53,6 +55,12 @@ func (g *irgen) typ(typ types2.Type) *types.Type {
// constructed part of a recursive type. Should not be called from outside this
// file (g.typ is the "external" entry point).
func (g *irgen) typ1(typ types2.Type) *types.Type {
+ // See issue 49583: the type checker has trouble keeping track of aliases,
+ // but for such a common alias as any we can improve things by preserving a
+ // pointer identity that can be checked when formatting type strings.
+ if typ == universeAny {
+ return types.AnyType
+ }
// Cache type2-to-type mappings. Important so that each defined generic
// type (instantiated or not) has a single types.Type representation.
// Also saves a lot of computation and memory by avoiding re-translating
@@ -105,6 +113,15 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// based on the names of the type arguments.
instName := g.instTypeName2(typ.Obj().Name(), typ.TypeArgs())
s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
+
+ // Make sure the base generic type exists in type1 (it may
+ // not yet if we are referencing an imported generic type, as
+ // opposed to a generic type declared in this package). Make
+ // sure to do this lookup before checking s.Def, in case
+ // s.Def gets defined while importing base (if an imported
+ // type). (Issue #50486).
+ base := g.obj(typ.Origin().Obj())
+
if s.Def != nil {
// We have already encountered this instantiation.
// Use the type we previously created, since there
@@ -112,10 +129,13 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
return s.Def.Type()
}
- // Make sure the base generic type exists in type1 (it may
- // not yet if we are referecing an imported generic type, as
- // opposed to a generic type declared in this package).
- _ = g.obj(typ.Origin().Obj())
+ if base.Class == ir.PAUTO {
+ // If the base type is a local type, we want to pop
+ // this instantiated type symbol/definition when we
+ // leave the containing block, so we don't use it
+ // incorrectly later.
+ types.Pushdcl(s)
+ }
// Create a forwarding type first and put it in the g.typs
// map, in order to deal with recursive generic types
@@ -219,9 +239,13 @@ func (g *irgen) typ0(typ types2.Type) *types.Type {
// Save the name of the type parameter in the sym of the type.
// Include the types2 subscript in the sym name
pkg := g.tpkg(typ)
- // Create the unique types1 name for a type param, using its context with a
- // function, type, or method declaration.
- nm := g.curDecl + "." + typ.Obj().Name()
+ // Create the unique types1 name for a type param, using its context
+ // with a function, type, or method declaration. Also, map blank type
+ // param names to a unique name based on their type param index. The
+ // unique blank names will be exported, but will be reverted during
+ // types2 and gcimporter import.
+ assert(g.curDecl != "")
+ nm := typecheck.TparamExportName(g.curDecl, typ.Obj().Name(), typ.Index())
sym := pkg.Lookup(nm)
if sym.Def != nil {
// Make sure we use the same type param type for the same
@@ -323,11 +347,15 @@ func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
tparams := make([]*types.Type, rparams.Len())
// Set g.curDecl to be the method context, so type
// params in the receiver of the method that we are
- // translating gets the right unique name.
+ // translating get the right unique name. We could
+ // be in a top-level typeDecl, so save and restore
+ // the current contents of g.curDecl.
+ savedCurDecl := g.curDecl
g.curDecl = typ.Obj().Name() + "." + m.Name()
for i := range tparams {
tparams[i] = g.typ1(rparams.At(i))
}
+ g.curDecl = savedCurDecl
assert(len(tparams) == len(targs))
ts := typecheck.Tsubster{
Tparams: tparams,
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
index dde42c85d6..933f577825 100644
--- a/src/cmd/compile/internal/noder/writer.go
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -229,6 +229,8 @@ func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
// @@@ Types
+var anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName)
+
func (w *writer) typ(typ types2.Type) {
w.typInfo(w.p.typIdx(typ, w.dict))
}
@@ -350,6 +352,12 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
w.structType(typ)
case *types2.Interface:
+ if typ == anyTypeName.Type() {
+ w.code(typeNamed)
+ w.obj(anyTypeName, nil)
+ break
+ }
+
w.code(typeInterface)
w.interfaceType(typ)
@@ -1210,6 +1218,7 @@ func (w *writer) expr(expr syntax.Expr) {
}
obj := obj.(*types2.Var)
+ assert(!obj.IsField())
assert(targs.Len() == 0)
w.code(exprLocal)
@@ -1329,10 +1338,10 @@ func (w *writer) compLit(lit *syntax.CompositeLit) {
w.typ(tv.Type)
typ := tv.Type
- if ptr, ok := typ.Underlying().(*types2.Pointer); ok {
+ if ptr, ok := types2.StructuralType(typ).(*types2.Pointer); ok {
typ = ptr.Elem()
}
- str, isStruct := typ.Underlying().(*types2.Struct)
+ str, isStruct := types2.StructuralType(typ).(*types2.Struct)
w.len(len(lit.ElemList))
for i, elem := range lit.ElemList {
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
index 6f9d1407d6..20fd8cec54 100644
--- a/src/cmd/compile/internal/ppc64/galign.go
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -16,7 +16,7 @@ func Init(arch *ssagen.ArchInfo) {
arch.LinkArch = &ppc64.Linkppc64le
}
arch.REGSP = ppc64.REGSP
- arch.MAXWIDTH = 1 << 60
+ arch.MAXWIDTH = 1 << 50
arch.ZeroRange = zerorange
arch.Ginsnop = ginsnop
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
index 0d3070fd39..e76f9c5f3c 100644
--- a/src/cmd/compile/internal/reflectdata/reflect.go
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -846,14 +846,19 @@ func TypePtr(t *types.Type) *ir.AddrExpr {
return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
}
-// ITabLsym returns the LSym representing the itab for concreate type typ
-// implementing interface iface.
+// ITabLsym returns the LSym representing the itab for concrete type typ implementing
+// interface iface. A dummy tab will be created in the unusual case where typ doesn't
+// implement iface. Normally, this wouldn't happen, because the typechecker would
+// have reported a compile-time error. This situation can only happen when the
+// destination type of a type assert or a type in a type switch is parameterized, so
+// it may sometimes, but not always, be a type that can't implement the specified
+// interface.
func ITabLsym(typ, iface *types.Type) *obj.LSym {
s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
lsym := s.Linksym()
if !existed {
- writeITab(lsym, typ, iface)
+ writeITab(lsym, typ, iface, true)
}
return lsym
}
@@ -865,7 +870,7 @@ func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
lsym := s.Linksym()
if !existed {
- writeITab(lsym, typ, iface)
+ writeITab(lsym, typ, iface, false)
}
n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
@@ -924,11 +929,12 @@ func hashMightPanic(t *types.Type) bool {
}
}
-// formalType replaces byte and rune aliases with real types.
+// formalType replaces predeclared aliases with real types.
// They've been separate internally to make error messages
// better, but we have to merge them in the reflect tables.
func formalType(t *types.Type) *types.Type {
- if t == types.ByteType || t == types.RuneType {
+ switch t {
+ case types.AnyType, types.ByteType, types.RuneType:
return types.Types[t.Kind()]
}
return t
@@ -1303,9 +1309,10 @@ func WriteRuntimeTypes() {
}
}
-// writeITab writes the itab for concrete type typ implementing
-// interface iface.
-func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
+// writeITab writes the itab for concrete type typ implementing interface iface. If
+// allowNonImplement is true, allow the case where typ does not implement iface, and just
+// create a dummy itab with zeroed-out method entries.
+func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
// TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
// others) to stop clobbering these.
oldpos, oldfn := base.Pos, ir.CurFunc
@@ -1331,14 +1338,9 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
break
}
}
- if sigs[0].Sym.Name == "==" {
- sigs = sigs[1:]
- if len(sigs) == 0 {
- break
- }
- }
}
- if len(sigs) != 0 {
+ completeItab := len(sigs) == 0
+ if !allowNonImplement && !completeItab {
base.Fatalf("incomplete itab")
}
@@ -1355,7 +1357,12 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type) {
o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
o += 4 // skip unused field
for _, fn := range entries {
- o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ if !completeItab {
+ // If typ doesn't implement iface, make method entries be zero.
+ o = objw.Uintptr(lsym, o, 0)
+ } else {
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ }
}
// Nothing writes static itabs, so they are read only.
objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
@@ -1417,6 +1424,9 @@ func WriteBasicTypes() {
}
writeType(types.NewPtr(types.Types[types.TSTRING]))
writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+ if base.Flag.G > 0 {
+ writeType(types.AnyType)
+ }
// emit type structs for error and func(error) string.
// The latter is the type of an auto-generated wrapper.
@@ -1938,18 +1948,25 @@ func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSy
// Target method uses shaped names.
targs2 := make([]*types.Type, len(targs))
+ origRParams := deref(orig).OrigSym().Def.(*ir.Name).Type().RParams()
for i, t := range targs {
- targs2[i] = typecheck.Shapify(t, i)
+ targs2[i] = typecheck.Shapify(t, i, origRParams[i])
}
targs = targs2
sym := typecheck.MakeFuncInstSym(ir.MethodSym(methodrcvr, method.Sym), targs, false, true)
if sym.Def == nil {
- // Currently we make sure that we have all the instantiations
- // we need by generating them all in ../noder/stencil.go:instantiateMethods
- // TODO: maybe there's a better, more incremental way to generate
- // only the instantiations we need?
- base.Fatalf("instantiation %s not found", sym.Name)
+ // Currently we make sure that we have all the
+ // instantiations we need by generating them all in
+ // ../noder/stencil.go:instantiateMethods
+ // Extra instantiations because of an inlined function
+ // should have been exported, and so available via
+ // Resolve.
+ in := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ if in.Op() == ir.ONONAME {
+ base.Fatalf("instantiation %s not found", sym.Name)
+ }
+ sym = in.Sym()
}
target := ir.AsNode(sym.Def)
call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
@@ -2075,8 +2092,14 @@ func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
sym := typecheck.MakeDictSym(gf, targs, true)
// Dictionary should already have been generated by instantiateMethods().
+ // Extra dictionaries needed because of an inlined function should have been
+ // exported, and so available via Resolve.
if lsym := sym.Linksym(); len(lsym.P) == 0 {
- base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
+ in := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ if in.Op() == ir.ONONAME {
+ base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
+ }
+ sym = in.Sym()
}
// Make (or reuse) a node referencing the dictionary symbol.
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
index e78eb5c0e4..aad59fa24e 100644
--- a/src/cmd/compile/internal/ssa/debug.go
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -34,6 +34,9 @@ type FuncDebug struct {
VarSlots [][]SlotID
// The location list data, indexed by VarID. Must be processed by PutLocationList.
LocationLists [][]byte
+ // Register-resident output parameters for the function. This is filled in at
+ // SSA generation time.
+ RegOutputParams []*ir.Name
// Filled in by the user. Translates Block and Value ID to PC.
GetPC func(ID, ID) int64
@@ -548,10 +551,10 @@ func PopulateABIInRegArgOps(f *Func) {
f.Entry.Values = append(newValues, f.Entry.Values...)
}
-// BuildFuncDebug returns debug information for f.
+// BuildFuncDebug builds debug information for f, placing the results in "rval".
// f must be fully processed, so that each Value is where it will be when
// machine code is emitted.
-func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32) *FuncDebug {
+func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
if f.RegAlloc == nil {
f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f)
}
@@ -661,12 +664,11 @@ func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset fu
blockLocs := state.liveness()
state.buildLocationLists(blockLocs)
- return &FuncDebug{
- Slots: state.slots,
- VarSlots: state.varSlots,
- Vars: state.vars,
- LocationLists: state.lists,
- }
+ // Populate "rval" with what we've computed.
+ rval.Slots = state.slots
+ rval.VarSlots = state.varSlots
+ rval.Vars = state.vars
+ rval.LocationLists = state.lists
}
// liveness walks the function in control flow order, calculating the start
@@ -1120,54 +1122,93 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
v.Op == OpArgIntReg || v.Op == OpArgFloatReg
}
+ blockPrologComplete := func(v *Value) bool {
+ if b.ID != state.f.Entry.ID {
+ return !opcodeTable[v.Op].zeroWidth
+ } else {
+ return v.Op == OpInitMem
+ }
+ }
+
+ // Examine the prolog portion of the block to process special
+ // zero-width ops such as Arg, Phi, LoweredGetClosurePtr (etc)
+ // whose lifetimes begin at the block starting point. In an
+ // entry block, allow for the possibility that we may see Arg
+ // ops that appear _after_ other non-zero-width operations.
+ // Example:
+ //
+ // v33 = ArgIntReg {foo+0} [0] : AX (foo)
+ // v34 = ArgIntReg {bar+0} [0] : BX (bar)
+ // ...
+ // v77 = StoreReg v67 : ctx+8[unsafe.Pointer]
+ // v78 = StoreReg v68 : ctx[unsafe.Pointer]
+ // v79 = Arg <*uint8> {args} : args[*uint8] (args[*uint8])
+ // v80 = Arg {args} [8] : args+8[int] (args+8[int])
+ // ...
+ // v1 = InitMem
+ //
+ // We can stop scanning the initial portion of the block when
+ // we either see the InitMem op (for entry blocks) or the
+ // first non-zero-width op (for other blocks).
+ for idx := 0; idx < len(b.Values); idx++ {
+ v := b.Values[idx]
+ if blockPrologComplete(v) {
+ break
+ }
+ // Consider only "lifetime begins at block start" ops.
+ if !mustBeFirst(v) && v.Op != OpArg {
+ continue
+ }
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+ if changed {
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, BlockStart)
+ }
+ state.changedVars.clear()
+ }
+ }
+
+ // Now examine the block again, handling things other than the
+ // "begins at block start" lifetimes.
zeroWidthPending := false
- blockPrologComplete := false // set to true at first non-zero-width op
- apcChangedSize := 0 // size of changedVars for leading Args, Phi, ClosurePtr
+ prologComplete := false
// expect to see values in pattern (apc)* (zerowidth|real)*
for _, v := range b.Values {
+ if blockPrologComplete(v) {
+ prologComplete = true
+ }
slots := state.valueNames[v.ID]
reg, _ := state.f.getHome(v.ID).(*Register)
changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
if opcodeTable[v.Op].zeroWidth {
+ if prologComplete && mustBeFirst(v) {
+ panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
+ }
if changed {
if mustBeFirst(v) || v.Op == OpArg {
- // These ranges begin at true beginning of block, not after first instruction
- if blockPrologComplete && mustBeFirst(v) {
- panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
- }
- apcChangedSize = len(state.changedVars.contents())
- // Other zero-width ops must wait on a "real" op.
- zeroWidthPending = true
+ // already taken care of above
continue
}
+ zeroWidthPending = true
}
continue
}
-
if !changed && !zeroWidthPending {
continue
}
- // Not zero-width; i.e., a "real" instruction.
+ // Not zero-width; i.e., a "real" instruction.
zeroWidthPending = false
- blockPrologComplete = true
- for i, varID := range state.changedVars.contents() {
- if i < apcChangedSize { // buffered true start-of-block changes
- state.updateVar(VarID(varID), v.Block, BlockStart)
- } else {
- state.updateVar(VarID(varID), v.Block, v)
- }
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, v)
}
state.changedVars.clear()
- apcChangedSize = 0
}
- for i, varID := range state.changedVars.contents() {
- if i < apcChangedSize { // buffered true start-of-block changes
- state.updateVar(VarID(varID), b, BlockStart)
- } else {
- state.updateVar(VarID(varID), b, BlockEnd)
- }
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockEnd)
}
prevBlock = b
@@ -1554,7 +1595,7 @@ func isNamedRegParam(p abi.ABIParamAssignment) bool {
return true
}
-// BuildFuncDebugNoOptimized constructs a FuncDebug object with
+// BuildFuncDebugNoOptimized populates a FuncDebug object "rval" with
// entries corresponding to the register-resident input parameters for
// the function "f"; it is used when we are compiling without
// optimization but the register ABI is enabled. For each reg param,
@@ -1562,8 +1603,7 @@ func isNamedRegParam(p abi.ABIParamAssignment) bool {
// the input register, and the second element holds the stack location
// of the param (the assumption being that when optimization is off,
// each input param reg will be spilled in the prolog.
-func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32) *FuncDebug {
- fd := FuncDebug{}
+func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
@@ -1577,7 +1617,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
}
}
if numRegParams == 0 {
- return &fd
+ return
}
state := debugState{f: f}
@@ -1587,7 +1627,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
}
// Allocate location lists.
- fd.LocationLists = make([][]byte, numRegParams)
+ rval.LocationLists = make([][]byte, numRegParams)
// Locate the value corresponding to the last spill of
// an input register.
@@ -1603,10 +1643,10 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
n := inp.Name.(*ir.Name)
sl := LocalSlot{N: n, Type: inp.Type, Off: 0}
- fd.Vars = append(fd.Vars, n)
- fd.Slots = append(fd.Slots, sl)
- slid := len(fd.VarSlots)
- fd.VarSlots = append(fd.VarSlots, []SlotID{SlotID(slid)})
+ rval.Vars = append(rval.Vars, n)
+ rval.Slots = append(rval.Slots, sl)
+ slid := len(rval.VarSlots)
+ rval.VarSlots = append(rval.VarSlots, []SlotID{SlotID(slid)})
if afterPrologVal == ID(-1) {
// This can happen for degenerate functions with infinite
@@ -1623,7 +1663,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
// Param is arriving in one or more registers. We need a 2-element
// location expression for it. First entry in location list
// will correspond to lifetime in input registers.
- list, sizeIdx := setupLocList(ctxt, f, fd.LocationLists[pidx],
+ list, sizeIdx := setupLocList(ctxt, f, rval.LocationLists[pidx],
BlockStart.ID, afterPrologVal)
if list == nil {
pidx++
@@ -1688,8 +1728,7 @@ func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, sta
// fill in size
ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
- fd.LocationLists[pidx] = list
+ rval.LocationLists[pidx] = list
pidx++
}
- return &fd
}
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
index da04e5b04e..c0ccdb1c93 100644
--- a/src/cmd/compile/internal/ssa/debug_lines_test.go
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -8,10 +8,10 @@ import (
"bufio"
"bytes"
"flag"
+ "internal/buildcfg"
"runtime"
"sort"
- // "flag"
"fmt"
"internal/testenv"
"io/ioutil"
@@ -45,7 +45,7 @@ func testGoArch() string {
return *testGoArchFlag
}
-func TestDebugLines(t *testing.T) {
+func TestDebugLinesSayHi(t *testing.T) {
// This test is potentially fragile, the goal is that debugging should step properly through "sayhi"
// If the blocks are reordered in a way that changes the statement order but execution flows correctly,
// then rearrange the expected numbers. Register abi and not-register-abi also have different sequences,
@@ -53,16 +53,54 @@ func TestDebugLines(t *testing.T) {
switch testGoArch() {
case "arm64", "amd64": // register ABI
- testDebugLines(t, "sayhi.go", "sayhi", []int{8, 9, 10, 11})
+ testDebugLines(t, "-N -l", "sayhi.go", "sayhi", []int{8, 9, 10, 11}, false)
case "arm", "386": // probably not register ABI for a while
- testDebugLines(t, "sayhi.go", "sayhi", []int{9, 10, 11})
+ testDebugLines(t, "-N -l", "sayhi.go", "sayhi", []int{9, 10, 11}, false)
default: // expect ppc64le and riscv will pick up register ABI soonish, not sure about others
t.Skip("skipped for many architectures, also changes w/ register ABI")
}
}
+func TestDebugLinesPushback(t *testing.T) {
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows.
+ t.Skip("this test depends on creating a file with a wonky name, only works for sure on Linux and Darwin")
+ }
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "(*List[go.shape.int_0]).PushBack"
+ if buildcfg.Experiment.Unified {
+ // Unified mangles differently
+ fn = "(*List[int]).PushBack"
+ }
+ testDebugLines(t, "-N -l -G=3", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
+ }
+}
+
+func TestDebugLinesConvert(t *testing.T) {
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows.
+ t.Skip("this test depends on creating a file with a wonky name, only works for sure on Linux and Darwin")
+ }
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "G[go.shape.int_0]"
+ if buildcfg.Experiment.Unified {
+ // Unified mangles differently
+ fn = "G[int]"
+ }
+ testDebugLines(t, "-N -l -G=3", "convertline.go", fn, []int{9, 10, 11}, true)
+ }
+}
+
func TestInlineLines(t *testing.T) {
if runtime.GOARCH != "amd64" && *testGoArchFlag == "" {
// As of september 2021, works for everything except mips64, but still potentially fragile
@@ -181,8 +219,8 @@ func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) {
// then verifies that the statement-marked lines in that file are the same as those in wantStmts
// These files must all be short because this is super-fragile.
// "go build" is run in a temporary directory that is normally deleted, unless -test.v
-func testDebugLines(t *testing.T, file, function string, wantStmts []int) {
- dumpBytes := compileAndDump(t, file, function, "-N -l")
+func testDebugLines(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) {
+ dumpBytes := compileAndDump(t, file, function, gcflags)
dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
var gotStmts []int
dumpLineNum := 0
@@ -201,8 +239,20 @@ func testDebugLines(t *testing.T, file, function string, wantStmts []int) {
gotStmts = append(gotStmts, int(stmt))
}
}
- if !reflect.DeepEqual(wantStmts, gotStmts) {
- t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts)
- }
+ if ignoreRepeats { // remove repeats from gotStmts
+ newGotStmts := []int{gotStmts[0]}
+ for _, x := range gotStmts {
+ if x != newGotStmts[len(newGotStmts)-1] {
+ newGotStmts = append(newGotStmts, x)
+ }
+ }
+ if !reflect.DeepEqual(wantStmts, newGotStmts) {
+ t.Errorf("wanted stmts %v but got %v (with repeats still in: %v)", wantStmts, newGotStmts, gotStmts)
+ }
+ } else {
+ if !reflect.DeepEqual(wantStmts, gotStmts) {
+ t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts)
+ }
+ }
}
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
index 2bc58a3c47..23f113285b 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -1270,8 +1270,8 @@
(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
// comparison simplification
-((LT|LE|EQ|NE|GE|GT) (CMP x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMN x y)) // sense of carry bit not preserved
-((LT|LE|EQ|NE|GE|GT) (CMN x (RSBconst [0] y))) => ((LT|LE|EQ|NE|GE|GT) (CMP x y)) // sense of carry bit not preserved
+((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
+((EQ|NE) (CMN x (RSBconst [0] y))) => ((EQ|NE) (CMP x y)) // sense of carry bit not preserved; see also #50864
(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no)
(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL x y)) yes no)
(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
index d34e1899db..be8be4ebe3 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -649,19 +649,13 @@
(GT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMNW x y) yes no)
(GE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMNW x y) yes no)
+// CMP(x,-y) -> CMN(x,y) is only valid for unordered comparison, if y can be -1<<63
(EQ (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
(NE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
-(LT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMN x y) yes no)
-(LE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMN x y) yes no)
-(GT (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMN x y) yes no)
-(GE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMN x y) yes no)
+// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered comparison, if y can be -1<<31
(EQ (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
(NE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
-(LT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LT (CMNW x y) yes no)
-(LE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (LE (CMNW x y) yes no)
-(GT (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GT (CMNW x y) yes no)
-(GE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (GE (CMNW x y) yes no)
(EQ (CMPconst [0] x) yes no) => (Z x yes no)
(NE (CMPconst [0] x) yes no) => (NZ x yes no)
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
index e052ce09f4..2d03c44988 100644
--- a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -285,9 +285,9 @@ func init() {
{name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt
{name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
{name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
- {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63
{name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt
- {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit, provided arg1 is not 1<<31
{name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
{name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
{name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int64", typ: "Flags"}, // arg0 & auxInt compare to 0
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
index 2f004205a5..3803f273c1 100644
--- a/src/cmd/compile/internal/ssa/gen/ARMOps.go
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -331,7 +331,7 @@ func init() {
// comparisons
{name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
{name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
- {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<31
{name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
{name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
{name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
index cd7bad7acb..eef8a2557c 100644
--- a/src/cmd/compile/internal/ssa/gen/S390XOps.go
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -509,8 +509,9 @@ func init() {
// LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
// It saves all GP registers if necessary,
- // but clobbers R14 (LR) because it's a call.
- {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+ // but clobbers R14 (LR) because it's a call,
+ // and also clobbers R1 as the PLT stub does.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14") | r1}, clobberFlags: true, aux: "Sym", symEffect: "None"},
// There are three of these functions so that they can have three different register inputs.
// When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
index 2038575b0c..81fe5d4c23 100644
--- a/src/cmd/compile/internal/ssa/opGen.go
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -32779,7 +32779,7 @@ var opcodeTable = [...]opInfo{
{0, 4}, // R2
{1, 8}, // R3
},
- clobbers: 4294918144, // R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
},
},
{
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 496f9b4ae2..1b50bf9aa6 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -17153,42 +17153,6 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMLE, cmp)
return true
}
- // match: (GE (CMP x (RSBconst [0] y)))
- // result: (GE (CMN x y))
- for b.Controls[0].Op == OpARMCMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- break
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMGE, v0)
- return true
- }
- // match: (GE (CMN x (RSBconst [0] y)))
- // result: (GE (CMP x y))
- for b.Controls[0].Op == OpARMCMN {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- x := v_0_0
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- continue
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMGE, v0)
- return true
- }
- break
- }
// match: (GE (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (GEnoov (CMP x y) yes no)
@@ -18069,42 +18033,6 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMLT, cmp)
return true
}
- // match: (GT (CMP x (RSBconst [0] y)))
- // result: (GT (CMN x y))
- for b.Controls[0].Op == OpARMCMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- break
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMGT, v0)
- return true
- }
- // match: (GT (CMN x (RSBconst [0] y)))
- // result: (GT (CMP x y))
- for b.Controls[0].Op == OpARMCMN {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- x := v_0_0
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- continue
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMGT, v0)
- return true
- }
- break
- }
// match: (GT (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (GTnoov (CMP x y) yes no)
@@ -19076,42 +19004,6 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMGE, cmp)
return true
}
- // match: (LE (CMP x (RSBconst [0] y)))
- // result: (LE (CMN x y))
- for b.Controls[0].Op == OpARMCMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- break
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMLE, v0)
- return true
- }
- // match: (LE (CMN x (RSBconst [0] y)))
- // result: (LE (CMP x y))
- for b.Controls[0].Op == OpARMCMN {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- x := v_0_0
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- continue
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMLE, v0)
- return true
- }
- break
- }
// match: (LE (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (LEnoov (CMP x y) yes no)
@@ -19992,42 +19884,6 @@ func rewriteBlockARM(b *Block) bool {
b.resetWithControl(BlockARMGT, cmp)
return true
}
- // match: (LT (CMP x (RSBconst [0] y)))
- // result: (LT (CMN x y))
- for b.Controls[0].Op == OpARMCMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- break
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMLT, v0)
- return true
- }
- // match: (LT (CMN x (RSBconst [0] y)))
- // result: (LT (CMP x y))
- for b.Controls[0].Op == OpARMCMN {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- v_0_0 := v_0.Args[0]
- v_0_1 := v_0.Args[1]
- for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
- x := v_0_0
- if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
- continue
- }
- y := v_0_1.Args[0]
- v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARMLT, v0)
- return true
- }
- break
- }
// match: (LT (CMPconst [0] l:(SUB x y)) yes no)
// cond: l.Uses==1
// result: (LTnoov (CMP x y) yes no)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index ad34855c30..c5f53e5507 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -27983,46 +27983,6 @@ func rewriteBlockARM64(b *Block) bool {
}
break
}
- // match: (GE (CMP x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (GE (CMN x y) yes no)
- for b.Controls[0].Op == OpARM64CMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64GE, v0)
- return true
- }
- // match: (GE (CMPW x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (GE (CMNW x y) yes no)
- for b.Controls[0].Op == OpARM64CMPW {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64GE, v0)
- return true
- }
// match: (GE (CMPconst [0] z:(MADD a x y)) yes no)
// cond: z.Uses==1
// result: (GEnoov (CMN a (MUL x y)) yes no)
@@ -28419,46 +28379,6 @@ func rewriteBlockARM64(b *Block) bool {
}
break
}
- // match: (GT (CMP x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (GT (CMN x y) yes no)
- for b.Controls[0].Op == OpARM64CMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64GT, v0)
- return true
- }
- // match: (GT (CMPW x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (GT (CMNW x y) yes no)
- for b.Controls[0].Op == OpARM64CMPW {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64GT, v0)
- return true
- }
// match: (GT (CMPconst [0] z:(MADD a x y)) yes no)
// cond: z.Uses==1
// result: (GTnoov (CMN a (MUL x y)) yes no)
@@ -28951,46 +28871,6 @@ func rewriteBlockARM64(b *Block) bool {
}
break
}
- // match: (LE (CMP x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (LE (CMN x y) yes no)
- for b.Controls[0].Op == OpARM64CMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64LE, v0)
- return true
- }
- // match: (LE (CMPW x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (LE (CMNW x y) yes no)
- for b.Controls[0].Op == OpARM64CMPW {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64LE, v0)
- return true
- }
// match: (LE (CMPconst [0] z:(MADD a x y)) yes no)
// cond: z.Uses==1
// result: (LEnoov (CMN a (MUL x y)) yes no)
@@ -29363,46 +29243,6 @@ func rewriteBlockARM64(b *Block) bool {
}
break
}
- // match: (LT (CMP x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (LT (CMN x y) yes no)
- for b.Controls[0].Op == OpARM64CMP {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64LT, v0)
- return true
- }
- // match: (LT (CMPW x z:(NEG y)) yes no)
- // cond: z.Uses == 1
- // result: (LT (CMNW x y) yes no)
- for b.Controls[0].Op == OpARM64CMPW {
- v_0 := b.Controls[0]
- _ = v_0.Args[1]
- x := v_0.Args[0]
- z := v_0.Args[1]
- if z.Op != OpARM64NEG {
- break
- }
- y := z.Args[0]
- if !(z.Uses == 1) {
- break
- }
- v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
- v0.AddArg2(x, y)
- b.resetWithControl(BlockARM64LT, v0)
- return true
- }
// match: (LT (CMPconst [0] z:(MADD a x y)) yes no)
// cond: z.Uses==1
// result: (LTnoov (CMN a (MUL x y)) yes no)
diff --git a/src/cmd/compile/internal/ssa/testdata/convertline.go b/src/cmd/compile/internal/ssa/testdata/convertline.go
new file mode 100644
index 0000000000..08f3ae8a35
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/convertline.go
@@ -0,0 +1,16 @@
+package main
+
+import "fmt"
+
+func F[T any](n T) {
+ fmt.Printf("called\n")
+}
+
+func G[T any](n T) {
+ F(n)
+ fmt.Printf("after\n")
+}
+
+func main() {
+ G(3)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/pushback.go b/src/cmd/compile/internal/ssa/testdata/pushback.go
new file mode 100644
index 0000000000..754e6cbb23
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/pushback.go
@@ -0,0 +1,30 @@
+package main
+
+type Node struct {
+ Circular bool
+}
+
+type ExtNode[V any] struct {
+ v V
+ Node
+}
+
+type List[V any] struct {
+ root *ExtNode[V]
+ len int
+}
+
+func (list *List[V]) PushBack(arg V) {
+ if list.len == 0 {
+ list.root = &ExtNode[V]{v: arg}
+ list.root.Circular = true
+ list.len++
+ return
+ }
+ list.len++
+}
+
+func main() {
+ var v List[int]
+ v.PushBack(1)
+}
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
index b84199790f..0b54925696 100644
--- a/src/cmd/compile/internal/ssagen/ssa.go
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -484,6 +484,19 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func {
var params *abi.ABIParamResultInfo
params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
+ // The backend's stackframe pass prunes away entries from the fn's
+ // Dcl list, including PARAMOUT nodes that correspond to output
+ // params passed in registers. Walk the Dcl list and capture these
+ // nodes to a side list, so that we'll have them available during
+ // DWARF-gen later on. See issue 48573 for more details.
+ var debugInfo ssa.FuncDebug
+ for _, n := range fn.Dcl {
+ if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
+ debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
+ }
+ }
+ fn.DebugInfo = &debugInfo
+
// Generate addresses of local declarations
s.decladdrs = map[*ir.Name]*ssa.Value{}
for _, n := range fn.Dcl {
@@ -2433,6 +2446,38 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
return s.newValue1(op, tt, v)
}
+ if ft.IsComplex() && tt.IsComplex() {
+ var op ssa.Op
+ if ft.Size() == tt.Size() {
+ switch ft.Size() {
+ case 8:
+ op = ssa.OpRound32F
+ case 16:
+ op = ssa.OpRound64F
+ default:
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ } else if ft.Size() == 8 && tt.Size() == 16 {
+ op = ssa.OpCvt32Fto64F
+ } else if ft.Size() == 16 && tt.Size() == 8 {
+ op = ssa.OpCvt64Fto32F
+ } else {
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ ftp := types.FloatForComplex(ft)
+ ttp := types.FloatForComplex(tt)
+ return s.newValue2(ssa.OpComplexMake, tt,
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
+ }
+
+ if tt.IsComplex() { // and ft is not complex
+ // Needed for generics support - can't happen in normal Go code.
+ et := types.FloatForComplex(tt)
+ v = s.conv(n, v, ft, et)
+ return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et))
+ }
+
if ft.IsFloat() || tt.IsFloat() {
conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
@@ -2506,31 +2551,6 @@ func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
return nil
}
- if ft.IsComplex() && tt.IsComplex() {
- var op ssa.Op
- if ft.Size() == tt.Size() {
- switch ft.Size() {
- case 8:
- op = ssa.OpRound32F
- case 16:
- op = ssa.OpRound64F
- default:
- s.Fatalf("weird complex conversion %v -> %v", ft, tt)
- }
- } else if ft.Size() == 8 && tt.Size() == 16 {
- op = ssa.OpCvt32Fto64F
- } else if ft.Size() == 16 && tt.Size() == 8 {
- op = ssa.OpCvt64Fto32F
- } else {
- s.Fatalf("weird complex conversion %v -> %v", ft, tt)
- }
- ftp := types.FloatForComplex(ft)
- ttp := types.FloatForComplex(tt)
- return s.newValue2(ssa.OpComplexMake, tt,
- s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
- s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
- }
-
s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
return nil
}
@@ -5075,6 +5095,18 @@ func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Val
for _, p := range params.InParams() { // includes receiver for interface calls
ACArgs = append(ACArgs, p.Type)
}
+
+ // Split the entry block if there are open defers, because later calls to
+ // openDeferSave may cause a mismatch between the mem for an OpDereference
+ // and the call site which uses it. See #49282.
+ if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ curb := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(curb)
+ s.startBlock(curb)
+ }
+
for i, n := range args {
callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
}
@@ -6552,6 +6584,22 @@ func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
// explicit statement boundaries should appear
// in the generated code.
if p.IsStmt() != src.PosIsStmt {
+ if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) {
+ // If s.pp.Pos already has a statement mark, then it was set here (below) for
+ // the previous value. If an actual instruction had been emitted for that
+ // value, then the statement mark would have been reset. Since the statement
+ // mark of s.pp.Pos was not reset, this position (file/line) still needs a
+ // statement mark on an instruction. If file and line for this value are
+ // the same as the previous value, then the first instruction for this
+ // value will work to take the statement mark. Return early to avoid
+ // resetting the statement mark.
+ //
+ // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits
+ // an instruction, and the instruction's statement mark was set,
+ // and it is not one of the LosesStmtMark instructions,
+ // then Prog() resets the statement mark on the (*Progs).Pos.
+ return
+ }
p = p.WithNotStmt()
// Calls use the pos attached to v, but copy the statement mark from State
}
@@ -6793,6 +6841,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
for i, b := range f.Blocks {
s.bstart[b.ID] = s.pp.Next
s.lineRunStart = nil
+ s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
// Attach a "default" liveness info. Normally this will be
// overwritten in the Values loop below for each Value. But
@@ -6991,12 +7040,12 @@ func genssa(f *ssa.Func, pp *objw.Progs) {
if base.Ctxt.Flag_locationlists {
var debugInfo *ssa.FuncDebug
+ debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug)
if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
- debugInfo = ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
+ ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
} else {
- debugInfo = ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset)
+ ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
}
- e.curfn.DebugInfo = debugInfo
bstart := s.bstart
idToIdx := make([]int, f.NumBlocks())
for i, b := range f.Blocks {
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
index 770175fe54..a75a3b1a2e 100644
--- a/src/cmd/compile/internal/syntax/parser.go
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -588,44 +588,81 @@ func (p *parser) typeDecl(group *Group) Decl {
d.Name = p.name()
if p.allowGenerics() && p.tok == _Lbrack {
// d.Name "[" ...
- // array/slice or type parameter list
+ // array/slice type or type parameter list
pos := p.pos()
p.next()
switch p.tok {
case _Name:
- // d.Name "[" name ...
- // array or type parameter list
- name := p.name()
- // Index or slice expressions are never constant and thus invalid
- // array length expressions. Thus, if we see a "[" following name
- // we can safely assume that "[" name starts a type parameter list.
- var x Expr // x != nil means x is the array length expression
+ // We may have an array type or a type parameter list.
+ // In either case we expect an expression x (which may
+ // just be a name, or a more complex expression) which
+ // we can analyze further.
+ //
+ // A type parameter list may have a type bound starting
+ // with a "[" as in: P []E. In that case, simply parsing
+ // an expression would lead to an error: P[] is invalid.
+ // But since index or slice expressions are never constant
+ // and thus invalid array length expressions, if we see a
+ // "[" following a name it must be the start of an array
+ // or slice constraint. Only if we don't see a "[" do we
+ // need to parse a full expression.
+ var x Expr = p.name()
if p.tok != _Lbrack {
- // d.Name "[" name ...
- // If we reach here, the next token is not a "[", and we need to
- // parse the expression starting with name. If that expression is
- // just that name, not followed by a "]" (in which case we might
- // have the array length "[" name "]"), we can also safely assume
- // a type parameter list.
+ // To parse the expression starting with name, expand
+ // the call sequence we would get by passing in name
+ // to parser.expr, and pass in name to parser.pexpr.
p.xnest++
- // To parse the expression starting with name, expand the call
- // sequence we would get by passing in name to parser.expr, and
- // pass in name to parser.pexpr.
- x = p.binaryExpr(p.pexpr(name, false), 0)
+ x = p.binaryExpr(p.pexpr(x, false), 0)
p.xnest--
- if x == name && p.tok != _Rbrack {
- x = nil
+ }
+
+ // analyze the cases
+ var pname *Name // pname != nil means pname is the type parameter name
+ var ptype Expr // ptype != nil means ptype is the type parameter type; pname != nil in this case
+ switch t := x.(type) {
+ case *Name:
+ // Unless we see a "]", we are at the start of a type parameter list.
+ if p.tok != _Rbrack {
+ // d.Name "[" name ...
+ pname = t
+ // no ptype
+ }
+ case *Operation:
+ // If we have an expression of the form name*T, and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.X.(*Name); name != nil {
+ if t.Op == Mul && (isTypeLit(t.Y) || p.tok == _Comma) {
+ // d.Name "[" name "*" t.Y
+ // d.Name "[" name "*" t.Y ","
+ t.X, t.Y = t.Y, nil // convert t into unary *t.Y
+ pname = name
+ ptype = t
+ }
+ }
+ case *CallExpr:
+ // If we have an expression of the form name(T), and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.Fun.(*Name); name != nil {
+ if len(t.ArgList) == 1 && !t.HasDots && (isTypeLit(t.ArgList[0]) || p.tok == _Comma) {
+ // d.Name "[" name "(" t.ArgList[0] ")"
+ // d.Name "[" name "(" t.ArgList[0] ")" ","
+ pname = name
+ ptype = t.ArgList[0]
+ }
}
}
- if x == nil {
- // d.Name "[" name ...
- // type parameter list
- d.TParamList = p.paramList(name, _Rbrack, true)
+
+ if pname != nil {
+ // d.Name "[" pname ...
+ // d.Name "[" pname ptype ...
+ // d.Name "[" pname ptype "," ...
+ d.TParamList = p.paramList(pname, ptype, _Rbrack, true)
d.Alias = p.gotAssign()
d.Type = p.typeOrNil()
} else {
- // d.Name "[" x "]" ...
- // x is the array length expression
+ // d.Name "[" x ...
d.Type = p.arrayType(pos, x)
}
case _Rbrack:
@@ -650,6 +687,21 @@ func (p *parser) typeDecl(group *Group) Decl {
return d
}
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x Expr) bool {
+ switch x := x.(type) {
+ case *ArrayType, *StructType, *FuncType, *InterfaceType, *SliceType, *MapType, *ChanType:
+ return true
+ case *Operation:
+ // *T may be a pointer dereference.
+ // Only consider *T as type literal if T is a type literal.
+ return x.Op == Mul && x.Y == nil && isTypeLit(x.X)
+ case *ParenExpr:
+ return isTypeLit(x.X)
+ }
+ return false
+}
+
// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
func (p *parser) varDecl(group *Group) Decl {
if trace {
@@ -689,7 +741,7 @@ func (p *parser) funcDeclOrNil() *FuncDecl {
f.Pragma = p.takePragma()
if p.got(_Lparen) {
- rcvr := p.paramList(nil, _Rparen, false)
+ rcvr := p.paramList(nil, nil, _Rparen, false)
switch len(rcvr) {
case 0:
p.error("method has no receiver")
@@ -708,7 +760,13 @@ func (p *parser) funcDeclOrNil() *FuncDecl {
}
f.Name = p.name()
- f.TParamList, f.Type = p.funcType("")
+
+ context := ""
+ if f.Recv != nil && p.mode&AllowMethodTypeParams == 0 {
+ context = "method" // don't permit (method) type parameters in funcType
+ }
+ f.TParamList, f.Type = p.funcType(context)
+
if p.tok == _Lbrace {
f.Body = p.funcBody()
}
@@ -1363,18 +1421,18 @@ func (p *parser) funcType(context string) ([]*Field, *FuncType) {
if p.allowGenerics() && p.got(_Lbrack) {
if context != "" {
// accept but complain
- p.syntaxErrorAt(typ.pos, context+" cannot have type parameters")
+ p.syntaxErrorAt(typ.pos, context+" must have no type parameters")
}
if p.tok == _Rbrack {
p.syntaxError("empty type parameter list")
p.next()
} else {
- tparamList = p.paramList(nil, _Rbrack, true)
+ tparamList = p.paramList(nil, nil, _Rbrack, true)
}
}
p.want(_Lparen)
- typ.ParamList = p.paramList(nil, _Rparen, false)
+ typ.ParamList = p.paramList(nil, nil, _Rparen, false)
typ.ResultList = p.funcResult()
return tparamList, typ
@@ -1392,6 +1450,13 @@ func (p *parser) arrayType(pos Pos, len Expr) Expr {
len = p.expr()
p.xnest--
}
+ if p.tok == _Comma {
+ // Trailing commas are accepted in type parameter
+ // lists but not in array type declarations.
+ // Accept for better error handling but complain.
+ p.syntaxError("unexpected comma; expecting ]")
+ p.next()
+ }
p.want(_Rbrack)
t := new(ArrayType)
t.pos = pos
@@ -1516,7 +1581,7 @@ func (p *parser) funcResult() []*Field {
}
if p.got(_Lparen) {
- return p.paramList(nil, _Rparen, false)
+ return p.paramList(nil, nil, _Rparen, false)
}
pos := p.pos()
@@ -1742,7 +1807,7 @@ func (p *parser) methodDecl() *Field {
// A type argument list looks like a parameter list with only
// types. Parse a parameter list and decide afterwards.
- list := p.paramList(nil, _Rbrack, false)
+ list := p.paramList(nil, nil, _Rbrack, false)
if len(list) == 0 {
// The type parameter list is not [] but we got nothing
// due to other errors (reported by paramList). Treat
@@ -1764,7 +1829,7 @@ func (p *parser) methodDecl() *Field {
// TODO(gri) Record list as type parameter list with f.Type
// if we want to type-check the generic method.
// For now, report an error so this is not a silent event.
- p.errorAt(pos, "interface method cannot have type parameters")
+ p.errorAt(pos, "interface method must have no type parameters")
break
}
@@ -1948,17 +2013,41 @@ func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
// ParameterList = ParameterDecl { "," ParameterDecl } .
// "(" or "[" has already been consumed.
// If name != nil, it is the first name after "(" or "[".
+// If typ != nil, name must be != nil, and (name, typ) is the first field in the list.
// In the result list, either all fields have a name, or no field has a name.
-func (p *parser) paramList(name *Name, close token, requireNames bool) (list []*Field) {
+func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool) (list []*Field) {
if trace {
defer p.trace("paramList")()
}
+ // p.list won't invoke its function argument if we're at the end of the
+ // parameter list. If we have a complete field, handle this case here.
+ if name != nil && typ != nil && p.tok == close {
+ p.next()
+ par := new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ return []*Field{par}
+ }
+
var named int // number of parameters that have an explicit name and type
var typed int // number of parameters that have an explicit type
end := p.list(_Comma, close, func() bool {
- par := p.paramDeclOrNil(name, close)
+ var par *Field
+ if typ != nil {
+ if debug && name == nil {
+ panic("initial type provided without name")
+ }
+ par = new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ } else {
+ par = p.paramDeclOrNil(name, close)
+ }
name = nil // 1st name was consumed if present
+ typ = nil // 1st type was consumed if present
if par != nil {
if debug && par.Name == nil && par.Type == nil {
panic("parameter without name or type")
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
index 68f3c376c9..e258a17c38 100644
--- a/src/cmd/compile/internal/syntax/parser_test.go
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -46,7 +46,7 @@ func TestParseGo2(t *testing.T) {
for _, fi := range list {
name := fi.Name()
if !fi.IsDir() && !strings.HasPrefix(name, ".") {
- ParseFile(filepath.Join(dir, name), func(err error) { t.Error(err) }, nil, AllowGenerics)
+ ParseFile(filepath.Join(dir, name), func(err error) { t.Error(err) }, nil, AllowGenerics|AllowMethodTypeParams)
}
}
}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
index c8d31799af..0385227c7c 100644
--- a/src/cmd/compile/internal/syntax/printer.go
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -44,7 +44,7 @@ func Fprint(w io.Writer, x Node, form Form) (n int, err error) {
return
}
-// String is a convenience functions that prints n in ShortForm
+// String is a convenience function that prints n in ShortForm
// and returns the printed string.
func String(n Node) string {
var buf bytes.Buffer
@@ -666,9 +666,7 @@ func (p *printer) printRawNode(n Node) {
}
p.print(n.Name)
if n.TParamList != nil {
- p.print(_Lbrack)
- p.printFieldList(n.TParamList, nil, _Comma)
- p.print(_Rbrack)
+ p.printParameterList(n.TParamList, true)
}
p.print(blank)
if n.Alias {
@@ -700,9 +698,7 @@ func (p *printer) printRawNode(n Node) {
}
p.print(n.Name)
if n.TParamList != nil {
- p.print(_Lbrack)
- p.printFieldList(n.TParamList, nil, _Comma)
- p.print(_Rbrack)
+ p.printParameterList(n.TParamList, true)
}
p.printSignature(n.Type)
if n.Body != nil {
@@ -887,38 +883,47 @@ func (p *printer) printDeclList(list []Decl) {
}
func (p *printer) printSignature(sig *FuncType) {
- p.printParameterList(sig.ParamList)
+ p.printParameterList(sig.ParamList, false)
if list := sig.ResultList; list != nil {
p.print(blank)
if len(list) == 1 && list[0].Name == nil {
p.printNode(list[0].Type)
} else {
- p.printParameterList(list)
+ p.printParameterList(list, false)
}
}
}
-func (p *printer) printParameterList(list []*Field) {
- p.print(_Lparen)
- if len(list) > 0 {
- for i, f := range list {
- if i > 0 {
- p.print(_Comma, blank)
- }
- if f.Name != nil {
- p.printNode(f.Name)
- if i+1 < len(list) {
- f1 := list[i+1]
- if f1.Name != nil && f1.Type == f.Type {
- continue // no need to print type
- }
+func (p *printer) printParameterList(list []*Field, types bool) {
+ open, close := _Lparen, _Rparen
+ if types {
+ open, close = _Lbrack, _Rbrack
+ }
+ p.print(open)
+ for i, f := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ if f.Name != nil {
+ p.printNode(f.Name)
+ if i+1 < len(list) {
+ f1 := list[i+1]
+ if f1.Name != nil && f1.Type == f.Type {
+ continue // no need to print type
}
- p.print(blank)
}
- p.printNode(f.Type)
+ p.print(blank)
+ }
+ p.printNode(unparen(f.Type)) // no need for (extra) parentheses around parameter types
+ }
+ // A type parameter list [P *T] where T is not a type literal requires a comma as in [P *T,]
+ // so that it's not parsed as [P*T].
+ if types && len(list) == 1 {
+ if t, _ := list[0].Type.(*Operation); t != nil && t.Op == Mul && t.Y == nil && !isTypeLit(t.X) {
+ p.print(_Comma)
}
}
- p.print(_Rparen)
+ p.print(close)
}
func (p *printer) printStmtList(list []Stmt, braces bool) {
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
index 604f1fc1ca..941af0aeb4 100644
--- a/src/cmd/compile/internal/syntax/printer_test.go
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -53,54 +53,77 @@ func TestPrintError(t *testing.T) {
}
}
-var stringTests = []string{
- "package p",
- "package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )",
+var stringTests = [][2]string{
+ dup("package p"),
+ dup("package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )"),
// generic type declarations
- "package p; type _[T any] struct{}",
- "package p; type _[A, B, C interface{m()}] struct{}",
- "package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}] struct{}",
+ dup("package p; type _[T any] struct{}"),
+ dup("package p; type _[A, B, C interface{m()}] struct{}"),
+ dup("package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}] struct{}"),
+
+ dup("package p; type _[P *T,] struct{}"),
+ dup("package p; type _[P *T, _ any] struct{}"),
+ {"package p; type _[P (*T),] struct{}", "package p; type _[P *T,] struct{}"},
+ {"package p; type _[P (*T), _ any] struct{}", "package p; type _[P *T, _ any] struct{}"},
+ {"package p; type _[P (T),] struct{}", "package p; type _[P T] struct{}"},
+ {"package p; type _[P (T), _ any] struct{}", "package p; type _[P T, _ any] struct{}"},
+
+ dup("package p; type _[P *struct{}] struct{}"),
+ {"package p; type _[P (*struct{})] struct{}", "package p; type _[P *struct{}] struct{}"},
+ {"package p; type _[P ([]int)] struct{}", "package p; type _[P []int] struct{}"},
+
+ dup("package p; type _ [P(T)]struct{}"),
+ dup("package p; type _ [P((T))]struct{}"),
+ dup("package p; type _ [P * *T]struct{}"),
+ dup("package p; type _ [P * T]struct{}"),
+ dup("package p; type _ [P(*T)]struct{}"),
+ dup("package p; type _ [P(**T)]struct{}"),
+ dup("package p; type _ [P * T - T]struct{}"),
+
+ // array type declarations
+ dup("package p; type _ [P * T]struct{}"),
+ dup("package p; type _ [P * T - T]struct{}"),
// generic function declarations
- "package p; func _[T any]()",
- "package p; func _[A, B, C interface{m()}]()",
- "package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}]()",
+ dup("package p; func _[T any]()"),
+ dup("package p; func _[A, B, C interface{m()}]()"),
+ dup("package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}]()"),
// methods with generic receiver types
- "package p; func (R[T]) _()",
- "package p; func (*R[A, B, C]) _()",
- "package p; func (_ *R[A, B, C]) _()",
+ dup("package p; func (R[T]) _()"),
+ dup("package p; func (*R[A, B, C]) _()"),
+ dup("package p; func (_ *R[A, B, C]) _()"),
// type constraint literals with elided interfaces
- "package p; func _[P ~int, Q int | string]() {}",
- "package p; func _[P struct{f int}, Q *P]() {}",
+ dup("package p; func _[P ~int, Q int | string]() {}"),
+ dup("package p; func _[P struct{f int}, Q *P]() {}"),
// channels
- "package p; type _ chan chan int",
- "package p; type _ chan (<-chan int)",
- "package p; type _ chan chan<- int",
+ dup("package p; type _ chan chan int"),
+ dup("package p; type _ chan (<-chan int)"),
+ dup("package p; type _ chan chan<- int"),
- "package p; type _ <-chan chan int",
- "package p; type _ <-chan <-chan int",
- "package p; type _ <-chan chan<- int",
+ dup("package p; type _ <-chan chan int"),
+ dup("package p; type _ <-chan <-chan int"),
+ dup("package p; type _ <-chan chan<- int"),
- "package p; type _ chan<- chan int",
- "package p; type _ chan<- <-chan int",
- "package p; type _ chan<- chan<- int",
+ dup("package p; type _ chan<- chan int"),
+ dup("package p; type _ chan<- <-chan int"),
+ dup("package p; type _ chan<- chan<- int"),
// TODO(gri) expand
}
func TestPrintString(t *testing.T) {
- for _, want := range stringTests {
- ast, err := Parse(nil, strings.NewReader(want), nil, nil, AllowGenerics)
+ for _, test := range stringTests {
+ ast, err := Parse(nil, strings.NewReader(test[0]), nil, nil, AllowGenerics)
if err != nil {
t.Error(err)
continue
}
- if got := String(ast); got != want {
- t.Errorf("%q: got %q", want, got)
+ if got := String(ast); got != test[1] {
+ t.Errorf("%q: got %q", test[1], got)
}
}
}
diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go
index f3d4c09ed5..25c8116206 100644
--- a/src/cmd/compile/internal/syntax/syntax.go
+++ b/src/cmd/compile/internal/syntax/syntax.go
@@ -17,6 +17,7 @@ type Mode uint
const (
CheckBranches Mode = 1 << iota // check correct use of labels, break, continue, and goto statements
AllowGenerics
+ AllowMethodTypeParams // does not support interface methods yet; ignored if AllowGenerics is not set
)
// Error describes a syntax error. Error implements the error interface.
diff --git a/src/cmd/compile/internal/syntax/testdata/issue48382.go2 b/src/cmd/compile/internal/syntax/testdata/issue48382.go2
index 1e8f4b0ec6..c00fee6f82 100644
--- a/src/cmd/compile/internal/syntax/testdata/issue48382.go2
+++ b/src/cmd/compile/internal/syntax/testdata/issue48382.go2
@@ -4,12 +4,12 @@
package p
-type _ func /* ERROR function type cannot have type parameters */ [ /* ERROR empty type parameter list */ ]()
-type _ func /* ERROR function type cannot have type parameters */ [ x /* ERROR missing type constraint */ ]()
-type _ func /* ERROR function type cannot have type parameters */ [P any]()
+type _ func /* ERROR function type must have no type parameters */ [ /* ERROR empty type parameter list */ ]()
+type _ func /* ERROR function type must have no type parameters */ [ x /* ERROR missing type constraint */ ]()
+type _ func /* ERROR function type must have no type parameters */ [P any]()
-var _ = func /* ERROR function literal cannot have type parameters */ [P any]() {}
+var _ = func /* ERROR function literal must have no type parameters */ [P any]() {}
type _ interface{
- m /* ERROR interface method cannot have type parameters */ [P any]()
+ m /* ERROR interface method must have no type parameters */ [P any]()
}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue49482.go2 b/src/cmd/compile/internal/syntax/testdata/issue49482.go2
new file mode 100644
index 0000000000..1fc303d169
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue49482.go2
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // these need a comma to disambiguate
+ _[P *T,] struct{}
+ _[P *T, _ any] struct{}
+ _[P (*T),] struct{}
+ _[P (*T), _ any] struct{}
+ _[P (T),] struct{}
+ _[P (T), _ any] struct{}
+
+ // these parse as name followed by type
+ _[P *struct{}] struct{}
+ _[P (*struct{})] struct{}
+ _[P ([]int)] struct{}
+
+ // array declarations
+ _ [P(T)]struct{}
+ _ [P((T))]struct{}
+ _ [P * *T] struct{} // this could be a name followed by a type but it makes the rules more complicated
+ _ [P * T]struct{}
+ _ [P(*T)]struct{}
+ _ [P(**T)]struct{}
+ _ [P * T - T]struct{}
+ _ [P*T-T /* ERROR unexpected comma */ ,]struct{}
+ _ [10 /* ERROR unexpected comma */ ,]struct{}
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go2 b/src/cmd/compile/internal/syntax/testdata/tparams.go2
index 80e155bfe0..a9bd72cf2d 100644
--- a/src/cmd/compile/internal/syntax/testdata/tparams.go2
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go2
@@ -13,7 +13,7 @@ type t struct {
}
type t interface {
t[a]
- m /* ERROR method cannot have type parameters */ [_ _, /* ERROR mixed */ _]()
+ m /* ERROR method must have no type parameters */ [_ _, /* ERROR mixed */ _]()
t[a, b]
}
diff --git a/src/cmd/compile/internal/test/issue50182_test.go b/src/cmd/compile/internal/test/issue50182_test.go
new file mode 100644
index 0000000000..cd277fa285
--- /dev/null
+++ b/src/cmd/compile/internal/test/issue50182_test.go
@@ -0,0 +1,62 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "fmt"
+ "sort"
+ "testing"
+)
+
+// Test that calling methods on generic types doesn't cause allocations.
+func genericSorted[T sort.Interface](data T) bool {
+ n := data.Len()
+ for i := n - 1; i > 0; i-- {
+ if data.Less(i, i-1) {
+ return false
+ }
+ }
+ return true
+}
+func TestGenericSorted(t *testing.T) {
+ var data = sort.IntSlice{-10, -5, 0, 1, 2, 3, 5, 7, 11, 100, 100, 100, 1000, 10000}
+ f := func() {
+ genericSorted(data)
+ }
+ if n := testing.AllocsPerRun(10, f); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+}
+
+// Test that escape analysis correctly tracks escaping inside of methods
+// called on generic types.
+type fooer interface {
+ foo()
+}
+type P struct {
+ p *int
+ q int
+}
+
+var esc []*int
+
+func (p P) foo() {
+ esc = append(esc, p.p) // foo escapes the pointer from inside of p
+}
+func f[T fooer](t T) {
+ t.foo()
+}
+func TestGenericEscape(t *testing.T) {
+ for i := 0; i < 4; i++ {
+ var x int = 77 + i
+ var p P = P{p: &x}
+ f(p)
+ }
+ for i, p := range esc {
+ if got, want := *p, 77+i; got != want {
+ panic(fmt.Sprintf("entry %d: got %d, want %d", i, got, want))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/crawler.go b/src/cmd/compile/internal/typecheck/crawler.go
index ae6542d071..4394c6e698 100644
--- a/src/cmd/compile/internal/typecheck/crawler.go
+++ b/src/cmd/compile/internal/typecheck/crawler.go
@@ -8,19 +8,32 @@ import (
"cmd/compile/internal/base"
"cmd/compile/internal/ir"
"cmd/compile/internal/types"
+ "cmd/internal/src"
)
// crawlExports crawls the type/object graph rooted at the given list of exported
-// objects. It descends through all parts of types and follows any methods on defined
-// types. Any functions that are found to be potentially callable by importers are
-// marked with ExportInline, so that iexport.go knows to re-export their inline body.
-// Also, any function or global referenced by a function marked by ExportInline() is
-// marked for export (whether its name is exported or not).
+// objects (which are variables, functions, and types). It descends through all parts
+// of types and follows methods on defined types. Any functions that are found to be
+// potentially callable by importers directly or after inlining are marked with
+// ExportInline, so that iexport.go knows to export their inline body.
+//
+// The overall purpose of crawlExports is to AVOID exporting inlineable methods
+// that cannot actually be referenced, thereby reducing the size of the exports
+// significantly.
+//
+// For non-generic defined types reachable from global variables, we only set
+// ExportInline for exported methods. For defined types that are directly named or are
+// embedded recursively in such a type, we set ExportInline for all methods, since
+// these types can be embedded in another local type. For instantiated types that are
+// used anywhere in an inlineable function, we set ExportInline on all methods of the
+// base generic type, since all methods will be needed for creating any instantiated
+// type.
func crawlExports(exports []*ir.Name) {
p := crawler{
- marked: make(map[*types.Type]bool),
- embedded: make(map[*types.Type]bool),
- generic: make(map[*types.Type]bool),
+ marked: make(map[*types.Type]bool),
+ embedded: make(map[*types.Type]bool),
+ generic: make(map[*types.Type]bool),
+ checkFullyInst: make(map[*types.Type]bool),
}
for _, n := range exports {
p.markObject(n)
@@ -28,9 +41,10 @@ func crawlExports(exports []*ir.Name) {
}
type crawler struct {
- marked map[*types.Type]bool // types already seen by markType
- embedded map[*types.Type]bool // types already seen by markEmbed
- generic map[*types.Type]bool // types already seen by markGeneric
+ marked map[*types.Type]bool // types already seen by markType
+ embedded map[*types.Type]bool // types already seen by markEmbed
+ generic map[*types.Type]bool // types already seen by markGeneric
+ checkFullyInst map[*types.Type]bool // types already seen by checkForFullyInst
}
// markObject visits a reachable object (function, method, global type, or global variable)
@@ -170,10 +184,12 @@ func (p *crawler) markEmbed(t *types.Type) {
}
}
-// markGeneric takes an instantiated type or a base generic type t, and
-// marks all the methods of the base generic type of t. If a base generic
-// type is written to export file, even if not explicitly marked for export,
-// all of its methods need to be available for instantiation if needed.
+// markGeneric takes an instantiated type or a base generic type t, and marks all the
+// methods of the base generic type of t. If a base generic type is written out for
+// export, even if not explicitly marked for export, then all of its methods need to
+// be available for instantiation, since we always create all methods of a specified
+// instantiated type. Non-exported methods must generally be instantiated, since they may
+// be called by the exported methods or other generic functions in the same package.
func (p *crawler) markGeneric(t *types.Type) {
if t.IsPtr() {
t = t.Elem()
@@ -194,6 +210,94 @@ func (p *crawler) markGeneric(t *types.Type) {
}
}
+// checkForFullyInst looks for fully-instantiated types in a type (at any nesting
+// level). If it finds a fully-instantiated type, it ensures that the necessary
+// dictionary and shape methods are exported. It updates p.checkFullyInst, so it
+// traverses each particular type only once.
+func (p *crawler) checkForFullyInst(t *types.Type) {
+ if p.checkFullyInst[t] {
+ return
+ }
+ p.checkFullyInst[t] = true
+
+ if t.IsFullyInstantiated() && !t.HasShape() && !t.IsInterface() && t.Methods().Len() > 0 {
+ // For any fully-instantiated type, the relevant
+ // dictionaries and shape instantiations will have
+ // already been created or are in the import data.
+ // Make sure that they are exported, so that any
+ // other package that inlines this function will have
+ // them available for import, and so will not need
+ // another round of method and dictionary
+ // instantiation after inlining.
+ baseType := t.OrigSym().Def.(*ir.Name).Type()
+ shapes := make([]*types.Type, len(t.RParams()))
+ for i, t1 := range t.RParams() {
+ shapes[i] = Shapify(t1, i, baseType.RParams()[i])
+ }
+ for j := range t.Methods().Slice() {
+ baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
+ dictsym := MakeDictSym(baseNname.Sym(), t.RParams(), true)
+ if dictsym.Def == nil {
+ in := Resolve(ir.NewIdent(src.NoXPos, dictsym))
+ dictsym = in.Sym()
+ }
+ Export(dictsym.Def.(*ir.Name))
+ methsym := MakeFuncInstSym(baseNname.Sym(), shapes, false, true)
+ if methsym.Def == nil {
+ in := Resolve(ir.NewIdent(src.NoXPos, methsym))
+ methsym = in.Sym()
+ }
+ methNode := methsym.Def.(*ir.Name)
+ Export(methNode)
+ if HaveInlineBody(methNode.Func) {
+ // Export the body as well if
+ // instantiation is inlineable.
+ ImportedBody(methNode.Func)
+ methNode.Func.SetExportInline(true)
+ }
+ }
+ }
+
+ // Descend into the type. We descend even if it is a fully-instantiated type,
+ // since the instantiated type may have other instantiated types inside of
+ // it (in fields, methods, etc.).
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.checkForFullyInst(t.Elem())
+
+ case types.TCHAN:
+ p.checkForFullyInst(t.Elem())
+
+ case types.TMAP:
+ p.checkForFullyInst(t.Key())
+ p.checkForFullyInst(t.Elem())
+
+ case types.TSTRUCT:
+ if t.IsFuncArgStruct() {
+ break
+ }
+ for _, f := range t.FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+
+ case types.TFUNC:
+ if recv := t.Recv(); recv != nil {
+ p.checkForFullyInst(t.Recv().Type)
+ }
+ for _, f := range t.Params().FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+ for _, f := range t.Results().FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+
+ case types.TINTER:
+ for _, f := range t.AllMethods().Slice() {
+ p.checkForFullyInst(f.Type)
+ }
+ }
+}
+
// markInlBody marks n's inline body for export and recursively
// ensures all called functions are marked too.
func (p *crawler) markInlBody(n *ir.Name) {
@@ -222,8 +326,13 @@ func (p *crawler) markInlBody(n *ir.Name) {
doFlood = func(n ir.Node) {
t := n.Type()
if t != nil {
- if t.HasTParam() || t.IsFullyInstantiated() {
+ if t.HasTParam() {
+ // If any generic types are used, then make sure that
+ // the methods of the generic type are exported and
+ // scanned for other possible exports.
p.markGeneric(t)
+ } else {
+ p.checkForFullyInst(t)
}
if base.Debug.Unified == 0 {
// If a method of un-exported type is promoted and accessible by
@@ -256,6 +365,10 @@ func (p *crawler) markInlBody(n *ir.Name) {
switch n.Class {
case ir.PFUNC:
p.markInlBody(n)
+ // Note: this Export() and the one below seem unneeded,
+ // since any function/extern name encountered in an
+ // exported function body will be exported
+ // automatically via qualifiedIdent() in iexport.go.
Export(n)
case ir.PEXTERN:
Export(n)
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
index 9b74bf7a9d..eb316d33db 100644
--- a/src/cmd/compile/internal/typecheck/expr.go
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -466,6 +466,27 @@ func tcConv(n *ir.ConvExpr) ir.Node {
if n.X.Op() == ir.OLITERAL {
return stringtoruneslit(n)
}
+
+ case ir.OBYTES2STR:
+ if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] {
+ // If t is a slice of a user-defined byte type B (not uint8
+ // or byte), then add an extra CONVNOP from []B to []byte, so
+ // that the call to slicebytetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ case ir.ORUNES2STR:
+ if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] {
+ // If t is a slice of a user-defined rune type B (not uint32
+ // or rune), then add an extra CONVNOP from []B to []rune, so
+ // that the call to slicerunetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
}
return n
}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
index 7dec65c1d6..57b15b7a2b 100644
--- a/src/cmd/compile/internal/typecheck/func.go
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -160,7 +160,12 @@ func ImportedBody(fn *ir.Func) {
IncrementalAddrtaken = false
defer func() {
if DirtyAddrtaken {
- ComputeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available
+ // We do ComputeAddrTaken on function instantiations, but not
+ // generic functions (since we may not yet know if x in &x[i]
+ // is an array or a slice).
+ if !fn.Type().HasTParam() {
+ ComputeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available
+ }
DirtyAddrtaken = false
}
IncrementalAddrtaken = true
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
index f685851e40..ae3c41ca04 100644
--- a/src/cmd/compile/internal/typecheck/iexport.go
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -243,6 +243,7 @@ import (
"io"
"math/big"
"sort"
+ "strconv"
"strings"
"cmd/compile/internal/base"
@@ -261,7 +262,7 @@ import (
const (
iexportVersionGo1_11 = 0
iexportVersionPosCol = 1
- iexportVersionGenerics = 1 // probably change to 2 before release
+ iexportVersionGenerics = 2
iexportVersionGo1_18 = 2
iexportVersionCurrent = 2
@@ -730,6 +731,36 @@ func (w *exportWriter) qualifiedIdent(n *ir.Name) {
w.pkg(s.Pkg)
}
+const blankMarker = "$"
+
+// TparamExportName creates a unique name for type param in a method or a generic
+// type, using the specified unique prefix and the index of the type param. The index
+// is only used if the type param is blank, in which case the blank is replaced by
+// "$". A unique name is needed for later substitution in the compiler and
+// export/import that keeps blank type params associated with the correct constraint.
+func TparamExportName(prefix string, name string, index int) string {
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(index)
+ }
+ return prefix + "." + name
+}
+
+// TparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See TparamExportName
+// for details.
+func TparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ return ""
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
func (w *exportWriter) selector(s *types.Sym) {
if w.currPkg == nil {
base.Fatalf("missing currPkg")
@@ -1418,6 +1449,12 @@ func (w *exportWriter) funcExt(n *ir.Name) {
w.uint64(1 + uint64(n.Func.Inl.Cost))
w.bool(n.Func.Inl.CanDelayResults)
if n.Func.ExportInline() || n.Type().HasTParam() {
+ if n.Type().HasTParam() {
+ // If this generic function/method is from another
+// package, but we didn't use it for instantiation in
+ // this package, we may not yet have imported it.
+ ImportedBody(n.Func)
+ }
w.p.doInline(n)
}
@@ -1735,6 +1772,8 @@ func (w *exportWriter) expr(n ir.Node) {
n := n.(*ir.Name)
if (n.Class == ir.PEXTERN || n.Class == ir.PFUNC) && !ir.IsBlank(n) {
w.op(ir.ONONAME)
+ // Indicate that this is not an OKEY entry.
+ w.bool(false)
w.qualifiedIdent(n)
if go117ExportTypes {
w.typ(n.Type())
@@ -1761,7 +1800,9 @@ func (w *exportWriter) expr(n ir.Node) {
case ir.ONONAME:
w.op(ir.ONONAME)
- // This should only be for OKEY nodes in generic functions
+ // This can only be for OKEY nodes in generic functions. Mark it
+ // as a key entry.
+ w.bool(true)
s := n.Sym()
w.string(s.Name)
w.pkg(s.Pkg)
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
index 26bc838ed9..bc34d3933a 100644
--- a/src/cmd/compile/internal/typecheck/iimport.go
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -1315,9 +1315,15 @@ func (r *importReader) node() ir.Node {
return n
case ir.ONONAME:
+ isKey := r.bool()
n := r.qualifiedIdent()
if go117ExportTypes {
- n2 := Resolve(n)
+ var n2 ir.Node = n
+ // Key ONONAME entries should not be resolved - they should
+ // stay as identifiers.
+ if !isKey {
+ n2 = Resolve(n)
+ }
typ := r.typ()
if n2.Type() == nil {
n2.SetType(typ)
@@ -1624,11 +1630,16 @@ func (r *importReader) node() ir.Node {
return n
case ir.OADDR, ir.OPTRLIT:
- n := NodAddrAt(r.pos(), r.expr())
if go117ExportTypes {
+ pos := r.pos()
+ expr := r.expr()
+ expr.SetTypecheck(1) // we do this for all nodes after importing, but do it now so markAddrOf can see it.
+ n := NodAddrAt(pos, expr)
n.SetOp(op)
n.SetType(r.typ())
+ return n
}
+ n := NodAddrAt(r.pos(), r.expr())
return n
case ir.ODEREF:
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
index 1986845f64..93812ebda5 100644
--- a/src/cmd/compile/internal/typecheck/subr.go
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -80,7 +80,7 @@ func markAddrOf(n ir.Node) ir.Node {
if IncrementalAddrtaken {
// We can only do incremental addrtaken computation when it is ok
// to typecheck the argument of the OADDR. That's only safe after the
- // main typecheck has completed.
+// main typecheck has completed, and not while loading the inlined body.
// The argument to OADDR needs to be typechecked because &x[i] takes
// the address of x if x is an array, but not if x is a slice.
// Note: OuterValue doesn't work correctly until n is typechecked.
@@ -160,7 +160,7 @@ func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
case path != nil:
// rebuild elided dots
for c := len(path) - 1; c >= 0; c-- {
- dot := ir.NewSelectorExpr(base.Pos, ir.ODOT, n.X, path[c].field.Sym)
+ dot := ir.NewSelectorExpr(n.Pos(), ir.ODOT, n.X, path[c].field.Sym)
dot.SetImplicit(true)
dot.SetType(path[c].field.Type)
n.X = dot
@@ -173,6 +173,8 @@ func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
return n
}
+// CalcMethods calculates all the methods (including embedding) of a non-interface
+// type t.
func CalcMethods(t *types.Type) {
if t == nil || t.AllMethods().Len() != 0 {
return
@@ -976,7 +978,9 @@ func makeInstName1(name string, targs []*types.Type, hasBrackets bool) string {
// function that helps implement a method of an instantiated type). For method nodes
// on shape types, we prepend "nofunc.", because method nodes for shape types will
// have no body, and we want to avoid a name conflict with the shape-based function
-// that helps implement the same method for fully-instantiated types.
+// that helps implement the same method for fully-instantiated types. Function names
+// are also created at the end of (*Tsubster).typ1, so we append "nofunc" there as
+// well, as needed.
func MakeFuncInstSym(gf *types.Sym, targs []*types.Type, isMethodNode, hasBrackets bool) *types.Sym {
nm := makeInstName1(gf.Name, targs, hasBrackets)
if targs[0].HasShape() && isMethodNode {
@@ -1273,7 +1277,25 @@ func (ts *Tsubster) typ1(t *types.Type) *types.Type {
for i, f := range t.Methods().Slice() {
t2 := ts.typ1(f.Type)
oldsym := f.Nname.Sym()
- newsym := MakeFuncInstSym(oldsym, ts.Targs, true, true)
+
+ // Use the name of the substituted receiver to create the
+ // method name, since the receiver name may have many levels
+ // of nesting (brackets) with type names to be substituted.
+ recvType := t2.Recv().Type
+ var nm string
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ nm = "(*" + recvType.Sym().Name + ")." + f.Sym.Name
+ } else {
+ nm = recvType.Sym().Name + "." + f.Sym.Name
+ }
+ if recvType.RParams()[0].HasShape() {
+ // We add "nofunc" to methods of shape type to avoid
+ // conflict with the name of the shape-based helper
+ // function. See header comment of MakeFuncInstSym.
+ nm = "nofunc." + nm
+ }
+ newsym := oldsym.Pkg.Lookup(nm)
var nname *ir.Name
if newsym.Def != nil {
nname = newsym.Def.(*ir.Name)
@@ -1304,9 +1326,9 @@ func (ts *Tsubster) typ1(t *types.Type) *types.Type {
func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
if t.NumFields() == 0 {
if t.HasTParam() || t.HasShape() {
- // For an empty struct, we need to return a new type,
- // since it may now be fully instantiated (HasTParam
- // becomes false).
+ // For an empty struct, we need to return a new type, if
+ // substituting from a generic type or shape type, since it
+ // will change HasTParam/HasShape flags.
return types.NewStruct(t.Pkg(), nil)
}
return t
@@ -1365,10 +1387,10 @@ func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
// tinter substitutes type params in types of the methods of an interface type.
func (ts *Tsubster) tinter(t *types.Type, force bool) *types.Type {
if t.Methods().Len() == 0 {
- if t.HasTParam() {
- // For an empty interface, we need to return a new type,
- // since it may now be fully instantiated (HasTParam
- // becomes false).
+ if t.HasTParam() || t.HasShape() {
+ // For an empty interface, we need to return a new type, if
+// substituting from a generic type or shape type, since it
+// will change the HasTParam/HasShape flags.
return types.NewInterface(t.Pkg(), nil, false)
}
return t
@@ -1410,11 +1432,15 @@ func genericTypeName(sym *types.Sym) string {
// For now, we only consider two types to have the same shape, if they have exactly
// the same underlying type or they are both pointer types.
//
+// tparam is the associated typeparam. If there is a structural type for
+// the associated type param (not common), then a pointer type t is mapped to its
+// underlying type, rather than being merged with other pointers.
+//
// Shape types are also distinguished by the index of the type in a type param/arg
// list. We need to do this so we can distinguish and substitute properly for two
// type params in the same function that have the same shape for a particular
// instantiation.
-func Shapify(t *types.Type, index int) *types.Type {
+func Shapify(t *types.Type, index int, tparam *types.Type) *types.Type {
assert(!t.IsShape())
// Map all types with the same underlying type to the same shape.
u := t.Underlying()
@@ -1423,7 +1449,8 @@ func Shapify(t *types.Type, index int) *types.Type {
// TODO: Make unsafe.Pointer the same shape as normal pointers.
// Note: pointers to arrays are special because of slice-to-array-pointer
// conversions. See issue 49295.
- if u.Kind() == types.TPTR && u.Elem().Kind() != types.TARRAY {
+ if u.Kind() == types.TPTR && u.Elem().Kind() != types.TARRAY &&
+ tparam.Bound().StructuralType() == nil {
u = types.Types[types.TUINT8].PtrTo()
}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
index 42970f6a5e..f6be298667 100644
--- a/src/cmd/compile/internal/typecheck/typecheck.go
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -129,7 +129,11 @@ const (
var typecheckdefstack []*ir.Name
-// Resolve ONONAME to definition, if any.
+// Resolve resolves an ONONAME node to a definition, if any. If n is not an ONONAME node,
+// Resolve returns n unchanged. If n is an ONONAME node and not in the same package,
+// then n.Sym() is resolved using import data. Otherwise, Resolve returns
+// n.Sym().Def. An ONONAME node can be created using ir.NewIdent(), so an imported
+// symbol can be resolved via Resolve(ir.NewIdent(src.NoXPos, sym)).
func Resolve(n ir.Node) (res ir.Node) {
if n == nil || n.Op() != ir.ONONAME {
return n
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
index 23fc4221e1..e1b395559a 100644
--- a/src/cmd/compile/internal/types/fmt.go
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -140,11 +140,17 @@ func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
}
func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+ name := s.Name
if q := pkgqual(s.Pkg, verb, mode); q != "" {
b.WriteString(q)
b.WriteByte('.')
+ if mode == fmtTypeIDName {
+ // If name is a generic instantiation, it might have local package placeholders
+ // in it. Replace those placeholders with the package name. See issue 49547.
+ name = strings.Replace(name, LocalPkg.Prefix, q, -1)
+ }
}
- b.WriteString(s.Name)
+ b.WriteString(name)
}
// pkgqual returns the qualifier that should be used for printing
@@ -322,8 +328,8 @@ func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type
return
}
- if t == ByteType || t == RuneType {
- // in %-T mode collapse rune and byte with their originals.
+ if t == AnyType || t == ByteType || t == RuneType {
+ // in %-T mode collapse predeclared aliases with their originals.
switch mode {
case fmtTypeIDName, fmtTypeID:
t = Types[t.Kind()]
@@ -625,6 +631,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
}
var name string
+ nameSep := " "
if verb != 'S' {
s := f.Sym
@@ -633,7 +640,47 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
s = OrigSym(s)
}
- if s != nil && f.Embedded == 0 {
+ // Using type aliases and embedded fields, it's possible to
+ // construct types that can't be directly represented as a
+ // type literal. For example, given "type Int = int" (#50190),
+ // it would be incorrect to format "struct{ Int }" as either
+ // "struct{ int }" or "struct{ Int int }", because those each
+ // represent other, distinct types.
+ //
+ // So for the purpose of LinkString (i.e., fmtTypeID), we use
+ // the non-standard syntax "struct{ Int = int }" to represent
+ // embedded fields that have been renamed through the use of
+ // type aliases.
+ if f.Embedded != 0 {
+ if mode == fmtTypeID {
+ nameSep = " = "
+
+ // Compute tsym, the symbol that would normally be used as
+ // the field name when embedding f.Type.
+ // TODO(mdempsky): Check for other occurences of this logic
+ // and deduplicate.
+ typ := f.Type
+ if typ.IsPtr() {
+ base.Assertf(typ.Sym() == nil, "embedded pointer type has name: %L", typ)
+ typ = typ.Elem()
+ }
+ tsym := typ.Sym()
+
+ // If the field name matches the embedded type's name, then
+ // suppress printing of the field name. For example, format
+ // "struct{ T }" as simply that instead of "struct{ T = T }".
+ if tsym != nil && (s == tsym || IsExported(tsym.Name) && s.Name == tsym.Name) {
+ s = nil
+ }
+ } else {
+ // Suppress the field name for embedded fields for
+ // non-LinkString formats, to match historical behavior.
+ // TODO(mdempsky): Re-evaluate this.
+ s = nil
+ }
+ }
+
+ if s != nil {
if funarg != FunargNone {
name = fmt.Sprint(f.Nname)
} else if verb == 'L' {
@@ -652,7 +699,7 @@ func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Ty
if name != "" {
b.WriteString(name)
- b.WriteString(" ")
+ b.WriteString(nameSep)
}
if f.IsDDD() {
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
index dce7d29143..60a0f2e7c5 100644
--- a/src/cmd/compile/internal/types/identity.go
+++ b/src/cmd/compile/internal/types/identity.go
@@ -58,6 +58,14 @@ func identical(t1, t2 *Type, flags int, assumedEqual map[typePair]struct{}) bool
return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
case TINT32:
return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
+ case TINTER:
+ // Make sure named any type matches any unnamed empty interface
+ // (but not a shape type, if identStrict).
+ isUnnamedEface := func(t *Type) bool { return t.IsEmptyInterface() && t.Sym() == nil }
+ if flags&identStrict != 0 {
+ return t1 == AnyType && isUnnamedEface(t2) && !t2.HasShape() || t2 == AnyType && isUnnamedEface(t1) && !t1.HasShape()
+ }
+ return t1 == AnyType && isUnnamedEface(t2) || t2 == AnyType && isUnnamedEface(t1)
default:
return false
}
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
index fe42049cee..b159eb5eeb 100644
--- a/src/cmd/compile/internal/types/pkg.go
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -9,7 +9,6 @@ import (
"cmd/internal/objabi"
"fmt"
"sort"
- "strings"
"sync"
)
@@ -49,9 +48,11 @@ func NewPkg(path, name string) *Pkg {
p := new(Pkg)
p.Path = path
p.Name = name
- if strings.HasPrefix(path, "go.") {
- // Special compiler-internal packages don't need to be escaped.
- // This particularly helps with the go.shape package.
+ if path == "go.shape" {
+ // Don't escape "go.shape", since it's not needed (it's a builtin
+ // package), and we don't want escape codes showing up in shape type
+ // names, which also appear in names of function/method
+ // instantiations.
p.Prefix = path
} else {
p.Prefix = objabi.PathToPrefix(path)
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
index 0f3db06c1d..fb6accdc64 100644
--- a/src/cmd/compile/internal/types/size.go
+++ b/src/cmd/compile/internal/types/size.go
@@ -450,16 +450,21 @@ func CalcSize(t *Type) {
CheckSize(t.Elem())
- // make fake type to check later to
- // trigger channel argument check.
+ // Make fake type to trigger channel element size check after
+ // any top-level recursive type has been completed.
t1 := NewChanArgs(t)
CheckSize(t1)
case TCHANARGS:
t1 := t.ChanArgs()
CalcSize(t1) // just in case
+ // Make sure size of t1.Elem() is calculated at this point. We can
+ // use CalcSize() here rather than CheckSize(), because the top-level
+ // (possibly recursive) type will have been calculated before the fake
+ // chanargs is handled.
+ CalcSize(t1.Elem())
if t1.Elem().width >= 1<<16 {
- base.ErrorfAt(typePos(t1), "channel element type too large (>64kB)")
+ base.Errorf("channel element type too large (>64kB)")
}
w = 1 // anything will do
@@ -492,7 +497,7 @@ func CalcSize(t *Type) {
if t.Elem().width != 0 {
cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().width)
if uint64(t.NumElem()) > cap {
- base.ErrorfAt(typePos(t), "type %L larger than address space", t)
+ base.Errorf("type %L larger than address space", t)
}
}
w = t.NumElem() * t.Elem().width
@@ -539,7 +544,7 @@ func CalcSize(t *Type) {
}
if PtrSize == 4 && w != int64(int32(w)) {
- base.ErrorfAt(typePos(t), "type %v too large", t)
+ base.Errorf("type %v too large", t)
}
t.width = w
diff --git a/src/cmd/compile/internal/types/structuraltype.go b/src/cmd/compile/internal/types/structuraltype.go
new file mode 100644
index 0000000000..ee1341be21
--- /dev/null
+++ b/src/cmd/compile/internal/types/structuraltype.go
@@ -0,0 +1,191 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// Implementation of structural type computation for types.
+
+// TODO: we would like to depend only on the types2 computation of structural type,
+// but we can only do that the next time we change the export format and export
+// structural type info along with each constraint type, since the compiler imports
+// types directly into types1 format.
+
+// A term describes elementary type sets:
+//
+// term{false, T} set of type T
+// term{true, T} set of types with underlying type t
+// term{} empty set (we specifically check for typ == nil)
+type term struct {
+ tilde bool
+ typ *Type
+}
+
+// StructuralType returns the structural type of an interface, or nil if it has no
+// structural type.
+func (t *Type) StructuralType() *Type {
+ sts, _ := specificTypes(t)
+ var su *Type
+ for _, st := range sts {
+ u := st.typ.Underlying()
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return nil
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ }
+ return su
+}
+
+// If x and y are identical, match returns x.
+// If x and y are identical channels but for their direction
+// and one of them is unrestricted, match returns the channel
+// with the restricted direction.
+// In all other cases, match returns nil.
+// x and y are assumed to be underlying types, hence are not named types.
+func match(x, y *Type) *Type {
+ if IdenticalStrict(x, y) {
+ return x
+ }
+
+ if x.IsChan() && y.IsChan() && IdenticalStrict(x.Elem(), y.Elem()) {
+ // We have channels that differ in direction only.
+ // If there's an unrestricted channel, select the restricted one.
+ // If both have the same direction, return x (either is fine).
+ switch {
+ case x.ChanDir().CanSend() && x.ChanDir().CanRecv():
+ return y
+ case y.ChanDir().CanSend() && y.ChanDir().CanRecv():
+ return x
+ }
+ }
+ return nil
+}
+
+// specificTypes returns the list of specific types of an interface type or nil if
+// there are none. It also returns a flag that indicates, for an empty term list
+// result, whether it represents the empty set, or the infinite set of all types (in
+// both cases, there are no specific types).
+func specificTypes(t *Type) (list []term, inf bool) {
+ t.wantEtype(TINTER)
+
+ // We have infinite term list before processing any type elements
+ // (or if there are no type elements).
+ inf = true
+ for _, m := range t.Methods().Slice() {
+ var r2 []term
+ inf2 := false
+
+ switch {
+ case m.IsMethod():
+ inf2 = true
+
+ case m.Type.IsUnion():
+ nt := m.Type.NumTerms()
+ for i := 0; i < nt; i++ {
+ t, tilde := m.Type.Term(i)
+ if t.IsInterface() {
+ r3, r3inf := specificTypes(t)
+ if r3inf {
+ // Union with an infinite set of types is
+ // infinite, so skip remaining terms.
+ r2 = nil
+ inf2 = true
+ break
+ }
+ // Add the elements of r3 to r2.
+ for _, r3e := range r3 {
+ r2 = insertType(r2, r3e)
+ }
+ } else {
+ r2 = insertType(r2, term{tilde, t})
+ }
+ }
+
+ case m.Type.IsInterface():
+ r2, inf2 = specificTypes(m.Type)
+
+ default:
+ // m.Type is a single non-interface type, so r2 is just a
+ // one-element list, inf2 is false.
+ r2 = []term{{false, m.Type}}
+ }
+
+ if inf2 {
+ // If the current type element has infinite types,
+ // its intersection with r is just r, so skip this type element.
+ continue
+ }
+
+ if inf {
+ // If r is infinite, then the intersection of r and r2 is just r2.
+ list = r2
+ inf = false
+ continue
+ }
+
+ // r and r2 are finite, so intersect r and r2.
+ var r3 []term
+ for _, re := range list {
+ for _, r2e := range r2 {
+ if tm := intersect(re, r2e); tm.typ != nil {
+ r3 = append(r3, tm)
+ }
+ }
+ }
+ list = r3
+ }
+ return
+}
+
+// insertType adds t to the returned list if it is not already in list.
+func insertType(list []term, tm term) []term {
+ for i, elt := range list {
+ if new := union(elt, tm); new.typ != nil {
+ // Replace existing elt with the union of elt and new.
+ list[i] = new
+ return list
+ }
+ }
+ return append(list, tm)
+}
+
+// If x and y are disjoint, return term with nil typ (which means the union should
+// include both types). If x and y are not disjoint, return the single type which is
+// the union of x and y.
+func union(x, y term) term {
+ if disjoint(x, y) {
+ return term{false, nil}
+ }
+ if x.tilde || !y.tilde {
+ return x
+ }
+ return y
+}
+
+// intersect returns the intersection x ∩ y.
+func intersect(x, y term) term {
+ if disjoint(x, y) {
+ return term{false, nil}
+ }
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// disjoint reports whether x ∩ y == ∅.
+func disjoint(x, y term) bool {
+ ux := x.typ
+ if y.tilde {
+ ux = ux.Underlying()
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = uy.Underlying()
+ }
+ return !IdenticalStrict(ux, uy)
+}
diff --git a/src/cmd/compile/internal/types/structuraltype_test.go b/src/cmd/compile/internal/types/structuraltype_test.go
new file mode 100644
index 0000000000..fc34458338
--- /dev/null
+++ b/src/cmd/compile/internal/types/structuraltype_test.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that StructuralType() calculates the correct value of structural type for
+// unusual cases.
+
+package types
+
+import (
+ "cmd/internal/src"
+ "testing"
+)
+
+type test struct {
+ typ *Type
+ structuralType *Type
+}
+
+func TestStructuralType(t *testing.T) {
+ // These are the few constants that need to be initialized in order to use
+ // the types package without using the typecheck package by calling
+ // typecheck.InitUniverse() (the normal way to initialize the types package).
+ PtrSize = 8
+ RegSize = 8
+ MaxWidth = 1 << 50
+
+ // type intType = int
+ intType := newType(TINT)
+ // type structf = struct { f int }
+ structf := NewStruct(nil, []*Field{
+ NewField(src.NoXPos, LocalPkg.Lookup("f"), intType),
+ })
+
+ // type Sf structf
+ Sf := newType(TFORW)
+ Sf.sym = LocalPkg.Lookup("Sf")
+ Sf.SetUnderlying(structf)
+
+ // type A int
+ A := newType(TFORW)
+ A.sym = LocalPkg.Lookup("A")
+ A.SetUnderlying(intType)
+
+ // type B int
+ B := newType(TFORW)
+ B.sym = LocalPkg.Lookup("B")
+ B.SetUnderlying(intType)
+
+ emptyInterface := NewInterface(BuiltinPkg, []*Field{}, false)
+ any := newType(TFORW)
+ any.sym = LocalPkg.Lookup("any")
+ any.SetUnderlying(emptyInterface)
+
+ // The tests marked NONE have no structural type; all the others have a
+ // structural type of structf - "struct { f int }"
+ tests := []*test{
+ {
+ // interface { struct { f int } }
+ embed(structf),
+ structf,
+ },
+ {
+ // interface { struct { f int }; any }
+ embed(structf, any),
+ structf,
+ },
+ {
+ // interface { Sf }
+ embed(Sf),
+ structf,
+ },
+ {
+ // interface { any | Sf }
+ embed(any, Sf),
+ structf,
+ },
+ {
+ // interface { struct { f int }; Sf } - NONE
+ embed(structf, Sf),
+ nil,
+ },
+ {
+ // interface { struct { f int } | ~struct { f int } }
+ embed(NewUnion([]*Type{structf, structf}, []bool{false, true})),
+ structf,
+ },
+ {
+ // interface { ~struct { f int } ; Sf }
+ embed(NewUnion([]*Type{structf}, []bool{true}), Sf),
+ structf,
+ },
+ {
+ // interface { struct { f int } ; Sf } - NONE
+ embed(NewUnion([]*Type{structf}, []bool{false}), Sf),
+ nil,
+ },
+ {
+ // interface { Sf | A; B | Sf}
+ embed(NewUnion([]*Type{Sf, A}, []bool{false, false}),
+ NewUnion([]*Type{B, Sf}, []bool{false, false})),
+ structf,
+ },
+ {
+ // interface { Sf | A; A | Sf } - NONE
+ embed(NewUnion([]*Type{Sf, A}, []bool{false, false}),
+ NewUnion([]*Type{A, Sf}, []bool{false, false})),
+ nil,
+ },
+ {
+ // interface { Sf | any } - NONE
+ embed(NewUnion([]*Type{Sf, any}, []bool{false, false})),
+ nil,
+ },
+ {
+ // interface { Sf | any; Sf }
+ embed(NewUnion([]*Type{Sf, any}, []bool{false, false}), Sf),
+ structf,
+ },
+ }
+ for _, tst := range tests {
+ if got, want := tst.typ.StructuralType(), tst.structuralType; got != want {
+ t.Errorf("StructuralType(%v) = %v, wanted %v",
+ tst.typ, got, want)
+ }
+ }
+}
+
+func embed(types ...*Type) *Type {
+ fields := make([]*Field, len(types))
+ for i, t := range types {
+ fields[i] = NewField(src.NoXPos, nil, t)
+ }
+ return NewInterface(LocalPkg, fields, false)
+}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
index 6288df30d6..fe352e0b6e 100644
--- a/src/cmd/compile/internal/types/type.go
+++ b/src/cmd/compile/internal/types/type.go
@@ -75,7 +75,7 @@ const (
TNIL
TBLANK
- // pseudo-types for frame layout
+ // pseudo-types used temporarily only during frame layout (CalcSize())
TFUNCARGS
TCHANARGS
@@ -106,12 +106,16 @@ const (
// It also stores pointers to several special types:
// - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes.
// - Types[TBLANK] represents the blank variable's type.
+// - Types[TINTER] is the canonical "interface{}" type.
// - Types[TNIL] represents the predeclared "nil" value's type.
// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
var Types [NTYPE]*Type
var (
- // Predeclared alias types. Kept separate for better error messages.
+ // Predeclared alias types. These are actually created as distinct
+ // defined types for better error messages, but are then specially
+ // treated as identical to their respective underlying types.
+ AnyType *Type
ByteType *Type
RuneType *Type
@@ -119,8 +123,6 @@ var (
ErrorType *Type
// Predeclared comparable interface type.
ComparableType *Type
- // Predeclared any interface type.
- AnyType *Type
// Types to represent untyped string and boolean constants.
UntypedString = newType(TSTRING)
@@ -134,6 +136,14 @@ var (
)
// A Type represents a Go type.
+//
+// There may be multiple unnamed types with identical structure. However, there must
+// be a unique Type object for each unique named (defined) type. After noding, a
+// package-level type can be looked up by building its unique symbol sym (sym =
+// package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type
+// already exists at package scope and is available at sym.Def.(*ir.Name).Type().
+// Local types (which may have the same name as a package-level type) are
+// distinguished by the value of vargen.
type Type struct {
// extra contains extra etype-specific fields.
// As an optimization, those etype-specific structs which contain exactly
@@ -152,6 +162,7 @@ type Type struct {
// TSLICE: Slice
// TSSA: string
// TTYPEPARAM: *Typeparam
+ // TUNION: *Union
extra interface{}
// width is the width of this Type in bytes.
@@ -228,7 +239,7 @@ func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
// Generic types should never have alg functions.
func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b); t.flags.set(typeNoalg, b) }
-// Should always do SetHasShape(true) when doing SeIsShape(true).
+// Should always do SetHasShape(true) when doing SetIsShape(true).
func (t *Type) SetIsShape(b bool) { t.flags.set(typeIsShape, b) }
func (t *Type) SetHasShape(b bool) { t.flags.set(typeHasShape, b) }
@@ -492,13 +503,17 @@ type Field struct {
Embedded uint8 // embedded field
- Pos src.XPos
+ Pos src.XPos
+
+ // Name of field/method/parameter. Can be nil for interface fields embedded
+ // in interfaces and unnamed parameters.
Sym *Sym
Type *Type // field type
Note string // literal string annotation
- // For fields that represent function parameters, Nname points
- // to the associated ONAME Node.
+ // For fields that represent function parameters, Nname points to the
+ // associated ONAME Node. For fields that represent methods, Nname points to
+ // the function name node.
Nname Object
// Offset in bytes of this field or method within its enclosing struct
@@ -1016,7 +1031,9 @@ func (t *Type) Methods() *Fields {
}
// AllMethods returns a pointer to all the methods (including embedding) for type t.
-// For an interface type, this is the set of methods that are typically iterated over.
+// For an interface type, this is the set of methods that are typically iterated
+// over. For non-interface types, AllMethods() only returns a valid result after
+// CalcMethods() has been called at least once.
func (t *Type) AllMethods() *Fields {
if t.kind == TINTER {
// Calculate the full method set of an interface type on the fly
@@ -1207,6 +1224,12 @@ func (t *Type) cmp(x *Type) Cmp {
if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
return CMPeq
}
+
+ case TINTER:
+ // Make sure named any type matches any empty interface.
+ if t == AnyType && x.IsEmptyInterface() || x == AnyType && t.IsEmptyInterface() {
+ return CMPeq
+ }
}
}
@@ -1741,8 +1764,9 @@ func (t *Type) SetVargen() {
t.vargen = typeGen
}
-// SetUnderlying sets the underlying type. SetUnderlying automatically updates any
-// types that were waiting for this type to be completed.
+// SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind
+// is currently TFORW). SetUnderlying automatically updates any types that were waiting
+// for this type to be completed.
func (t *Type) SetUnderlying(underlying *Type) {
if underlying.kind == TFORW {
// This type isn't computed yet; when it is, update n.
@@ -2202,4 +2226,5 @@ var (
var SimType [NTYPE]Kind
+// Fake package for shape types (see typecheck.Shapify()).
var ShapePkg = NewPkg("go.shape", "go.shape")
diff --git a/src/cmd/compile/internal/types/universe.go b/src/cmd/compile/internal/types/universe.go
index d5239eb10c..55ed7bd6d0 100644
--- a/src/cmd/compile/internal/types/universe.go
+++ b/src/cmd/compile/internal/types/universe.go
@@ -57,8 +57,9 @@ func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
SimType[et] = et
}
- Types[TANY] = newType(TANY)
+ Types[TANY] = newType(TANY) // note: an old placeholder type, NOT the new builtin 'any' alias for interface{}
Types[TINTER] = NewInterface(LocalPkg, nil, false)
+ CheckSize(Types[TINTER])
defBasic := func(kind Kind, pkg *Pkg, name string) *Type {
typ := newType(kind)
@@ -90,6 +91,7 @@ func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
// int32 Hence, (bytetype|runtype).Sym.isAlias() is false.
// TODO(gri) Should we get rid of this special case (at the cost
// of less informative error messages involving bytes and runes)?
+ // NOTE(rsc): No, the error message quality is important.
// (Alternatively, we could introduce an OTALIAS node representing
// type aliases, albeit at the cost of having to deal with it everywhere).
ByteType = defBasic(TUINT8, BuiltinPkg, "byte")
@@ -108,11 +110,13 @@ func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
ResumeCheckSize()
// any type (interface)
- if base.Flag.G > 0 {
- DeferCheckSize()
- AnyType = defBasic(TFORW, BuiltinPkg, "any")
- AnyType.SetUnderlying(NewInterface(NoPkg, []*Field{}, false))
- ResumeCheckSize()
+ DeferCheckSize()
+ AnyType = defBasic(TFORW, BuiltinPkg, "any")
+ AnyType.SetUnderlying(NewInterface(BuiltinPkg, []*Field{}, false))
+ ResumeCheckSize()
+
+ if base.Flag.G == 0 {
+ ComparableType.Sym().Def = nil
}
Types[TUNSAFEPTR] = defBasic(TUNSAFEPTR, UnsafePkg, "Pointer")
@@ -148,8 +152,8 @@ func makeErrorInterface() *Type {
return NewInterface(NoPkg, []*Field{method}, false)
}
+// makeComparableInterface makes the predefined "comparable" interface in the
+// built-in package. It has a unique name, but no methods.
func makeComparableInterface() *Type {
- sig := NewSignature(NoPkg, FakeRecv(), nil, nil, nil)
- method := NewField(src.NoXPos, LocalPkg.Lookup("=="), sig)
- return NewInterface(NoPkg, []*Field{method}, false)
+ return NewInterface(NoPkg, nil, false)
}
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
index 83c4b02abf..ee4f275bc0 100644
--- a/src/cmd/compile/internal/types2/api.go
+++ b/src/cmd/compile/internal/types2/api.go
@@ -55,17 +55,14 @@ func (err Error) FullError() string {
return fmt.Sprintf("%s: %s", err.Pos, err.Full)
}
-// An ArgumentError holds an error that is associated with an argument.
+// An ArgumentError holds an error associated with an argument index.
type ArgumentError struct {
- index int
- error
+ Index int
+ Err error
}
-// Index returns the positional index of the argument associated with the
-// error.
-func (e ArgumentError) Index() int {
- return e.index
-}
+func (e *ArgumentError) Error() string { return e.Err.Error() }
+func (e *ArgumentError) Unwrap() error { return e.Err }
// An Importer resolves import paths to Packages.
//
@@ -268,6 +265,7 @@ type Info struct {
//
// *syntax.File
// *syntax.FuncType
+ // *syntax.TypeDecl
// *syntax.BlockStmt
// *syntax.IfStmt
// *syntax.SwitchStmt
@@ -443,8 +441,16 @@ func ConvertibleTo(V, T Type) bool {
// Implements reports whether type V implements interface T.
func Implements(V Type, T *Interface) bool {
- f, _ := MissingMethod(V, T, true)
- return f == nil
+ if T.Empty() {
+ // All types (even Typ[Invalid]) implement the empty interface.
+ return true
+ }
+ // Checker.implements suppresses errors for invalid types, so we need special
+ // handling here.
+ if V.Underlying() == Typ[Invalid] {
+ return false
+ }
+ return (*Checker)(nil).implements(V, T) == nil
}
// Identical reports whether x and y are identical types.
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
index 30cfbe0ee4..80e998ebee 100644
--- a/src/cmd/compile/internal/types2/api_test.go
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -7,6 +7,7 @@ package types2_test
import (
"bytes"
"cmd/compile/internal/syntax"
+ "errors"
"fmt"
"internal/testenv"
"reflect"
@@ -17,21 +18,14 @@ import (
. "cmd/compile/internal/types2"
)
-// genericPkg is a source prefix for packages that contain generic code.
-const genericPkg = "package generic_"
-
// brokenPkg is a source prefix for packages that are not expected to parse
// or type-check cleanly. They are always parsed assuming that they contain
// generic code.
const brokenPkg = "package broken_"
func parseSrc(path, src string) (*syntax.File, error) {
- var mode syntax.Mode
- if strings.HasPrefix(src, genericPkg) || strings.HasPrefix(src, brokenPkg) {
- mode = syntax.AllowGenerics
- }
errh := func(error) {} // dummy error handler so that parsing continues in presence of errors
- return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, mode)
+ return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, syntax.AllowGenerics|syntax.AllowMethodTypeParams)
}
func pkgFor(path, source string, info *Info) (*Package, error) {
@@ -115,7 +109,6 @@ func TestValuesInfo(t *testing.T) {
{`package c5d; var _ = string(65)`, `65`, `untyped int`, `65`},
{`package c5e; var _ = string('A')`, `'A'`, `untyped rune`, `65`},
{`package c5f; type T string; var _ = T('A')`, `'A'`, `untyped rune`, `65`},
- {`package c5g; var s uint; var _ = string(1 << s)`, `1 << s`, `untyped int`, ``},
{`package d0; var _ = []byte("foo")`, `"foo"`, `string`, `"foo"`},
{`package d1; var _ = []byte(string("foo"))`, `"foo"`, `string`, `"foo"`},
@@ -326,28 +319,53 @@ func TestTypesInfo(t *testing.T) {
{brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
// parameterized functions
- {genericPkg + `p0; func f[T any](T) {}; var _ = f[int]`, `f`, `func[T interface{}](T)`},
- {genericPkg + `p1; func f[T any](T) {}; var _ = f[int]`, `f[int]`, `func(int)`},
- {genericPkg + `p2; func f[T any](T) {}; func _() { f(42) }`, `f`, `func(int)`},
- {genericPkg + `p3; func f[T any](T) {}; func _() { f[int](42) }`, `f[int]`, `func(int)`},
- {genericPkg + `p4; func f[T any](T) {}; func _() { f[int](42) }`, `f`, `func[T interface{}](T)`},
- {genericPkg + `p5; func f[T any](T) {}; func _() { f(42) }`, `f(42)`, `()`},
+ {`package p0; func f[T any](T) {}; var _ = f[int]`, `f`, `func[T any](T)`},
+ {`package p1; func f[T any](T) {}; var _ = f[int]`, `f[int]`, `func(int)`},
+ {`package p2; func f[T any](T) {}; func _() { f(42) }`, `f`, `func(int)`},
+ {`package p3; func f[T any](T) {}; func _() { f[int](42) }`, `f[int]`, `func(int)`},
+ {`package p4; func f[T any](T) {}; func _() { f[int](42) }`, `f`, `func[T any](T)`},
+ {`package p5; func f[T any](T) {}; func _() { f(42) }`, `f(42)`, `()`},
// type parameters
- {genericPkg + `t0; type t[] int; var _ t`, `t`, `generic_t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
- {genericPkg + `t1; type t[P any] int; var _ t[int]`, `t`, `generic_t1.t[P interface{}]`},
- {genericPkg + `t2; type t[P interface{}] int; var _ t[int]`, `t`, `generic_t2.t[P interface{}]`},
- {genericPkg + `t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `generic_t3.t[P, Q interface{}]`},
+ {`package t0; type t[] int; var _ t`, `t`, `t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
+ {`package t1; type t[P any] int; var _ t[int]`, `t`, `t1.t[P any]`},
+ {`package t2; type t[P interface{}] int; var _ t[int]`, `t`, `t2.t[P interface{}]`},
+ {`package t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `t3.t[P, Q interface{}]`},
{brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P, Q interface{m()}]`},
// instantiated types must be sanitized
- {genericPkg + `g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `generic_g0.t[int]`},
+ {`package g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `g0.t[int]`},
// issue 45096
- {genericPkg + `issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `T`},
+ {`package issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `T`},
// issue 47895
{`package p; import "unsafe"; type S struct { f int }; var s S; var _ = unsafe.Offsetof(s.f)`, `s.f`, `int`},
+
+ // issue 50093
+ {`package u0a; func _[_ interface{int}]() {}`, `int`, `int`},
+ {`package u1a; func _[_ interface{~int}]() {}`, `~int`, `~int`},
+ {`package u2a; func _[_ interface{int|string}]() {}`, `int | string`, `int|string`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `int | string`, `int|string`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `~bool`, `~bool`},
+ {`package u3a; func _[_ interface{int|string|~float64|~bool}]() {}`, `int | string | ~float64`, `int|string|~float64`},
+
+ {`package u0b; func _[_ int]() {}`, `int`, `int`},
+ {`package u1b; func _[_ ~int]() {}`, `~int`, `~int`},
+ {`package u2b; func _[_ int|string]() {}`, `int | string`, `int|string`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `int | string`, `int|string`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `~bool`, `~bool`},
+ {`package u3b; func _[_ int|string|~float64|~bool]() {}`, `int | string | ~float64`, `int|string|~float64`},
+
+ {`package u0c; type _ interface{int}`, `int`, `int`},
+ {`package u1c; type _ interface{~int}`, `~int`, `~int`},
+ {`package u2c; type _ interface{int|string}`, `int | string`, `int|string`},
+ {`package u3c; type _ interface{int|string|~bool}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3c; type _ interface{int|string|~bool}`, `int | string`, `int|string`},
+ {`package u3c; type _ interface{int|string|~bool}`, `~bool`, `~bool`},
+ {`package u3c; type _ interface{int|string|~float64|~bool}`, `int | string | ~float64`, `int|string|~float64`},
}
for _, test := range tests {
@@ -391,138 +409,138 @@ func TestInstanceInfo(t *testing.T) {
targs []string
typ string
}{
- {genericPkg + `p0; func f[T any](T) {}; func _() { f(42) }`,
+ {`package p0; func f[T any](T) {}; func _() { f(42) }`,
`f`,
[]string{`int`},
`func(int)`,
},
- {genericPkg + `p1; func f[T any](T) T { panic(0) }; func _() { f('@') }`,
+ {`package p1; func f[T any](T) T { panic(0) }; func _() { f('@') }`,
`f`,
[]string{`rune`},
`func(rune) rune`,
},
- {genericPkg + `p2; func f[T any](...T) T { panic(0) }; func _() { f(0i) }`,
+ {`package p2; func f[T any](...T) T { panic(0) }; func _() { f(0i) }`,
`f`,
[]string{`complex128`},
`func(...complex128) complex128`,
},
- {genericPkg + `p3; func f[A, B, C any](A, *B, []C) {}; func _() { f(1.2, new(string), []byte{}) }`,
+ {`package p3; func f[A, B, C any](A, *B, []C) {}; func _() { f(1.2, new(string), []byte{}) }`,
`f`,
[]string{`float64`, `string`, `byte`},
`func(float64, *string, []byte)`,
},
- {genericPkg + `p4; func f[A, B any](A, *B, ...[]B) {}; func _() { f(1.2, new(byte)) }`,
+ {`package p4; func f[A, B any](A, *B, ...[]B) {}; func _() { f(1.2, new(byte)) }`,
`f`,
[]string{`float64`, `byte`},
`func(float64, *byte, ...[]byte)`,
},
// we don't know how to translate these but we can type-check them
- {genericPkg + `q0; type T struct{}; func (T) m[P any](P) {}; func _(x T) { x.m(42) }`,
+ {`package q0; type T struct{}; func (T) m[P any](P) {}; func _(x T) { x.m(42) }`,
`m`,
[]string{`int`},
`func(int)`,
},
- {genericPkg + `q1; type T struct{}; func (T) m[P any](P) P { panic(0) }; func _(x T) { x.m(42) }`,
+ {`package q1; type T struct{}; func (T) m[P any](P) P { panic(0) }; func _(x T) { x.m(42) }`,
`m`,
[]string{`int`},
`func(int) int`,
},
- {genericPkg + `q2; type T struct{}; func (T) m[P any](...P) P { panic(0) }; func _(x T) { x.m(42) }`,
+ {`package q2; type T struct{}; func (T) m[P any](...P) P { panic(0) }; func _(x T) { x.m(42) }`,
`m`,
[]string{`int`},
`func(...int) int`,
},
- {genericPkg + `q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C) {}; func _(x T) { x.m(1.2, new(string), []byte{}) }`,
+ {`package q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C) {}; func _(x T) { x.m(1.2, new(string), []byte{}) }`,
`m`,
[]string{`float64`, `string`, `byte`},
`func(float64, *string, []byte)`,
},
- {genericPkg + `q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B) {}; func _(x T) { x.m(1.2, new(byte)) }`,
+ {`package q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B) {}; func _(x T) { x.m(1.2, new(byte)) }`,
`m`,
[]string{`float64`, `byte`},
`func(float64, *byte, ...[]byte)`,
},
- {genericPkg + `r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q) {}; func _[P any](x T[P]) { x.m(42) }`,
+ {`package r0; type T[P any] struct{}; func (_ T[P]) m[Q any](Q) {}; func _[P any](x T[P]) { x.m(42) }`,
`m`,
[]string{`int`},
`func(int)`,
},
// TODO(gri) record method type parameters in syntax.FuncType so we can check this
- // {genericPkg + `r1; type T interface{ m[P any](P) }; func _(x T) { x.m(4.2) }`,
+ // {`package r1; type T interface{ m[P any](P) }; func _(x T) { x.m(4.2) }`,
// `x.m`,
// []string{`float64`},
// `func(float64)`,
// },
- {genericPkg + `s1; func f[T any, P interface{~*T}](x T) {}; func _(x string) { f(x) }`,
+ {`package s1; func f[T any, P interface{~*T}](x T) {}; func _(x string) { f(x) }`,
`f`,
[]string{`string`, `*string`},
`func(x string)`,
},
- {genericPkg + `s2; func f[T any, P interface{~*T}](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s2; func f[T any, P interface{~*T}](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `*int`},
`func(x []int)`,
},
- {genericPkg + `s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s3; type C[T any] interface{~chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`},
`func(x []int)`,
},
- {genericPkg + `s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
+ {`package s4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func(x []int)`,
},
- {genericPkg + `t1; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = f[string] }`,
+ {`package t1; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = f[string] }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t2; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
+ {`package t2; func f[T any, P interface{~*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
`f`,
[]string{`string`, `*string`},
`func() string`,
},
- {genericPkg + `t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t3; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
- {genericPkg + `t4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ {`package t4; type C[T any] interface{~chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
`f`,
[]string{`int`, `chan<- int`, `chan<- []*chan<- int`},
`func() []int`,
},
- {genericPkg + `i0; import lib "generic_lib"; func _() { lib.F(42) }`,
+ {`package i0; import lib "generic_lib"; func _() { lib.F(42) }`,
`F`,
[]string{`int`},
`func(int)`,
},
- {genericPkg + `type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
+ {`package type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
`T`,
[]string{`int`},
`struct{x int}`,
},
- {genericPkg + `type1; type T[P interface{~int}] struct{ x P }; var _ (T[int])`,
+ {`package type1; type T[P interface{~int}] struct{ x P }; var _ (T[int])`,
`T`,
[]string{`int`},
`struct{x int}`,
},
- {genericPkg + `type2; type T[P interface{~int}] struct{ x P }; var _ T[(int)]`,
+ {`package type2; type T[P interface{~int}] struct{ x P }; var _ T[(int)]`,
`T`,
[]string{`int`},
`struct{x int}`,
},
- {genericPkg + `type3; type T[P1 interface{~[]P2}, P2 any] struct{ x P1; y P2 }; var _ T[[]int, int]`,
+ {`package type3; type T[P1 interface{~[]P2}, P2 any] struct{ x P1; y P2 }; var _ T[[]int, int]`,
`T`,
[]string{`[]int`, `int`},
`struct{x []int; y int}`,
},
- {genericPkg + `type4; import lib "generic_lib"; var _ lib.T[int]`,
+ {`package type4; import lib "generic_lib"; var _ lib.T[int]`,
`T`,
[]string{`int`},
`[]int`,
@@ -622,6 +640,11 @@ func TestDefsInfo(t *testing.T) {
{`package p3; type x int`, `x`, `type p3.x int`},
{`package p4; func f()`, `f`, `func p4.f()`},
{`package p5; func f() int { x, _ := 1, 2; return x }`, `_`, `var _ int`},
+
+ // Tests using generics.
+ {`package g0; type x[T any] int`, `x`, `type g0.x[T any] int`},
+ {`package g1; func f[T any]() {}`, `f`, `func g1.f[T any]()`},
+ {`package g2; type x[T any] int; func (*x[_]) m() {}`, `m`, `func (*g2.x[_]).m()`},
}
for _, test := range tests {
@@ -660,6 +683,33 @@ func TestUsesInfo(t *testing.T) {
{`package p2; func _() { _ = x }; var x int`, `x`, `var p2.x int`},
{`package p3; func _() { type _ x }; type x int`, `x`, `type p3.x int`},
{`package p4; func _() { _ = f }; func f()`, `f`, `func p4.f()`},
+
+ // Tests using generics.
+ {`package g0; func _[T any]() { _ = x }; const x = 42`, `x`, `const g0.x untyped int`},
+ {`package g1; func _[T any](x T) { }`, `T`, `type parameter T any`},
+ {`package g2; type N[A any] int; var _ N[int]`, `N`, `type g2.N[A any] int`},
+ {`package g3; type N[A any] int; func (N[_]) m() {}`, `N`, `type g3.N[A any] int`},
+
+ // Uses of fields are instantiated.
+ {`package s1; type N[A any] struct{ a A }; var f = N[int]{}.a`, `a`, `field a int`},
+ {`package s1; type N[A any] struct{ a A }; func (r N[B]) m(b B) { r.a = b }`, `a`, `field a B`},
+
+ // Uses of methods are uses of the instantiated method.
+ {`package m0; type N[A any] int; func (r N[B]) m() { r.n() }; func (N[C]) n() {}`, `n`, `func (m0.N[B]).n()`},
+ {`package m1; type N[A any] int; func (r N[B]) m() { }; var f = N[int].m`, `m`, `func (m1.N[int]).m()`},
+ {`package m2; func _[A any](v interface{ m() A }) { v.m() }`, `m`, `func (interface).m() A`},
+ {`package m3; func f[A any]() interface{ m() A } { return nil }; var _ = f[int]().m()`, `m`, `func (interface).m() int`},
+ {`package m4; type T[A any] func() interface{ m() A }; var x T[int]; var y = x().m`, `m`, `func (interface).m() int`},
+ {`package m5; type T[A any] interface{ m() A }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m5.T[B]).m() B`},
+ {`package m6; type T[A any] interface{ m() }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m6.T[B]).m()`},
+ {`package m7; type T[A any] interface{ m() A }; func _(t T[int]) { t.m() }`, `m`, `func (m7.T[int]).m() int`},
+ {`package m8; type T[A any] interface{ m() }; func _(t T[int]) { t.m() }`, `m`, `func (m8.T[int]).m()`},
+ {`package m9; type T[A any] interface{ m() }; func _(t T[int]) { _ = t.m }`, `m`, `func (m9.T[int]).m()`},
+ {
+ `package m10; type E[A any] interface{ m() }; type T[B any] interface{ E[B]; n() }; func _(t T[int]) { t.m() }`,
+ `m`,
+ `func (m10.E[int]).m()`,
+ },
}
for _, test := range tests {
@@ -672,8 +722,10 @@ func TestUsesInfo(t *testing.T) {
var use Object
for id, obj := range info.Uses {
if id.Value == test.obj {
+ if use != nil {
+ panic(fmt.Sprintf("multiple uses of %q", id.Value))
+ }
use = obj
- break
}
}
if use == nil {
@@ -687,6 +739,89 @@ func TestUsesInfo(t *testing.T) {
}
}
+func TestGenericMethodInfo(t *testing.T) {
+ src := `package p
+
+type N[A any] int
+
+func (r N[B]) m() { r.m(); r.n() }
+
+func (r *N[C]) n() { }
+`
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ }
+ var conf Config
+ pkg, err := conf.Check("p", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ N := pkg.Scope().Lookup("N").Type().(*Named)
+
+ // Find the generic methods stored on N.
+ gm, gn := N.Method(0), N.Method(1)
+ if gm.Name() == "n" {
+ gm, gn = gn, gm
+ }
+
+ // Collect objects from info.
+ var dm, dn *Func // the declared methods
+ var dmm, dmn *Func // the methods used in the body of m
+ for _, decl := range f.DeclList {
+ fdecl, ok := decl.(*syntax.FuncDecl)
+ if !ok {
+ continue
+ }
+ def := info.Defs[fdecl.Name].(*Func)
+ switch fdecl.Name.Value {
+ case "m":
+ dm = def
+ syntax.Inspect(fdecl.Body, func(n syntax.Node) bool {
+ if call, ok := n.(*syntax.CallExpr); ok {
+ sel := call.Fun.(*syntax.SelectorExpr)
+ use := info.Uses[sel.Sel].(*Func)
+ selection := info.Selections[sel]
+ if selection.Kind() != MethodVal {
+ t.Errorf("Selection kind = %v, want %v", selection.Kind(), MethodVal)
+ }
+ if selection.Obj() != use {
+ t.Errorf("info.Selections contains %v, want %v", selection.Obj(), use)
+ }
+ switch sel.Sel.Value {
+ case "m":
+ dmm = use
+ case "n":
+ dmn = use
+ }
+ }
+ return true
+ })
+ case "n":
+ dn = def
+ }
+ }
+
+ if gm != dm {
+ t.Errorf(`N.Method(...) returns %v for "m", but Info.Defs has %v`, gm, dm)
+ }
+ if gn != dn {
+ t.Errorf(`N.Method(...) returns %v for "n", but Info.Defs has %v`, gn, dn)
+ }
+ if dmm != dm {
+ t.Errorf(`Inside "m", r.m uses %v, want the defined func %v`, dmm, dm)
+ }
+ if dmn == dn {
+ t.Errorf(`Inside "m", r.n uses %v, want a func distinct from %v`, dmn, dn)
+ }
+}
+
func TestImplicitsInfo(t *testing.T) {
testenv.MustHaveGoBuild(t)
@@ -707,6 +842,17 @@ func TestImplicitsInfo(t *testing.T) {
{`package p8; func f(int) {}`, "field: var int"},
{`package p9; func f() (complex64) { return 0 }`, "field: var complex64"},
{`package p10; type T struct{}; func (*T) f() {}`, "field: var *p10.T"},
+
+ // Tests using generics.
+ {`package f0; func f[T any](x int) {}`, ""}, // no Implicits entry
+ {`package f1; func f[T any](int) {}`, "field: var int"},
+ {`package f2; func f[T any](T) {}`, "field: var T"},
+ {`package f3; func f[T any]() (complex64) { return 0 }`, "field: var complex64"},
+ {`package f4; func f[T any](t T) (T) { return t }`, "field: var T"},
+ {`package t0; type T[A any] struct{}; func (*T[_]) f() {}`, "field: var *t0.T[_]"},
+ {`package t1; type T[A any] struct{}; func _(x interface{}) { switch t := x.(type) { case T[int]: _ = t } }`, "caseClause: var t t1.T[int]"},
+ {`package t2; type T[A any] struct{}; func _[P any](x interface{}) { switch t := x.(type) { case T[P]: _ = t } }`, "caseClause: var t t2.T[P]"},
+ {`package t3; func _[P any](x interface{}) { switch t := x.(type) { case P: _ = t } }`, "caseClause: var t P"},
}
for _, test := range tests {
@@ -1425,6 +1571,18 @@ var _ = a.C2
makePkg("main", mainSrc) // don't crash when type-checking this package
}
+func TestLookupFieldOrMethodOnNil(t *testing.T) {
+ // LookupFieldOrMethod on a nil type is expected to produce a run-time panic.
+ defer func() {
+ const want = "LookupFieldOrMethod on nil type"
+ p := recover()
+ if s, ok := p.(string); !ok || s != want {
+ t.Fatalf("got %v, want %s", p, want)
+ }
+ }()
+ LookupFieldOrMethod(nil, false, nil, "")
+}
+
func TestLookupFieldOrMethod(t *testing.T) {
// Test cases assume a lookup of the form a.f or x.f, where a stands for an
// addressable value, and x for a non-addressable value (even though a variable
@@ -1681,6 +1839,56 @@ func TestAssignableTo(t *testing.T) {
}
}
+func TestIdentical(t *testing.T) {
+ // For each test, we compare the types of objects X and Y in the source.
+ tests := []struct {
+ src string
+ want bool
+ }{
+ // Basic types.
+ {"var X int; var Y int", true},
+ {"var X int; var Y string", false},
+
+ // TODO: add more tests for complex types.
+
+ // Named types.
+ {"type X int; type Y int", false},
+
+ // Aliases.
+ {"type X = int; type Y = int", true},
+
+ // Functions.
+ {`func X(int) string { return "" }; func Y(int) string { return "" }`, true},
+ {`func X() string { return "" }; func Y(int) string { return "" }`, false},
+ {`func X(int) string { return "" }; func Y(int) {}`, false},
+
+ // Generic functions. Type parameters should be considered identical modulo
+ // renaming. See also issue #49722.
+ {`func X[P ~int](){}; func Y[Q ~int]() {}`, true},
+ {`func X[P1 any, P2 ~*P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, true},
+ {`func X[P1 any, P2 ~[]P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, false},
+ {`func X[P ~int](P){}; func Y[Q ~int](Q) {}`, true},
+ {`func X[P ~string](P){}; func Y[Q ~int](Q) {}`, false},
+ {`func X[P ~int]([]P){}; func Y[Q ~int]([]Q) {}`, true},
+ }
+
+ for _, test := range tests {
+ pkg, err := pkgFor("test", "package p;"+test.src, nil)
+ if err != nil {
+ t.Errorf("%s: incorrect test case: %s", test.src, err)
+ continue
+ }
+ X := pkg.Scope().Lookup("X")
+ Y := pkg.Scope().Lookup("Y")
+ if X == nil || Y == nil {
+ t.Fatal("test must declare both X and Y")
+ }
+ if got := Identical(X.Type(), Y.Type()); got != test.want {
+ t.Errorf("Identical(%s, %s) = %t, want %t", X.Type(), Y.Type(), got, test.want)
+ }
+ }
+}
+
func TestIdentical_issue15173(t *testing.T) {
// Identical should allow nil arguments and be symmetric.
for _, test := range []struct {
@@ -1958,7 +2166,7 @@ func f(x T) T { return foo.F(x) }
func TestInstantiate(t *testing.T) {
// eventually we like more tests but this is a start
- const src = genericPkg + "p; type T[P any] *T[P]"
+ const src = "package p; type T[P any] *T[P]"
pkg, err := pkgFor(".", src, nil)
if err != nil {
t.Fatal(err)
@@ -1996,7 +2204,7 @@ func TestInstantiateErrors(t *testing.T) {
}
for _, test := range tests {
- src := genericPkg + "p; " + test.src
+ src := "package p; " + test.src
pkg, err := pkgFor(".", src, nil)
if err != nil {
t.Fatal(err)
@@ -2009,10 +2217,28 @@ func TestInstantiateErrors(t *testing.T) {
t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs)
}
- gotAt := err.(ArgumentError).Index()
- if gotAt != test.wantAt {
- t.Errorf("Instantate(%v, %v): error at index %d, want index %d", T, test.targs, gotAt, test.wantAt)
+ var argErr *ArgumentError
+ if !errors.As(err, &argErr) {
+ t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs)
}
+
+ if argErr.Index != test.wantAt {
+ t.Errorf("Instantiate(%v, %v): error at index %d, want index %d", T, test.targs, argErr.Index, test.wantAt)
+ }
+ }
+}
+
+func TestArgumentErrorUnwrapping(t *testing.T) {
+ var err error = &ArgumentError{
+ Index: 1,
+ Err: Error{Msg: "test"},
+ }
+ var e Error
+ if !errors.As(err, &e) {
+ t.Fatalf("error %v does not wrap types.Error", err)
+ }
+ if e.Msg != "test" {
+ t.Errorf("e.Msg = %q, want %q", e.Msg, "test")
}
}
@@ -2031,12 +2257,115 @@ func TestInstanceIdentity(t *testing.T) {
}
imports[name] = pkg
}
- makePkg(genericPkg + `lib; type T[P any] struct{}`)
- makePkg(genericPkg + `a; import "generic_lib"; var A generic_lib.T[int]`)
- makePkg(genericPkg + `b; import "generic_lib"; var B generic_lib.T[int]`)
- a := imports["generic_a"].Scope().Lookup("A")
- b := imports["generic_b"].Scope().Lookup("B")
+ makePkg(`package lib; type T[P any] struct{}`)
+ makePkg(`package a; import "lib"; var A lib.T[int]`)
+ makePkg(`package b; import "lib"; var B lib.T[int]`)
+ a := imports["a"].Scope().Lookup("A")
+ b := imports["b"].Scope().Lookup("B")
if !Identical(a.Type(), b.Type()) {
t.Errorf("mismatching types: a.A: %s, b.B: %s", a.Type(), b.Type())
}
}
+
+func TestImplements(t *testing.T) {
+ const src = `
+package p
+
+type EmptyIface interface{}
+
+type I interface {
+ m()
+}
+
+type C interface {
+ m()
+ ~int
+}
+
+type Integer interface{
+ int8 | int16 | int32 | int64
+}
+
+type EmptyTypeSet interface{
+ Integer
+ ~string
+}
+
+type N1 int
+func (N1) m() {}
+
+type N2 int
+func (*N2) m() {}
+
+type N3 int
+func (N3) m(int) {}
+
+type N4 string
+func (N4) m()
+
+type Bad Bad // invalid type
+`
+
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{Error: func(error) {}}
+ pkg, _ := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+
+ scope := pkg.Scope()
+ var (
+ EmptyIface = scope.Lookup("EmptyIface").Type().Underlying().(*Interface)
+ I = scope.Lookup("I").Type().(*Named)
+ II = I.Underlying().(*Interface)
+ C = scope.Lookup("C").Type().(*Named)
+ CI = C.Underlying().(*Interface)
+ Integer = scope.Lookup("Integer").Type().Underlying().(*Interface)
+ EmptyTypeSet = scope.Lookup("EmptyTypeSet").Type().Underlying().(*Interface)
+ N1 = scope.Lookup("N1").Type()
+ N1p = NewPointer(N1)
+ N2 = scope.Lookup("N2").Type()
+ N2p = NewPointer(N2)
+ N3 = scope.Lookup("N3").Type()
+ N4 = scope.Lookup("N4").Type()
+ Bad = scope.Lookup("Bad").Type()
+ )
+
+ tests := []struct {
+ t Type
+ i *Interface
+ want bool
+ }{
+ {I, II, true},
+ {I, CI, false},
+ {C, II, true},
+ {C, CI, true},
+ {Typ[Int8], Integer, true},
+ {Typ[Int64], Integer, true},
+ {Typ[String], Integer, false},
+ {EmptyTypeSet, II, true},
+ {EmptyTypeSet, EmptyTypeSet, true},
+ {Typ[Int], EmptyTypeSet, false},
+ {N1, II, true},
+ {N1, CI, true},
+ {N1p, II, true},
+ {N1p, CI, false},
+ {N2, II, false},
+ {N2, CI, false},
+ {N2p, II, true},
+ {N2p, CI, false},
+ {N3, II, false},
+ {N3, CI, false},
+ {N4, II, true},
+ {N4, CI, false},
+ {Bad, II, false},
+ {Bad, CI, false},
+ {Bad, EmptyIface, true},
+ }
+
+ for _, test := range tests {
+ if got := Implements(test.t, test.i); got != test.want {
+ t.Errorf("Implements(%s, %s) = %t, want %t", test.t, test.i, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
index bfc5578683..936930f0b1 100644
--- a/src/cmd/compile/internal/types2/assignments.go
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -9,6 +9,7 @@ package types2
import (
"cmd/compile/internal/syntax"
"fmt"
+ "strings"
)
// assignment reports whether x can be assigned to a variable of type T,
@@ -43,7 +44,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
x.mode = invalid
return
}
- } else if T == nil || IsInterface(T) {
+ } else if T == nil || IsInterface(T) && !isTypeParam(T) {
target = Default(x.typ)
}
newType, val, code := check.implicitTypeAndValue(x, target)
@@ -71,7 +72,7 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
// x.typ is typed
// A generic (non-instantiated) function value cannot be assigned to a variable.
- if sig := asSignature(x.typ); sig != nil && sig.TypeParams().Len() > 0 {
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
check.errorf(x, "cannot use generic function %s without instantiation in %s", x, context)
}
@@ -85,7 +86,11 @@ func (check *Checker) assignment(x *operand, T Type, context string) {
reason := ""
if ok, _ := x.assignableTo(check, T, &reason); !ok {
if check.conf.CompilerErrorMessages {
- check.errorf(x, "incompatible type: cannot use %s as %s value", x, T)
+ if reason != "" {
+ check.errorf(x, "cannot use %s as type %s in %s:\n\t%s", x, T, context, reason)
+ } else {
+ check.errorf(x, "cannot use %s as type %s in %s", x, T, context)
+ }
} else {
if reason != "" {
check.errorf(x, "cannot use %s as %s value in %s: %s", x, T, context, reason)
@@ -216,9 +221,6 @@ func (check *Checker) assignVar(lhs syntax.Expr, x *operand) Type {
return nil
case variable, mapindex:
// ok
- case nilvalue:
- check.error(&z, "cannot assign to nil") // default would print "untyped nil"
- return nil
default:
if sel, ok := z.expr.(*syntax.SelectorExpr); ok {
var op operand
@@ -240,6 +242,58 @@ func (check *Checker) assignVar(lhs syntax.Expr, x *operand) Type {
return x.typ
}
+// operandTypes returns the list of types for the given operands.
+func operandTypes(list []*operand) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// varTypes returns the list of types for the given variables.
+func varTypes(list []*Var) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// typesSummary returns a string of the form "(t1, t2, ...)" where the
+// ti's are user-friendly string representations for the given types.
+// If variadic is set and the last type is a slice, its string is of
+// the form "...E" where E is the slice's element type.
+func (check *Checker) typesSummary(list []Type, variadic bool) string {
+ var res []string
+ for i, t := range list {
+ var s string
+ switch {
+ case t == nil:
+ fallthrough // should not happen but be cautious
+ case t == Typ[Invalid]:
+ s = ""
+ case isUntyped(t):
+ if isNumeric(t) {
+ // Do not imply a specific type requirement:
+ // "have number, want float64" is better than
+ // "have untyped int, want float64" or
+ // "have int, want float64".
+ s = "number"
+ } else {
+ // If we don't have a number, omit the "untyped" qualifier
+ // for compactness.
+ s = strings.Replace(t.(*Basic).name, "untyped ", "", -1)
+ }
+ case variadic && i == len(list)-1:
+ s = check.sprintf("...%s", t.(*Slice).elem)
+ }
+ if s == "" {
+ s = check.sprintf("%s", t)
+ }
+ res = append(res, s)
+ }
+ return "(" + strings.Join(res, ", ") + ")"
+}
+
func (check *Checker) assignError(rhs []syntax.Expr, nvars, nvals int) {
measure := func(x int, unit string) string {
s := fmt.Sprintf("%d %s", x, unit)
@@ -262,10 +316,10 @@ func (check *Checker) assignError(rhs []syntax.Expr, nvars, nvals int) {
check.errorf(rhs0, "assignment mismatch: %s but %s", vars, vals)
}
-// If returnPos is valid, initVars is called to type-check the assignment of
-// return expressions, and returnPos is the position of the return statement.
-func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnPos syntax.Pos) {
- rhs, commaOk := check.exprList(orig_rhs, len(lhs) == 2 && !returnPos.IsKnown())
+// If returnStmt != nil, initVars is called to type-check the assignment
+// of return expressions, and returnStmt is the return statement.
+func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt syntax.Stmt) {
+ rhs, commaOk := check.exprList(orig_rhs, len(lhs) == 2 && returnStmt == nil)
if len(lhs) != len(rhs) {
// invalidate lhs
@@ -281,8 +335,20 @@ func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnPos syn
return
}
}
- if returnPos.IsKnown() {
- check.errorf(returnPos, "wrong number of return values (want %d, got %d)", len(lhs), len(rhs))
+ if returnStmt != nil {
+ var at poser = returnStmt
+ qualifier := "not enough"
+ if len(rhs) > len(lhs) {
+ at = rhs[len(lhs)].expr // report at first extra value
+ qualifier = "too many"
+ } else if len(rhs) > 0 {
+ at = rhs[len(rhs)-1].expr // report at last value
+ }
+ var err error_
+ err.errorf(at, "%s return values", qualifier)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(lhs), false))
+ check.report(&err)
return
}
if check.conf.CompilerErrorMessages {
@@ -294,7 +360,7 @@ func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnPos syn
}
context := "assignment"
- if returnPos.IsKnown() {
+ if returnStmt != nil {
context = "return statement"
}
@@ -448,7 +514,7 @@ func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) {
}
}
- check.initVars(lhsVars, rhs, nopos)
+ check.initVars(lhsVars, rhs, nil)
// process function literals in rhs expressions before scope changes
check.processDelayed(top)
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
index 548d55e10c..c2f955ce8c 100644
--- a/src/cmd/compile/internal/types2/builtins.go
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -82,7 +82,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// of S and the respective parameter passing rules apply."
S := x.typ
var T Type
- if s, _ := structure(S).(*Slice); s != nil {
+ if s, _ := structuralType(S).(*Slice); s != nil {
T = s.elem
} else {
check.errorf(x, invalidArg+"%s is not a slice", x)
@@ -101,7 +101,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
if x.mode == invalid {
return
}
- if allString(x.typ) {
+ if t := structuralString(x.typ); t != nil && isString(t) {
if check.Types != nil {
sig := makeSig(S, S, x.typ)
sig.variadic = true
@@ -129,7 +129,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
arg(&x, i)
xlist = append(xlist, &x)
}
- check.arguments(call, sig, nil, xlist) // discard result (we know the result type)
+ check.arguments(call, sig, nil, xlist, nil) // discard result (we know the result type)
// ok to continue even if check.arguments reported errors
x.mode = value
@@ -178,8 +178,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
mode = value
}
- case *TypeParam:
- if t.underIs(func(t Type) bool {
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
+ if t.typeSet().underIs(func(t Type) bool {
switch t := arrayPtrDeref(t).(type) {
case *Basic:
if isString(t) && id == _Len {
@@ -293,8 +296,8 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// the argument types must be of floating-point type
// (applyTypeFunc never calls f with a type parameter)
f := func(typ Type) Type {
- assert(asTypeParam(typ) == nil)
- if t := asBasic(typ); t != nil {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
switch t.kind {
case Float32:
return Typ[Complex64]
@@ -306,7 +309,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
}
return nil
}
- resTyp := check.applyTypeFunc(f, x.typ)
+ resTyp := check.applyTypeFunc(f, x, id)
if resTyp == nil {
check.errorf(x, invalidArg+"arguments have type %s, expected floating-point", x.typ)
return
@@ -327,14 +330,18 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Copy:
// copy(x, y []T) int
- dst, _ := structure(x.typ).(*Slice)
+ dst, _ := structuralType(x.typ).(*Slice)
var y operand
arg(&y, 1)
if y.mode == invalid {
return
}
- src, _ := structureString(y.typ).(*Slice)
+ src0 := structuralString(y.typ)
+ if src0 != nil && isString(src0) {
+ src0 = NewSlice(universeByte)
+ }
+ src, _ := src0.(*Slice)
if dst == nil || src == nil {
check.errorf(x, invalidArg+"copy expects slice arguments; found %s and %s", x, &y)
@@ -417,8 +424,8 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
// the argument must be of complex type
// (applyTypeFunc never calls f with a type parameter)
f := func(typ Type) Type {
- assert(asTypeParam(typ) == nil)
- if t := asBasic(typ); t != nil {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
switch t.kind {
case Complex64:
return Typ[Float32]
@@ -430,7 +437,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
}
return nil
}
- resTyp := check.applyTypeFunc(f, x.typ)
+ resTyp := check.applyTypeFunc(f, x, id)
if resTyp == nil {
check.errorf(x, invalidArg+"argument has type %s, expected complex type", x.typ)
return
@@ -464,13 +471,13 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
}
var min int // minimum number of arguments
- switch structure(T).(type) {
+ switch structuralType(T).(type) {
case *Slice:
min = 2
case *Map, *Chan:
min = 1
case nil:
- check.errorf(arg0, invalidArg+"cannot make %s; type set has no single underlying type", arg0)
+ check.errorf(arg0, invalidArg+"cannot make %s: no structural type", arg0)
return
default:
check.errorf(arg0, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0)
@@ -574,7 +581,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Add:
// unsafe.Add(ptr unsafe.Pointer, len IntegerType) unsafe.Pointer
if !check.allowVersion(check.pkg, 1, 17) {
- check.error(call.Fun, "unsafe.Add requires go1.17 or later")
+ check.versionErrorf(call.Fun, "go1.17", "unsafe.Add")
return
}
@@ -700,11 +707,11 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
case _Slice:
// unsafe.Slice(ptr *T, len IntegerType) []T
if !check.allowVersion(check.pkg, 1, 17) {
- check.error(call.Fun, "unsafe.Slice requires go1.17 or later")
+ check.versionErrorf(call.Fun, "go1.17", "unsafe.Slice")
return
}
- typ := asPointer(x.typ)
+ typ, _ := under(x.typ).(*Pointer)
if typ == nil {
check.errorf(x, invalidArg+"%s is not a pointer", x)
return
@@ -767,69 +774,19 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (
return true
}
-// Structure is exported for the compiler.
-
-// If typ is a type parameter, Structure returns the single underlying
-// type of all types in the corresponding type constraint if it exists,
-// or nil otherwise. If typ is not a type parameter, Structure returns
-// the underlying type.
-func Structure(typ Type) Type {
- return structure(typ)
-}
-
-// If typ is a type parameter, structure returns the single underlying
-// type of all types in the corresponding type constraint if it exists,
-// or nil otherwise. If typ is not a type parameter, structure returns
-// the underlying type.
-func structure(typ Type) Type {
- var su Type
- if underIs(typ, func(u Type) bool {
- if su != nil && !Identical(su, u) {
- return false
- }
- // su == nil || Identical(su, u)
- su = u
- return true
- }) {
- return su
- }
- return nil
-}
-
-// structureString is like structure but also considers []byte and
-// string as "identical". In this case, if successful, the result
-// is always []byte.
-func structureString(typ Type) Type {
- var su Type
- if underIs(typ, func(u Type) bool {
- if isString(u) {
- u = NewSlice(universeByte)
- }
- if su != nil && !Identical(su, u) {
- return false
- }
- // su == nil || Identical(su, u)
- su = u
- return true
- }) {
- return su
- }
- return nil
-}
-
// hasVarSize reports if the size of type t is variable due to type parameters.
func hasVarSize(t Type) bool {
- switch t := under(t).(type) {
+ switch u := under(t).(type) {
case *Array:
- return hasVarSize(t.elem)
+ return hasVarSize(u.elem)
case *Struct:
- for _, f := range t.fields {
+ for _, f := range u.fields {
if hasVarSize(f.typ) {
return true
}
}
- case *TypeParam:
- return true
+ case *Interface:
+ return isTypeParam(t)
case *Named, *Union:
unreachable()
}
@@ -843,8 +800,8 @@ func hasVarSize(t Type) bool {
// of x. If any of these applications of f return nil,
// applyTypeFunc returns nil.
// If x is not a type parameter, the result is f(x).
-func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
- if tp := asTypeParam(x); tp != nil {
+func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) Type {
+ if tp, _ := x.typ.(*TypeParam); tp != nil {
// Test if t satisfies the requirements for the argument
// type and collect possible result types at the same time.
var terms []*Term
@@ -861,17 +818,23 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x Type) Type {
return nil
}
+ // We can type-check this fine but we're introducing a synthetic
+ // type parameter for the result. It's not clear what the API
+ // implications are here. Report an error for 1.18 but continue
+ // type-checking.
+ check.softErrorf(x, "%s not supported as argument to %s for go1.18 (see issue #50937)", x, predeclaredFuncs[id].name)
+
// Construct a suitable new type parameter for the result type.
// The type parameter is placed in the current package so export/import
// works as expected.
- tpar := NewTypeName(nopos, check.pkg, "", nil)
+ tpar := NewTypeName(nopos, check.pkg, tp.obj.name, nil)
ptyp := check.newTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
ptyp.index = tp.index
return ptyp
}
- return f(x)
+ return f(x.typ)
}
// makeSig makes a signature for the given argument and result types.
@@ -894,7 +857,7 @@ func makeSig(res Type, args ...Type) *Signature {
// otherwise it returns typ.
func arrayPtrDeref(typ Type) Type {
if p, ok := typ.(*Pointer); ok {
- if a := asArray(p.base); a != nil {
+ if a, _ := under(p.base).(*Array); a != nil {
return a
}
}
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
index 49cae5a930..15a42ca3dc 100644
--- a/src/cmd/compile/internal/types2/call.go
+++ b/src/cmd/compile/internal/types2/call.go
@@ -16,7 +16,7 @@ import (
// The operand x must be the evaluation of inst.X and its type must be a signature.
func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
if !check.allowVersion(check.pkg, 1, 18) {
- check.softErrorf(inst.Pos(), "function instantiation requires go1.18 or later")
+ check.versionErrorf(inst.Pos(), "go1.18", "function instantiation")
}
xlist := unpackExpr(inst.Index)
@@ -50,14 +50,8 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
}
assert(got == want)
- // determine argument positions (for error reporting)
- poslist := make([]syntax.Pos, len(xlist))
- for i, x := range xlist {
- poslist[i] = syntax.StartPos(x)
- }
-
// instantiate function signature
- res := check.instantiateSignature(x.Pos(), sig, targs, poslist)
+ res := check.instantiateSignature(x.Pos(), sig, targs, xlist)
assert(res.TypeParams().Len() == 0) // signature is not generic anymore
check.recordInstance(inst.X, targs, res)
x.typ = res
@@ -65,7 +59,7 @@ func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
x.expr = inst
}
-func (check *Checker) instantiateSignature(pos syntax.Pos, typ *Signature, targs []Type, posList []syntax.Pos) (res *Signature) {
+func (check *Checker) instantiateSignature(pos syntax.Pos, typ *Signature, targs []Type, xlist []syntax.Expr) (res *Signature) {
assert(check != nil)
assert(len(targs) == typ.TypeParams().Len())
@@ -78,19 +72,23 @@ func (check *Checker) instantiateSignature(pos syntax.Pos, typ *Signature, targs
}()
}
- inst := check.instance(pos, typ, targs, check.conf.Context).(*Signature)
- assert(len(posList) <= len(targs))
- tparams := typ.TypeParams().list()
- if i, err := check.verify(pos, tparams, targs); err != nil {
- // best position for error reporting
- pos := pos
- if i < len(posList) {
- pos = posList[i]
+ inst := check.instance(pos, typ, targs, check.bestContext(nil)).(*Signature)
+ assert(len(xlist) <= len(targs))
+
+ // verify instantiation lazily (was issue #50450)
+ check.later(func() {
+ tparams := typ.TypeParams().list()
+ if i, err := check.verify(pos, tparams, targs); err != nil {
+ // best position for error reporting
+ pos := pos
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ check.softErrorf(pos, "%s", err)
+ } else {
+ check.mono.recordInstance(check.pkg, pos, tparams, targs, xlist)
}
- check.softErrorf(pos, err.Error())
- } else {
- check.mono.recordInstance(check.pkg, pos, tparams, targs, posList)
- }
+ })
return inst
}
@@ -132,7 +130,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
case 1:
check.expr(x, call.ArgList[0])
if x.mode != invalid {
- if t := asInterface(T); t != nil {
+ if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) {
if !t.IsMethodSet() {
check.errorf(call, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T)
break
@@ -170,7 +168,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
cgocall := x.mode == cgofunc
// a type parameter may be "called" if all types have the same signature
- sig, _ := structure(x.typ).(*Signature)
+ sig, _ := structuralType(x.typ).(*Signature)
if sig == nil {
check.errorf(x, invalidOp+"cannot call non-function %s", x)
x.mode = invalid
@@ -179,9 +177,10 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
}
// evaluate type arguments, if any
+ var xlist []syntax.Expr
var targs []Type
if inst != nil {
- xlist := unpackExpr(inst.Index)
+ xlist = unpackExpr(inst.Index)
targs = check.typeList(xlist)
if targs == nil {
check.use(call.ArgList...)
@@ -205,7 +204,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
// evaluate arguments
args, _ := check.exprList(call.ArgList, false)
isGeneric := sig.TypeParams().Len() > 0
- sig = check.arguments(call, sig, targs, args)
+ sig = check.arguments(call, sig, targs, args, xlist)
if isGeneric && sig.TypeParams().Len() == 0 {
// update the recorded type of call.Fun to its instantiated type
@@ -279,7 +278,8 @@ func (check *Checker) exprList(elist []syntax.Expr, allowCommaOk bool) (xlist []
return
}
-func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, args []*operand) (rsig *Signature) {
+// xlist is the list of type argument expressions supplied in the source code.
+func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, args []*operand, xlist []syntax.Expr) (rsig *Signature) {
rsig = sig
// TODO(gri) try to eliminate this extra verification loop
@@ -350,12 +350,25 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
}
// check argument count
- switch {
- case nargs < npars:
- check.errorf(call, "not enough arguments in call to %s", call.Fun)
- return
- case nargs > npars:
- check.errorf(args[npars], "too many arguments in call to %s", call.Fun) // report at first extra argument
+ if nargs != npars {
+ var at poser = call
+ qualifier := "not enough"
+ if nargs > npars {
+ at = args[npars].expr // report at first extra argument
+ qualifier = "too many"
+ } else if nargs > 0 {
+ at = args[nargs-1].expr // report at last argument
+ }
+ // take care of empty parameter lists represented by nil tuples
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ var err error_
+ err.errorf(at, "%s arguments in call to %s", qualifier, call.Fun)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(args), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic))
+ check.report(&err)
return
}
@@ -363,20 +376,18 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T
if sig.TypeParams().Len() > 0 {
if !check.allowVersion(check.pkg, 1, 18) {
if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
- check.softErrorf(iexpr.Pos(), "function instantiation requires go1.18 or later")
+ check.versionErrorf(iexpr.Pos(), "go1.18", "function instantiation")
} else {
- check.softErrorf(call.Pos(), "implicit function instantiation requires go1.18 or later")
+ check.versionErrorf(call.Pos(), "go1.18", "implicit function instantiation")
}
}
- // TODO(gri) provide position information for targs so we can feed
- // it to the instantiate call for better error reporting
targs := check.infer(call.Pos(), sig.TypeParams().list(), targs, sigParams, args)
if targs == nil {
return // error already reported
}
// compute result signature
- rsig = check.instantiateSignature(call.Pos(), sig, targs, nil)
+ rsig = check.instantiateSignature(call.Pos(), sig, targs, xlist)
assert(rsig.TypeParams().Len() == 0) // signature is not generic anymore
check.recordInstance(call.Fun, targs, rsig)
@@ -520,26 +531,30 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
if obj == nil {
- switch {
- case index != nil:
+ // Don't report another error if the underlying type was invalid (issue #49541).
+ if under(x.typ) == Typ[Invalid] {
+ goto Error
+ }
+
+ if index != nil {
// TODO(gri) should provide actual type where the conflict happens
check.errorf(e.Sel, "ambiguous selector %s.%s", x.expr, sel)
- case indirect:
- check.errorf(e.Sel, "cannot call pointer method %s on %s", sel, x.typ)
- default:
- var why string
- if tpar := asTypeParam(x.typ); tpar != nil {
- // Type parameter bounds don't specify fields, so don't mention "field".
- if tname := tpar.iface().obj; tname != nil {
- why = check.sprintf("interface %s has no method %s", tname.name, sel)
- } else {
- why = check.sprintf("type bound for %s has no method %s", x.typ, sel)
- }
- } else {
- why = check.sprintf("type %s has no field or method %s", x.typ, sel)
- }
+ goto Error
+ }
+ if indirect {
+ check.errorf(e.Sel, "cannot call pointer method %s on %s", sel, x.typ)
+ goto Error
+ }
+
+ var why string
+ if isInterfacePtr(x.typ) {
+ why = check.interfacePtrError(x.typ)
+ } else {
+ why = check.sprintf("type %s has no field or method %s", x.typ, sel)
// Check if capitalization of sel matters and provide better error message in that case.
+ // TODO(gri) This code only looks at the first character but LookupFieldOrMethod has an
+ // (internal) mechanism for case-insensitive lookup. Should use that instead.
if len(sel) > 0 {
var changeCase string
if r := rune(sel[0]); unicode.IsUpper(r) {
@@ -551,10 +566,8 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr) {
why += ", but does have " + changeCase
}
}
-
- check.errorf(e.Sel, "%s.%s undefined (%s)", x.expr, sel, why)
-
}
+ check.errorf(e.Sel, "%s.%s undefined (%s)", x.expr, sel, why)
goto Error
}
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
index b9a76a8990..bfed16993b 100644
--- a/src/cmd/compile/internal/types2/check.go
+++ b/src/cmd/compile/internal/types2/check.go
@@ -39,22 +39,24 @@ type exprInfo struct {
val constant.Value // constant value; or nil (if not a constant)
}
-// A context represents the context within which an object is type-checked.
-type context struct {
+// An environment represents the environment within which an object is
+// type-checked.
+type environment struct {
decl *declInfo // package-level declaration whose init expression/function body is checked
scope *Scope // top-most scope for lookups
pos syntax.Pos // if valid, identifiers are looked up as if at position pos (used by Eval)
iota constant.Value // value of iota in a constant declaration; nil otherwise
errpos syntax.Pos // if valid, identifier position of a constant with inherited initializer
+ inTParamList bool // set if inside a type parameter list
sig *Signature // function signature if inside a function; nil otherwise
isPanic map[*syntax.CallExpr]bool // set of panic call expressions (used for termination check)
hasLabel bool // set if a function makes use of labels (only ~1% of functions); unused outside functions
hasCallOrRecv bool // set if an expression contains a function call or channel receive operation
}
-// lookup looks up name in the current context and returns the matching object, or nil.
-func (ctxt *context) lookup(name string) Object {
- _, obj := ctxt.scope.LookupParent(name, ctxt.pos)
+// lookup looks up name in the current environment and returns the matching object, or nil.
+func (env *environment) lookup(name string) Object {
+ _, obj := env.scope.LookupParent(name, env.pos)
return obj
}
@@ -102,12 +104,14 @@ type Checker struct {
// package information
// (initialized by NewChecker, valid for the life-time of checker)
conf *Config
+ ctxt *Context // context for de-duplicating instances
pkg *Package
*Info
version version // accepted language version
nextID uint64 // unique Id for type parameters (first valid Id is 1)
objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ infoMap map[*Named]typeInfo // maps named types to their associated type info (for cycle detection)
// pkgPathMap maps package names to the set of distinct import paths we've
// seen for that name, anywhere in the import graph. It is used for
@@ -126,6 +130,8 @@ type Checker struct {
imports []*PkgName // list of imported packages
dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type
+ brokenAliases map[*TypeName]bool // set of aliases with broken (not yet determined) types
+ unionTypeSets map[*Union]*_TypeSet // computed type sets for union types
mono monoGraph // graph for detecting non-monomorphizable instantiation loops
firstErr error // first error encountered
@@ -135,9 +141,9 @@ type Checker struct {
objPath []Object // path of object dependencies during type inference (for cycle reporting)
defTypes []*Named // defined types created during type checking, for final validation.
- // context within which the current object is type-checked
- // (valid only for the duration of type-checking a specific object)
- context
+ // environment within which the current object is type-checked (valid only
+ // for the duration of type-checking a specific object)
+ environment
// debugging
indent int // indentation for tracing
@@ -155,6 +161,27 @@ func (check *Checker) addDeclDep(to Object) {
from.addDep(to)
}
+// brokenAlias records that alias doesn't have a determined type yet.
+// It also sets alias.typ to Typ[Invalid].
+func (check *Checker) brokenAlias(alias *TypeName) {
+ if check.brokenAliases == nil {
+ check.brokenAliases = make(map[*TypeName]bool)
+ }
+ check.brokenAliases[alias] = true
+ alias.typ = Typ[Invalid]
+}
+
+// validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
+func (check *Checker) validAlias(alias *TypeName, typ Type) {
+ delete(check.brokenAliases, alias)
+ alias.typ = typ
+}
+
+// isBrokenAlias reports whether alias doesn't have a determined type yet.
+func (check *Checker) isBrokenAlias(alias *TypeName) bool {
+ return alias.typ == Typ[Invalid] && check.brokenAliases[alias]
+}
+
func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
m := check.untyped
if m == nil {
@@ -199,11 +226,6 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
conf = new(Config)
}
- // make sure we have a context
- if conf.Context == nil {
- conf.Context = NewContext()
- }
-
// make sure we have an info struct
if info == nil {
info = new(Info)
@@ -216,11 +238,13 @@ func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
return &Checker{
conf: conf,
+ ctxt: conf.Context,
pkg: pkg,
Info: info,
version: version,
objMap: make(map[Object]*declInfo),
impMap: make(map[importKey]*Package),
+ infoMap: make(map[*Named]typeInfo),
}
}
@@ -331,7 +355,10 @@ func (check *Checker) checkFiles(files []*syntax.File) (err error) {
check.pkgPathMap = nil
check.seenPkgMap = nil
check.recvTParamMap = nil
+ check.brokenAliases = nil
+ check.unionTypeSets = nil
check.defTypes = nil
+ check.ctxt = nil
// TODO(gri) There's more memory we should release at this point.
@@ -494,7 +521,7 @@ func (check *Checker) recordInstance(expr syntax.Expr, targs []Type, typ Type) {
assert(ident != nil)
assert(typ != nil)
if m := check.Instances; m != nil {
- m[ident] = Instance{NewTypeList(targs), typ}
+ m[ident] = Instance{newTypeList(targs), typ}
}
}
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
index d4c7b7b39b..7efa512164 100644
--- a/src/cmd/compile/internal/types2/check_test.go
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -25,6 +25,7 @@ package types2_test
import (
"cmd/compile/internal/syntax"
"flag"
+ "internal/buildcfg"
"internal/testenv"
"os"
"path/filepath"
@@ -93,14 +94,31 @@ func asGoVersion(s string) string {
return ""
}
+// excludedForUnifiedBuild lists files that cannot be tested
+// when using the unified build's export data.
+// TODO(gri) enable as soon as the unified build supports this.
+var excludedForUnifiedBuild = map[string]bool{
+ "issue47818.go2": true,
+ "issue49705.go2": true,
+}
+
func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) {
if len(filenames) == 0 {
t.Fatal("no source files")
}
+ if buildcfg.Experiment.Unified {
+ for _, f := range filenames {
+ if excludedForUnifiedBuild[filepath.Base(f)] {
+ t.Logf("%s cannot be tested with unified build - skipped", f)
+ return
+ }
+ }
+ }
+
var mode syntax.Mode
if strings.HasSuffix(filenames[0], ".go2") || manual {
- mode |= syntax.AllowGenerics
+ mode |= syntax.AllowGenerics | syntax.AllowMethodTypeParams
}
// parse files and collect parser errors
files, errlist := parseFiles(t, filenames, mode)
@@ -277,10 +295,13 @@ func TestManual(t *testing.T) {
// TODO(gri) go/types has extra TestLongConstants and TestIndexRepresentability tests
-func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", 75, false) } // TODO(gri) narrow column tolerance
-func TestSpec(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/spec", 0, false) }
-func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", 0, false) }
-func TestFixedbugs(t *testing.T) { testDirFiles(t, "testdata/fixedbugs", 0, false) }
+func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", 55, false) } // TODO(gri) narrow column tolerance
+func TestSpec(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/spec", 0, false) }
+func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", 0, false) }
+func TestFixedbugs(t *testing.T) {
+ DefPredeclaredTestFuncs()
+ testDirFiles(t, "testdata/fixedbugs", 0, false)
+}
func testDirFiles(t *testing.T, dir string, colDelta uint, manual bool) {
testenv.MustHaveGoBuild(t)
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
new file mode 100644
index 0000000000..b35e752b8f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions exported for the compiler.
+// Do not use internally.
+
+package types2
+
+// If the underlying type of t is a pointer, AsPointer returns that pointer type; otherwise it returns nil.
+func AsPointer(t Type) *Pointer {
+ u, _ := t.Underlying().(*Pointer)
+ return u
+}
+
+// If the underlying type of t is a signature, AsSignature returns that signature type; otherwise it returns nil.
+func AsSignature(t Type) *Signature {
+ u, _ := t.Underlying().(*Signature)
+ return u
+}
+
+// If t is a type parameter, StructuralType returns the single underlying
+// type of all types in the corresponding type constraint if it exists, or
+// nil otherwise. If the type set contains only unrestricted and restricted
+// channel types (with identical element types), the single underlying type
+// is the restricted channel type if the restrictions are always the same.
+// If t is not a type parameter, StructuralType returns the underlying type.
+func StructuralType(t Type) Type {
+ return structuralType(t)
+}
diff --git a/src/cmd/compile/internal/types2/context.go b/src/cmd/compile/internal/types2/context.go
index a8f8591243..7abea6b654 100644
--- a/src/cmd/compile/internal/types2/context.go
+++ b/src/cmd/compile/internal/types2/context.go
@@ -1,10 +1,13 @@
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+
package types2
import (
"bytes"
+ "fmt"
+ "strconv"
"strings"
"sync"
)
@@ -15,66 +18,105 @@ import (
//
// It is safe for concurrent use.
type Context struct {
- mu sync.Mutex
- typeMap map[string]*Named // type hash -> instance
- nextID int // next unique ID
- seen map[*Named]int // assigned unique IDs
+ mu sync.Mutex
+ typeMap map[string][]ctxtEntry // type hash -> instances entries
+ nextID int // next unique ID
+ originIDs map[Type]int // origin type -> unique ID
+}
+
+type ctxtEntry struct {
+ orig Type
+ targs []Type
+ instance Type // = orig[targs]
}
// NewContext creates a new Context.
func NewContext() *Context {
return &Context{
- typeMap: make(map[string]*Named),
- seen: make(map[*Named]int),
+ typeMap: make(map[string][]ctxtEntry),
+ originIDs: make(map[Type]int),
}
}
-// TypeHash returns a string representation of typ, which can be used as an exact
-// type hash: types that are identical produce identical string representations.
-// If typ is a *Named type and targs is not empty, typ is printed as if it were
-// instantiated with targs. The result is guaranteed to not contain blanks (" ").
-func (ctxt *Context) TypeHash(typ Type, targs []Type) string {
+// instanceHash returns a string representation of typ instantiated with targs.
+// The hash should be a perfect hash, though out of caution the type checker
+// does not assume this. The result is guaranteed to not contain blanks.
+func (ctxt *Context) instanceHash(orig Type, targs []Type) string {
assert(ctxt != nil)
- assert(typ != nil)
+ assert(orig != nil)
var buf bytes.Buffer
h := newTypeHasher(&buf, ctxt)
- if named, _ := typ.(*Named); named != nil && len(targs) > 0 {
- // Don't use WriteType because we need to use the provided targs
- // and not any targs that might already be with the *Named type.
- h.typePrefix(named)
- h.typeName(named.obj)
+ h.string(strconv.Itoa(ctxt.getID(orig)))
+ // Because we've already written the unique origin ID this call to h.typ is
+ // unnecessary, but we leave it for hash readability. It can be removed later
+ // if performance is an issue.
+ h.typ(orig)
+ if len(targs) > 0 {
+ // TODO(rfindley): consider asserting on isGeneric(typ) here, if and when
+ // isGeneric handles *Signature types.
h.typeList(targs)
- } else {
- assert(targs == nil)
- h.typ(typ)
}
return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
}
-// typeForHash returns the recorded type for the type hash h, if it exists.
-// If no type exists for h and n is non-nil, n is recorded for h.
-func (ctxt *Context) typeForHash(h string, n *Named) *Named {
+// lookup returns an existing instantiation of orig with targs, if it exists.
+// Otherwise, it returns nil.
+func (ctxt *Context) lookup(h string, orig Type, targs []Type) Type {
ctxt.mu.Lock()
defer ctxt.mu.Unlock()
- if existing := ctxt.typeMap[h]; existing != nil {
- return existing
+
+ for _, e := range ctxt.typeMap[h] {
+ if identicalInstance(orig, targs, e.orig, e.targs) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("non-identical instances: (orig: %s, targs: %v) and %s", orig, targs, e.instance))
+ }
}
- if n != nil {
- ctxt.typeMap[h] = n
- }
- return n
+
+ return nil
}
-// idForType returns a unique ID for the pointer n.
-func (ctxt *Context) idForType(n *Named) int {
+// update de-duplicates inst against previously seen types with the hash h.
+// If an identical type is found with the type hash h, the previously seen
+// type is returned. Otherwise, inst is returned, and recorded in the Context
+// for the hash h.
+func (ctxt *Context) update(h string, orig Type, targs []Type, inst Type) Type {
+ assert(inst != nil)
+
ctxt.mu.Lock()
defer ctxt.mu.Unlock()
- id, ok := ctxt.seen[n]
+
+ for _, e := range ctxt.typeMap[h] {
+ if inst == nil || Identical(inst, e.instance) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("%s and %s are not identical", inst, e.instance))
+ }
+ }
+
+ ctxt.typeMap[h] = append(ctxt.typeMap[h], ctxtEntry{
+ orig: orig,
+ targs: targs,
+ instance: inst,
+ })
+
+ return inst
+}
+
+// getID returns a unique ID for the type t.
+func (ctxt *Context) getID(t Type) int {
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+ id, ok := ctxt.originIDs[t]
if !ok {
id = ctxt.nextID
- ctxt.seen[n] = id
+ ctxt.originIDs[t] = id
ctxt.nextID++
}
return id
diff --git a/src/cmd/compile/internal/types2/context_test.go b/src/cmd/compile/internal/types2/context_test.go
new file mode 100644
index 0000000000..aa649b1448
--- /dev/null
+++ b/src/cmd/compile/internal/types2/context_test.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "testing"
+)
+
+func TestContextHashCollisions(t *testing.T) {
+ if debug {
+ t.Skip("hash collisions are expected, and would fail debug assertions")
+ }
+ // Unit test the de-duplication fall-back logic in Context.
+ //
+ // We can't test this via Instantiate because this is only a fall-back in
+ // case our hash is imperfect.
+ //
+ // These lookups and updates use reasonable looking types in an attempt to
+ // make them robust to internal type assertions, but could equally well use
+ // arbitrary types.
+
+ // Create some distinct origin types. nullaryP and nullaryQ have no
+ // parameters and are identical (but have different type parameter names).
+ // unaryP has a parameter.
+ var nullaryP, nullaryQ, unaryP Type
+ {
+ // type nullaryP = func[P any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ nullaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type nullaryQ = func[Q any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "Q", nil), &emptyInterface)
+ nullaryQ = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type unaryP = func[P any](_ P)
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ params := NewTuple(NewVar(nopos, nil, "_", tparam))
+ unaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, params, nil, false)
+ }
+
+ ctxt := NewContext()
+
+ // Update the context with an instantiation of nullaryP.
+ inst := NewSignatureType(nil, nil, nil, nil, nil, false)
+ if got := ctxt.update("", nullaryP, []Type{Typ[Int]}, inst); got != inst {
+ t.Error("bad")
+ }
+
+ // unaryP is not identical to nullaryP, so we should not get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", unaryP, []Type{Typ[Int]}); got != nil {
+ t.Error("bad")
+ }
+
+ // nullaryQ is identical to nullaryP, so we *should* get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[Int]}); got != inst {
+ t.Error("bad")
+ }
+
+ // ...but verify we don't get inst with different type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[String]}); got != nil {
+ t.Error("bad")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
index 44e8aad84f..7fe1d5056b 100644
--- a/src/cmd/compile/internal/types2/conversions.go
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -7,6 +7,7 @@
package types2
import (
+ "fmt"
"go/constant"
"unicode"
)
@@ -17,7 +18,7 @@ func (check *Checker) conversion(x *operand, T Type) {
constArg := x.mode == constant_
constConvertibleTo := func(T Type, val *constant.Value) bool {
- switch t := asBasic(T); {
+ switch t, _ := under(T).(*Basic); {
case t == nil:
// nothing to do
case representableConst(x.val, check, t, val):
@@ -47,7 +48,7 @@ func (check *Checker) conversion(x *operand, T Type) {
// If T's type set is empty, or if it doesn't
// have specific types, constant x cannot be
// converted.
- ok = under(T).(*TypeParam).underIs(func(u Type) bool {
+ ok = T.(*TypeParam).underIs(func(u Type) bool {
// t is nil if there are no specific type terms
if u == nil {
cause = check.sprintf("%s does not contain specific types", T)
@@ -68,9 +69,19 @@ func (check *Checker) conversion(x *operand, T Type) {
if !ok {
var err error_
- err.errorf(x, "cannot convert %s to %s", x, T)
- if cause != "" {
- err.errorf(nopos, cause)
+ if check.conf.CompilerErrorMessages {
+ if cause != "" {
+ // Add colon at end of line if we have a following cause.
+ err.errorf(x, "cannot convert %s to type %s:", x, T)
+ err.errorf(nopos, cause)
+ } else {
+ err.errorf(x, "cannot convert %s to type %s", x, T)
+ }
+ } else {
+ err.errorf(x, "cannot convert %s to %s", x, T)
+ if cause != "" {
+ err.errorf(nopos, cause)
+ }
}
check.report(&err)
x.mode = invalid
@@ -87,13 +98,13 @@ func (check *Checker) conversion(x *operand, T Type) {
// - For conversions of untyped constants to non-constant types, also
// use the default type (e.g., []byte("foo") should report string
// not []byte as type for the constant "foo").
- // - For integer to string conversions, keep the argument type.
+ // - For constant integer to string conversions, keep the argument type.
// (See also the TODO below.)
if x.typ == Typ[UntypedNil] {
// ok
- } else if IsInterface(T) || constArg && !isConstType(T) {
+ } else if IsInterface(T) && !isTypeParam(T) || constArg && !isConstType(T) {
final = Default(x.typ)
- } else if isInteger(x.typ) && allString(T) {
+ } else if x.mode == constant_ && isInteger(x.typ) && allString(T) {
final = x.typ
}
check.updateExprType(x.expr, final, true)
@@ -122,70 +133,72 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
return true
}
- // "V and T have identical underlying types if tags are ignored"
+ // "V and T have identical underlying types if tags are ignored
+ // and V and T are not type parameters"
V := x.typ
Vu := under(V)
Tu := under(T)
- if IdenticalIgnoreTags(Vu, Tu) {
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
+ if IdenticalIgnoreTags(Vu, Tu) && Vp == nil && Tp == nil {
return true
}
// "V and T are unnamed pointer types and their pointer base types
- // have identical underlying types if tags are ignored"
+ // have identical underlying types if tags are ignored
+ // and their pointer base types are not type parameters"
if V, ok := V.(*Pointer); ok {
if T, ok := T.(*Pointer); ok {
- if IdenticalIgnoreTags(under(V.base), under(T.base)) {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) {
return true
}
}
}
// "V and T are both integer or floating point types"
- if isIntegerOrFloat(V) && isIntegerOrFloat(T) {
+ if isIntegerOrFloat(Vu) && isIntegerOrFloat(Tu) {
return true
}
// "V and T are both complex types"
- if isComplex(V) && isComplex(T) {
+ if isComplex(Vu) && isComplex(Tu) {
return true
}
// "V is an integer or a slice of bytes or runes and T is a string type"
- if (isInteger(V) || isBytesOrRunes(Vu)) && isString(T) {
+ if (isInteger(Vu) || isBytesOrRunes(Vu)) && isString(Tu) {
return true
}
// "V is a string and T is a slice of bytes or runes"
- if isString(V) && isBytesOrRunes(Tu) {
+ if isString(Vu) && isBytesOrRunes(Tu) {
return true
}
// package unsafe:
// "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
- if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(T) {
+ if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(Tu) {
return true
}
// "and vice versa"
- if isUnsafePointer(V) && (isPointer(Tu) || isUintptr(Tu)) {
+ if isUnsafePointer(Vu) && (isPointer(Tu) || isUintptr(Tu)) {
return true
}
// "V a slice, T is a pointer-to-array type,
// and the slice and array types have identical element types."
- if s := asSlice(V); s != nil {
- if p := asPointer(T); p != nil {
- if a := asArray(p.Elem()); a != nil {
+ if s, _ := Vu.(*Slice); s != nil {
+ if p, _ := Tu.(*Pointer); p != nil {
+ if a, _ := under(p.Elem()).(*Array); a != nil {
if Identical(s.Elem(), a.Elem()) {
if check == nil || check.allowVersion(check.pkg, 1, 17) {
return true
}
// check != nil
if cause != nil {
+ *cause = "conversion of slices to array pointers requires go1.17 or later"
if check.conf.CompilerErrorMessages {
- // compiler error message assumes a -lang flag
- *cause = "conversion of slices to array pointers only supported as of -lang=go1.17"
- } else {
- *cause = "conversion of slices to array pointers requires go1.17 or later"
+ *cause += fmt.Sprintf(" (-lang was set to %s; check go.mod)", check.conf.GoVersion)
}
}
return false
@@ -195,8 +208,6 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
}
// optimization: if we don't have type parameters, we're done
- Vp, _ := Vu.(*TypeParam)
- Tp, _ := Tu.(*TypeParam)
if Vp == nil && Tp == nil {
return false
}
@@ -222,6 +233,9 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
}
x.typ = V.typ
return Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
if !x.convertibleTo(check, T.typ, cause) {
errorf("cannot convert %s (in %s) to %s (in %s)", V.typ, Vp, T.typ, Tp)
return false
@@ -258,31 +272,24 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
return false
}
-// Helper predicates for convertibleToImpl. The types provided to convertibleToImpl
-// may be type parameters but they won't have specific type terms. Thus it is ok to
-// use the toT convenience converters in the predicates below.
-
func isUintptr(typ Type) bool {
- t := asBasic(typ)
+ t, _ := under(typ).(*Basic)
return t != nil && t.kind == Uintptr
}
func isUnsafePointer(typ Type) bool {
- // TODO(gri): Is this asBasic(typ) instead of typ.(*Basic) correct?
- // (The former calls under(), while the latter doesn't.)
- // The spec does not say so, but gc claims it is. See also
- // issue 6326.
- t := asBasic(typ)
+ t, _ := under(typ).(*Basic)
return t != nil && t.kind == UnsafePointer
}
func isPointer(typ Type) bool {
- return asPointer(typ) != nil
+ _, ok := under(typ).(*Pointer)
+ return ok
}
func isBytesOrRunes(typ Type) bool {
- if s := asSlice(typ); s != nil {
- t := asBasic(s.elem)
+ if s, _ := under(typ).(*Slice); s != nil {
+ t, _ := under(s.elem).(*Basic)
return t != nil && (t.kind == Byte || t.kind == Rune)
}
return false
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
index 5d2a6c531b..0e8f5085ba 100644
--- a/src/cmd/compile/internal/types2/decl.go
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -51,7 +51,7 @@ func pathString(path []Object) string {
return s
}
-// objDecl type-checks the declaration of obj in its respective (file) context.
+// objDecl type-checks the declaration of obj in its respective (file) environment.
// For the meaning of def, see Checker.definedType, in typexpr.go.
func (check *Checker) objDecl(obj Object, def *Named) {
if check.conf.Trace && obj.Type() == nil {
@@ -66,12 +66,6 @@ func (check *Checker) objDecl(obj Object, def *Named) {
}()
}
- // Funcs with m.instRecv set have not yet be completed. Complete them now
- // so that they have a type when objDecl exits.
- if m, _ := obj.(*Func); m != nil && m.instRecv != nil {
- check.completeMethod(check.conf.Context, m)
- }
-
// Checking the declaration of obj means inferring its type
// (and possibly its value, for constants).
// An object's type (and thus the object) may be in one of
@@ -124,7 +118,7 @@ func (check *Checker) objDecl(obj Object, def *Named) {
fallthrough
case grey:
- // We have a cycle.
+ // We have a (possibly invalid) cycle.
// In the existing code, this is marked by a non-nil type
// for the object except for constants and variables whose
// type may be non-nil (known), or nil if it depends on the
@@ -136,17 +130,17 @@ func (check *Checker) objDecl(obj Object, def *Named) {
// order code.
switch obj := obj.(type) {
case *Const:
- if check.cycle(obj) || obj.typ == nil {
+ if !check.validCycle(obj) || obj.typ == nil {
obj.typ = Typ[Invalid]
}
case *Var:
- if check.cycle(obj) || obj.typ == nil {
+ if !check.validCycle(obj) || obj.typ == nil {
obj.typ = Typ[Invalid]
}
case *TypeName:
- if check.cycle(obj) {
+ if !check.validCycle(obj) {
// break cycle
// (without this, calling underlying()
// below may lead to an endless loop
@@ -156,7 +150,7 @@ func (check *Checker) objDecl(obj Object, def *Named) {
}
case *Func:
- if check.cycle(obj) {
+ if !check.validCycle(obj) {
// Don't set obj.typ to Typ[Invalid] here
// because plenty of code type-asserts that
// functions have a *Signature type. Grey
@@ -178,11 +172,11 @@ func (check *Checker) objDecl(obj Object, def *Named) {
unreachable()
}
- // save/restore current context and setup object context
- defer func(ctxt context) {
- check.context = ctxt
- }(check.context)
- check.context = context{
+ // save/restore current environment and set up object environment
+ defer func(env environment) {
+ check.environment = env
+ }(check.environment)
+ check.environment = environment{
scope: d.file,
}
@@ -210,9 +204,9 @@ func (check *Checker) objDecl(obj Object, def *Named) {
}
}
-// cycle checks if the cycle starting with obj is valid and
+// validCycle reports whether the cycle starting with obj is valid and
// reports an error if it is not.
-func (check *Checker) cycle(obj Object) (isCycle bool) {
+func (check *Checker) validCycle(obj Object) (valid bool) {
// The object map contains the package scope objects and the non-interface methods.
if debug {
info := check.objMap[obj]
@@ -228,13 +222,23 @@ func (check *Checker) cycle(obj Object) (isCycle bool) {
assert(obj.color() >= grey)
start := obj.color() - grey // index of obj in objPath
cycle := check.objPath[start:]
- nval := 0 // number of (constant or variable) values in the cycle
- ndef := 0 // number of type definitions in the cycle
+ tparCycle := false // if set, the cycle is through a type parameter list
+ nval := 0 // number of (constant or variable) values in the cycle; valid if !generic
+ ndef := 0 // number of type definitions in the cycle; valid if !generic
+loop:
for _, obj := range cycle {
switch obj := obj.(type) {
case *Const, *Var:
nval++
case *TypeName:
+ // If we reach a generic type that is part of a cycle
+ // and we are in a type parameter list, we have a cycle
+ // through a type parameter list, which is invalid.
+ if check.inTParamList && isGeneric(obj.typ) {
+ tparCycle = true
+ break loop
+ }
+
// Determine if the type name is an alias or not. For
// package-level objects, use the object map which
// provides syntactic information (which doesn't rely
@@ -262,121 +266,38 @@ func (check *Checker) cycle(obj Object) (isCycle bool) {
if check.conf.Trace {
check.trace(obj.Pos(), "## cycle detected: objPath = %s->%s (len = %d)", pathString(cycle), obj.Name(), len(cycle))
- check.trace(obj.Pos(), "## cycle contains: %d values, %d type definitions", nval, ndef)
+ if tparCycle {
+ check.trace(obj.Pos(), "## cycle contains: generic type in a type parameter list")
+ } else {
+ check.trace(obj.Pos(), "## cycle contains: %d values, %d type definitions", nval, ndef)
+ }
defer func() {
- if isCycle {
+ if valid {
+ check.trace(obj.Pos(), "=> cycle is valid")
+ } else {
check.trace(obj.Pos(), "=> error: cycle is invalid")
}
}()
}
- // A cycle involving only constants and variables is invalid but we
- // ignore them here because they are reported via the initialization
- // cycle check.
- if nval == len(cycle) {
- return false
- }
+ if !tparCycle {
+ // A cycle involving only constants and variables is invalid but we
+ // ignore them here because they are reported via the initialization
+ // cycle check.
+ if nval == len(cycle) {
+ return true
+ }
- // A cycle involving only types (and possibly functions) must have at least
- // one type definition to be permitted: If there is no type definition, we
- // have a sequence of alias type names which will expand ad infinitum.
- if nval == 0 && ndef > 0 {
- return false // cycle is permitted
+ // A cycle involving only types (and possibly functions) must have at least
+ // one type definition to be permitted: If there is no type definition, we
+ // have a sequence of alias type names which will expand ad infinitum.
+ if nval == 0 && ndef > 0 {
+ return true
+ }
}
check.cycleError(cycle)
-
- return true
-}
-
-type typeInfo uint
-
-// validType verifies that the given type does not "expand" infinitely
-// producing a cycle in the type graph. Cycles are detected by marking
-// defined types.
-// (Cycles involving alias types, as in "type A = [10]A" are detected
-// earlier, via the objDecl cycle detection mechanism.)
-func (check *Checker) validType(typ Type, path []Object) typeInfo {
- const (
- unknown typeInfo = iota
- marked
- valid
- invalid
- )
-
- switch t := typ.(type) {
- case *Array:
- return check.validType(t.elem, path)
-
- case *Struct:
- for _, f := range t.fields {
- if check.validType(f.typ, path) == invalid {
- return invalid
- }
- }
-
- case *Union:
- for _, t := range t.terms {
- if check.validType(t.typ, path) == invalid {
- return invalid
- }
- }
-
- case *Interface:
- for _, etyp := range t.embeddeds {
- if check.validType(etyp, path) == invalid {
- return invalid
- }
- }
-
- case *Named:
- // If t is parameterized, we should be considering the instantiated (expanded)
- // form of t, but in general we can't with this algorithm: if t is an invalid
- // type it may be so because it infinitely expands through a type parameter.
- // Instantiating such a type would lead to an infinite sequence of instantiations.
- // In general, we need "type flow analysis" to recognize those cases.
- // Example: type A[T any] struct{ x A[*T] } (issue #48951)
- // In this algorithm we always only consider the orginal, uninstantiated type.
- // This won't recognize some invalid cases with parameterized types, but it
- // will terminate.
- t = t.orig
-
- // don't touch the type if it is from a different package or the Universe scope
- // (doing so would lead to a race condition - was issue #35049)
- if t.obj.pkg != check.pkg {
- return valid
- }
-
- // don't report a 2nd error if we already know the type is invalid
- // (e.g., if a cycle was detected earlier, via under).
- if t.underlying == Typ[Invalid] {
- t.info = invalid
- return invalid
- }
-
- switch t.info {
- case unknown:
- t.info = marked
- t.info = check.validType(t.fromRHS, append(path, t.obj)) // only types of current package added to path
- case marked:
- // cycle detected
- for i, tn := range path {
- if t.obj.pkg != check.pkg {
- panic("type cycle via package-external type")
- }
- if tn == t.obj {
- check.cycleError(path[i:])
- t.info = invalid
- t.underlying = Typ[Invalid]
- return invalid
- }
- }
- panic("cycle start not found")
- }
- return t.info
- }
-
- return valid
+ return false
}
// cycleError reports a declaration cycle starting with
@@ -387,8 +308,13 @@ func (check *Checker) cycleError(cycle []Object) {
// cycle? That would be more consistent with other error messages.
i := firstInSrc(cycle)
obj := cycle[i]
+ // If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
+ tname, _ := obj.(*TypeName)
+ if tname != nil && tname.IsAlias() {
+ check.validAlias(tname, Typ[Invalid])
+ }
var err error_
- if check.conf.CompilerErrorMessages {
+ if tname != nil && check.conf.CompilerErrorMessages {
err.errorf(obj, "invalid recursive type %s", obj.Name())
} else {
err.errorf(obj, "illegal cycle in declaration of %s", obj.Name())
@@ -534,7 +460,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
}
}
- check.initVars(lhs, []syntax.Expr{init}, nopos)
+ check.initVars(lhs, []syntax.Expr{init}, nil)
}
// isImportedConstraint reports whether typ is an imported type constraint.
@@ -552,10 +478,12 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
var rhs Type
check.later(func() {
- check.validType(obj.typ, nil)
+ if t, _ := obj.typ.(*Named); t != nil { // type may be invalid
+ check.validType(t)
+ }
// If typ is local, an error was already reported where typ is specified/defined.
if check.isImportedConstraint(rhs) && !check.allowVersion(check.pkg, 1, 18) {
- check.errorf(tdecl.Type.Pos(), "using type constraint %s requires go1.18 or later", rhs)
+ check.versionErrorf(tdecl.Type, "go1.18", "using type constraint %s", rhs)
}
}).describef(obj, "validType(%s)", obj.Name())
@@ -570,16 +498,12 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
// alias declaration
if alias {
if !check.allowVersion(check.pkg, 1, 9) {
- if check.conf.CompilerErrorMessages {
- check.error(tdecl, "type aliases only supported as of -lang=go1.9")
- } else {
- check.error(tdecl, "type aliases requires go1.9 or later")
- }
+ check.versionErrorf(tdecl, "go1.9", "type aliases")
}
- obj.typ = Typ[Invalid]
+ check.brokenAlias(obj)
rhs = check.varType(tdecl.Type)
- obj.typ = rhs
+ check.validAlias(obj, rhs)
return
}
@@ -605,10 +529,11 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named
}
// Disallow a lone type parameter as the RHS of a type declaration (issue #45639).
- // We can look directly at named.underlying because even if it is still a *Named
- // type (underlying not fully resolved yet) it cannot become a type parameter due
- // to this very restriction.
- if tpar, _ := named.underlying.(*TypeParam); tpar != nil {
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if the RHS is a lone type parameter, we will
+ // use its underlying type (like we do for any RHS in a type declaration), and its
+ // underlying type is an interface and the type declaration is well defined.
+ if isTypeParam(rhs) {
check.error(tdecl.Type, "cannot use a type parameter as RHS in type declaration")
named.underlying = Typ[Invalid]
}
@@ -629,32 +554,40 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Fiel
// Example: type T[P T[P]] interface{}
*dst = bindTParams(tparams)
+ // Signal to cycle detection that we are in a type parameter list.
+ // We can only be inside one type parameter list at any given time:
+ // function closures may appear inside a type parameter list but they
+ // cannot be generic, and their bodies are processed in delayed and
+ // sequential fashion. Note that with each new declaration, we save
+ // the existing environment and restore it when done; thus inTParamList
+ // is true exactly only when we are in a specific type parameter list.
+ assert(!check.inTParamList)
+ check.inTParamList = true
+ defer func() {
+ check.inTParamList = false
+ }()
+
// Keep track of bounds for later validation.
var bound Type
var bounds []Type
- var posers []poser
for i, f := range list {
// Optimization: Re-use the previous type bound if it hasn't changed.
// This also preserves the grouped output of type parameter lists
// when printing type strings.
if i == 0 || f.Type != list[i-1].Type {
bound = check.bound(f.Type)
+ if isTypeParam(bound) {
+ // We may be able to allow this since it is now well-defined what
+ // the underlying type and thus type set of a type parameter is.
+ // But we may need some additional form of cycle detection within
+ // type parameter lists.
+ check.error(f.Type, "cannot use a type parameter as constraint")
+ bound = Typ[Invalid]
+ }
bounds = append(bounds, bound)
- posers = append(posers, f.Type)
}
tparams[i].bound = bound
}
-
- check.later(func() {
- for i, bound := range bounds {
- if _, ok := under(bound).(*TypeParam); ok {
- check.error(posers[i], "cannot use a type parameter as constraint")
- }
- }
- for _, tpar := range tparams {
- tpar.iface() // compute type set
- }
- })
}
func (check *Checker) bound(x syntax.Expr) Type {
@@ -702,8 +635,9 @@ func (check *Checker) collectMethods(obj *TypeName) {
// spec: "If the base type is a struct type, the non-blank method
// and field names must be distinct."
- base := asNamed(obj.typ) // shouldn't fail but be conservative
+ base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
if base != nil {
+ assert(base.targs.Len() == 0) // collectMethods should not be called on an instantiated type
u := base.under()
if t, _ := u.(*Struct); t != nil {
for _, fld := range t.fields {
@@ -716,7 +650,8 @@ func (check *Checker) collectMethods(obj *TypeName) {
// Checker.Files may be called multiple times; additional package files
// may add methods to already type-checked types. Add pre-existing methods
// so that we can detect redeclarations.
- for _, m := range base.methods {
+ for i := 0; i < base.methods.Len(); i++ {
+ m := base.methods.At(i, nil)
assert(m.name != "_")
assert(mset.insert(m) == nil)
}
@@ -748,7 +683,7 @@ func (check *Checker) collectMethods(obj *TypeName) {
if base != nil {
base.resolve(nil) // TODO(mdempsky): Probably unnecessary.
- base.methods = append(base.methods, m)
+ base.AddMethod(m)
}
}
}
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
index b56d11a28b..2318b95f3d 100644
--- a/src/cmd/compile/internal/types2/errors.go
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -98,10 +98,32 @@ func sprintf(qf Qualifier, debug bool, format string, args ...interface{}) strin
arg = a.String()
case syntax.Expr:
arg = syntax.String(a)
+ case []syntax.Expr:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(syntax.String(x))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
case Object:
arg = ObjectString(a, qf)
case Type:
arg = typeString(a, qf, debug)
+ case []Type:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(typeString(x, qf, debug))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
}
args[i] = arg
}
@@ -230,6 +252,16 @@ func (check *Checker) softErrorf(at poser, format string, args ...interface{}) {
check.err(at, check.sprintf(format, args...), true)
}
+func (check *Checker) versionErrorf(at poser, goVersion string, format string, args ...interface{}) {
+ msg := check.sprintf(format, args...)
+ if check.conf.CompilerErrorMessages {
+ msg = fmt.Sprintf("%s requires %s or later (-lang was set to %s; check go.mod)", msg, goVersion, check.conf.GoVersion)
+ } else {
+ msg = fmt.Sprintf("%s requires %s or later", msg, goVersion)
+ }
+ check.err(at, msg, true)
+}
+
// posFor reports the left (= start) position of at.
func posFor(at poser) syntax.Pos {
switch x := at.(type) {
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
index 95b96f2334..7a668d20f1 100644
--- a/src/cmd/compile/internal/types2/expr.go
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -73,11 +73,7 @@ func init() {
func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool {
if pred := m[op]; pred != nil {
if !pred(x.typ) {
- if check.conf.CompilerErrorMessages {
- check.errorf(x, invalidOp+"operator %s not defined on %s", op, x)
- } else {
- check.errorf(x, invalidOp+"operator %s not defined for %s", op, x)
- }
+ check.errorf(x, invalidOp+"operator %s not defined on %s", op, x)
return false
}
} else {
@@ -116,7 +112,7 @@ func (check *Checker) overflow(x *operand) {
// x.typ cannot be a type parameter (type
// parameters cannot be constant types).
if isTyped(x.typ) {
- check.representable(x, asBasic(x.typ))
+ check.representable(x, under(x.typ).(*Basic))
return
}
@@ -160,11 +156,10 @@ var op2str2 = [...]string{
// If typ is a type parameter, underIs returns the result of typ.underIs(f).
// Otherwise, underIs returns the result of f(under(typ)).
func underIs(typ Type, f func(Type) bool) bool {
- u := under(typ)
- if tpar, _ := u.(*TypeParam); tpar != nil {
+ if tpar, _ := typ.(*TypeParam); tpar != nil {
return tpar.underIs(f)
}
- return f(u)
+ return f(under(typ))
}
func (check *Checker) unary(x *operand, e *syntax.Operation) {
@@ -187,29 +182,25 @@ func (check *Checker) unary(x *operand, e *syntax.Operation) {
return
case syntax.Recv:
- var elem Type
- if !underIs(x.typ, func(u Type) bool {
- ch, _ := u.(*Chan)
- if ch == nil {
- check.errorf(x, invalidOp+"cannot receive from non-channel %s", x)
- return false
- }
- if ch.dir == SendOnly {
- check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x)
- return false
- }
- if elem != nil && !Identical(ch.elem, elem) {
- check.errorf(x, invalidOp+"channels of %s must have the same element type", x)
- return false
- }
- elem = ch.elem
- return true
- }) {
+ u := structuralType(x.typ)
+ if u == nil {
+ check.errorf(x, invalidOp+"cannot receive from %s: no structural type", x)
+ x.mode = invalid
+ return
+ }
+ ch, _ := u.(*Chan)
+ if ch == nil {
+ check.errorf(x, invalidOp+"cannot receive from non-channel %s", x)
+ x.mode = invalid
+ return
+ }
+ if ch.dir == SendOnly {
+ check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x)
x.mode = invalid
return
}
x.mode = commaok
- x.typ = elem
+ x.typ = ch.elem
check.hasCallOrRecv = true
return
}
@@ -513,8 +504,11 @@ func (check *Checker) invalidConversion(code errorCode, x *operand, target Type)
// Also, if x is a constant, it must be representable as a value of typ,
// and if x is the (formerly untyped) lhs operand of a non-constant
// shift, it must be an integer value.
-//
func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
+ check.updateExprType0(nil, x, typ, final)
+}
+
+func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final bool) {
old, found := check.untyped[x]
if !found {
return // nothing to do
@@ -557,7 +551,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
// No operands to take care of.
case *syntax.ParenExpr:
- check.updateExprType(x.X, typ, final)
+ check.updateExprType0(x, x.X, typ, final)
// case *syntax.UnaryExpr:
// // If x is a constant, the operands were constants.
@@ -568,7 +562,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
// if old.val != nil {
// break
// }
- // check.updateExprType(x.X, typ, final)
+ // check.updateExprType0(x, x.X, typ, final)
case *syntax.Operation:
if x.Y == nil {
@@ -589,7 +583,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
if old.val != nil {
break
}
- check.updateExprType(x.X, typ, final)
+ check.updateExprType0(x, x.X, typ, final)
break
}
@@ -603,11 +597,11 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
} else if isShift(x.Op) {
// The result type depends only on lhs operand.
// The rhs type was updated when checking the shift.
- check.updateExprType(x.X, typ, final)
+ check.updateExprType0(x, x.X, typ, final)
} else {
// The operand types match the result type.
- check.updateExprType(x.X, typ, final)
- check.updateExprType(x.Y, typ, final)
+ check.updateExprType0(x, x.X, typ, final)
+ check.updateExprType0(x, x.Y, typ, final)
}
default:
@@ -617,7 +611,7 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
// If the new type is not final and still untyped, just
// update the recorded type.
if !final && isUntyped(typ) {
- old.typ = asBasic(typ)
+ old.typ = under(typ).(*Basic)
check.untyped[x] = old
return
}
@@ -630,8 +624,12 @@ func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
// If x is the lhs of a shift, its final type must be integer.
// We already know from the shift check that it is representable
// as an integer if it is a constant.
- if !isInteger(typ) {
- check.errorf(x, invalidOp+"shifted operand %s (type %s) must be integer", x, typ)
+ if !allInteger(typ) {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(x, invalidOp+"%s (shift of type %s)", parent, typ)
+ } else {
+ check.errorf(x, invalidOp+"shifted operand %s (type %s) must be integer", x, typ)
+ }
return
}
// Even if we have an integer, if the value is a constant we
@@ -663,7 +661,11 @@ func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) {
func (check *Checker) convertUntyped(x *operand, target Type) {
newType, val, code := check.implicitTypeAndValue(x, target)
if code != 0 {
- check.invalidConversion(code, x, safeUnderlying(target))
+ t := target
+ if !isTypeParam(target) {
+ t = safeUnderlying(target)
+ }
+ check.invalidConversion(code, x, t)
x.mode = invalid
return
}
@@ -742,16 +744,19 @@ func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, const
default:
return nil, nil, _InvalidUntypedConversion
}
- case *TypeParam:
- // TODO(gri) review this code - doesn't look quite right
- ok := u.underIs(func(t Type) bool {
- target, _, _ := check.implicitTypeAndValue(x, t)
- return target != nil
- })
- if !ok {
- return nil, nil, _InvalidUntypedConversion
- }
case *Interface:
+ if isTypeParam(target) {
+ if !u.typeSet().underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ t, _, _ := check.implicitTypeAndValue(x, u)
+ return t != nil
+ }) {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ break
+ }
// Update operand types to the default type rather than the target
// (interface) type: values must have concrete dynamic types.
// Untyped nil was handled upfront.
@@ -772,10 +777,12 @@ func (check *Checker) comparison(x, y *operand, op syntax.Operator) {
xok, _ := x.assignableTo(check, y.typ, nil)
yok, _ := y.assignableTo(check, x.typ, nil)
if xok || yok {
+ equality := false
defined := false
switch op {
case syntax.Eql, syntax.Neq:
// spec: "The equality operators == and != apply to operands that are comparable."
+ equality = true
defined = Comparable(x.typ) && Comparable(y.typ) || x.isNil() && hasNil(y.typ) || y.isNil() && hasNil(x.typ)
case syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq:
// spec: The ordering operators <, <=, >, and >= apply to operands that are ordered."
@@ -784,14 +791,18 @@ func (check *Checker) comparison(x, y *operand, op syntax.Operator) {
unreachable()
}
if !defined {
- typ := x.typ
- if x.isNil() {
- typ = y.typ
- }
- if check.conf.CompilerErrorMessages {
- err = check.sprintf("operator %s not defined on %s", op, typ)
+ if equality && (isTypeParam(x.typ) || isTypeParam(y.typ)) {
+ typ := x.typ
+ if isTypeParam(y.typ) {
+ typ = y.typ
+ }
+ err = check.sprintf("%s is not comparable", typ)
} else {
- err = check.sprintf("operator %s not defined for %s", op, typ)
+ typ := x.typ
+ if x.isNil() {
+ typ = y.typ
+ }
+ err = check.sprintf("operator %s not defined on %s", op, typ)
}
}
} else {
@@ -869,7 +880,7 @@ func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) {
x.mode = invalid
return
} else if !allUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) {
- check.errorf(y, invalidOp+"signed shift count %s requires go1.13 or later", y)
+ check.versionErrorf(y, "go1.13", invalidOp+"signed shift count %s", y)
x.mode = invalid
return
}
@@ -991,8 +1002,9 @@ func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op
return
}
+ // TODO(gri) make canMix more efficient - called for each binary operation
canMix := func(x, y *operand) bool {
- if IsInterface(x.typ) || IsInterface(y.typ) {
+ if IsInterface(x.typ) && !isTypeParam(x.typ) || IsInterface(y.typ) && !isTypeParam(y.typ) {
return true
}
if allBoolean(x.typ) != allBoolean(y.typ) {
@@ -1250,7 +1262,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
case hint != nil:
// no composite literal type present - use hint (element type of enclosing type)
typ = hint
- base, _ = deref(under(typ)) // *T implies &T{}
+ base, _ = deref(structuralType(typ)) // *T implies &T{}
default:
// TODO(gri) provide better error messages depending on context
@@ -1258,8 +1270,14 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
goto Error
}
- switch utyp := structure(base).(type) {
+ switch utyp := structuralType(base).(type) {
case *Struct:
+ // Prevent crash if the struct referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.fields == nil {
+ check.error(e, "illegal cycle in type declaration")
+ goto Error
+ }
if len(e.ElemList) == 0 {
break
}
@@ -1388,7 +1406,7 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
duplicate := false
// if the key is of interface type, the type is also significant when checking for duplicates
xkey := keyVal(x.val)
- if asInterface(utyp.key) != nil {
+ if IsInterface(utyp.key) {
for _, vtyp := range visited[xkey] {
if Identical(vtyp, x.typ) {
duplicate = true
@@ -1458,9 +1476,14 @@ func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKin
if x.mode == invalid {
goto Error
}
+ // TODO(gri) we may want to permit type assertions on type parameter values at some point
+ if isTypeParam(x.typ) {
+ check.errorf(x, invalidOp+"cannot use type assertion on type parameter value %s", x)
+ goto Error
+ }
xtyp, _ := under(x.typ).(*Interface)
if xtyp == nil {
- check.errorf(x, "%s is not an interface type", x)
+ check.errorf(x, invalidOp+"%s is not an interface", x)
goto Error
}
// x.(type) expressions are encoded via TypeSwitchGuards
@@ -1620,25 +1643,20 @@ func (check *Checker) typeAssertion(e syntax.Expr, x *operand, xtyp *Interface,
return
}
- var msg string
- if wrongType != nil {
- if Identical(method.typ, wrongType.typ) {
- msg = fmt.Sprintf("%s method has pointer receiver", method.name)
- } else {
- msg = fmt.Sprintf("wrong type for method %s: have %s, want %s", method.name, wrongType.typ, method.typ)
- }
- } else {
- msg = fmt.Sprintf("missing %s method", method.name)
- }
-
var err error_
+ var msg string
if typeSwitch {
err.errorf(e.Pos(), "impossible type switch case: %s", e)
- err.errorf(nopos, "%s cannot have dynamic type %s (%s)", x, T, msg)
+ msg = check.sprintf("%s cannot have dynamic type %s %s", x, T,
+ check.missingMethodReason(T, x.typ, method, wrongType))
+
} else {
err.errorf(e.Pos(), "impossible type assertion: %s", e)
- err.errorf(nopos, "%s does not implement %s (%s)", T, x.typ, msg)
+ msg = check.sprintf("%s does not implement %s %s", T, x.typ,
+ check.missingMethodReason(T, x.typ, method, wrongType))
+
}
+ err.errorf(nopos, msg)
check.report(&err)
}
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
index 67110704e9..4995d2d730 100644
--- a/src/cmd/compile/internal/types2/index.go
+++ b/src/cmd/compile/internal/types2/index.go
@@ -34,7 +34,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
return false
case value:
- if sig := asSignature(x.typ); sig != nil && sig.TypeParams().Len() > 0 {
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
// function instantiation
return true
}
@@ -72,7 +72,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.typ = typ.elem
case *Pointer:
- if typ := asArray(typ.base); typ != nil {
+ if typ, _ := under(typ.base).(*Array); typ != nil {
valid = true
length = typ.len
x.mode = variable
@@ -99,12 +99,15 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
x.expr = e
return false
- case *TypeParam:
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
// TODO(gri) report detailed failure cause for better error messages
var key, elem Type // key != nil: we must have all maps
mode := variable // non-maps result mode
// TODO(gri) factor out closure and use it for non-typeparam cases as well
- if typ.underIs(func(u Type) bool {
+ if typ.typeSet().underIs(func(u Type) bool {
l := int64(-1) // valid if >= 0
var k, e Type // k is only set for maps
switch t := u.(type) {
@@ -120,7 +123,7 @@ func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst boo
mode = value
}
case *Pointer:
- if t := asArray(t.base); t != nil {
+ if t, _ := under(t.base).(*Array); t != nil {
l = t.len
e = t.elem
}
@@ -210,16 +213,20 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
valid := false
length := int64(-1) // valid if >= 0
- switch u := structure(x.typ).(type) {
+ switch u := structuralString(x.typ).(type) {
case nil:
- check.errorf(x, invalidOp+"cannot slice %s: type set has no single underlying type", x)
+ check.errorf(x, invalidOp+"cannot slice %s: %s has no structural type", x, x.typ)
x.mode = invalid
return
case *Basic:
if isString(u) {
if e.Full {
- check.error(x, invalidOp+"3-index slice of string")
+ at := e.Index[2]
+ if at == nil {
+ at = e // e.Index[2] should be present but be careful
+ }
+ check.error(at, invalidOp+"3-index slice of string")
x.mode = invalid
return
}
@@ -229,7 +236,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
}
// spec: "For untyped string operands the result
// is a non-constant value of type string."
- if u.kind == UntypedString {
+ if isUntyped(x.typ) {
x.typ = Typ[String]
}
}
@@ -245,7 +252,7 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
x.typ = &Slice{elem: u.elem}
case *Pointer:
- if u := asArray(u.base); u != nil {
+ if u, _ := under(u.base).(*Array); u != nil {
valid = true
length = u.len
x.typ = &Slice{elem: u.elem}
@@ -302,9 +309,12 @@ func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
L:
for i, x := range ind[:len(ind)-1] {
if x > 0 {
- for _, y := range ind[i+1:] {
- if y >= 0 && x > y {
- check.errorf(e, "invalid slice indices: %d > %d", x, y)
+ for j, y := range ind[i+1:] {
+ if y >= 0 && y < x {
+ // The value y corresponds to the expression e.Index[i+1+j].
+ // Because y >= 0, it must have been set from the expression
+ // when checking indices and thus e.Index[i+1+j] is not nil.
+ check.errorf(e.Index[i+1+j], "invalid slice indices: %d < %d", y, x)
break L // only report one error, ok to continue
}
}
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
index 24c461f1c3..51d0d22144 100644
--- a/src/cmd/compile/internal/types2/infer.go
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -19,15 +19,17 @@ const useConstraintTypeInference = true
// function arguments args, if any. There must be at least one type parameter, no more type arguments
// than type parameters, and params and args must match in number (incl. zero).
// If successful, infer returns the complete list of type arguments, one for each type parameter.
-// Otherwise the result is nil and appropriate errors will be reported unless report is set to false.
+// Otherwise the result is nil and appropriate errors will be reported.
//
-// Inference proceeds in 3 steps:
+// Inference proceeds as follows:
//
-// 1) Start with given type arguments.
-// 2) Infer type arguments from typed function arguments.
-// 3) Infer type arguments from untyped function arguments.
+// Starting with given type arguments
+// 1) apply FTI (function type inference) with typed arguments,
+// 2) apply CTI (constraint type inference),
+// 3) apply FTI with untyped function arguments,
+// 4) apply CTI.
//
-// Constraint type inference is used after each step to expand the set of type arguments.
+// The process stops as soon as all type arguments are known or an error occurs.
func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (result []Type) {
if debug {
defer func() {
@@ -46,34 +48,69 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
// Function parameters and arguments must match in number.
assert(params.Len() == len(args))
- // --- 0 ---
// If we already have all type arguments, we're done.
if len(targs) == n {
return targs
}
// len(targs) < n
- // --- 1 ---
- // Explicitly provided type arguments take precedence over any inferred types;
- // and types inferred via constraint type inference take precedence over types
- // inferred from function arguments.
- // If we have type arguments, see how far we get with constraint type inference.
- if len(targs) > 0 && useConstraintTypeInference {
- var index int
- targs, index = check.inferB(tparams, targs)
- if targs == nil || index < 0 {
- return targs
+ // If we have more than 2 arguments, we may have arguments with named and unnamed types.
+ // If that is the case, permute params and args such that the arguments with named
+ // types are first in the list. This doesn't affect type inference if all types are taken
+ // as is. But when we have inexact unification enabled (as is the case for function type
+ // inference), when a named type is unified with an unnamed type, unification proceeds
+ // with the underlying type of the named type because otherwise unification would fail
+ // right away. This leads to an asymmetry in type inference: in cases where arguments of
+ // named and unnamed types are passed to parameters with identical type, different types
+ // (named vs underlying) may be inferred depending on the order of the arguments.
+ // By ensuring that named types are seen first, order dependence is avoided and unification
+ // succeeds where it can.
+ //
+ // This code is disabled for now pending decision whether we want to address cases like
+ // these and make the spec on type inference more complicated (see issue #43056).
+ const enableArgSorting = false
+ if m := len(args); m >= 2 && enableArgSorting {
+ // Determine indices of arguments with named and unnamed types.
+ var named, unnamed []int
+ for i, arg := range args {
+ if hasName(arg.typ) {
+ named = append(named, i)
+ } else {
+ unnamed = append(unnamed, i)
+ }
+ }
+
+ // If we have named and unnamed types, move the arguments with
+ // named types first. Update the parameter list accordingly.
+ // Make copies so as not to clobber the incoming slices.
+ if len(named) != 0 && len(unnamed) != 0 {
+ params2 := make([]*Var, m)
+ args2 := make([]*operand, m)
+ i := 0
+ for _, j := range named {
+ params2[i] = params.At(j)
+ args2[i] = args[j]
+ i++
+ }
+ for _, j := range unnamed {
+ params2[i] = params.At(j)
+ args2[i] = args[j]
+ i++
+ }
+ params = NewTuple(params2...)
+ args = args2
}
}
- // Continue with the type arguments we have now. Avoid matching generic
+ // --- 1 ---
+ // Continue with the type arguments we have. Avoid matching generic
// parameters that already have type arguments against function arguments:
// It may fail because matching uses type identity while parameter passing
// uses assignment rules. Instantiate the parameter list with the type
// arguments we have, and continue with that parameter list.
- // First, make sure we have a "full" list of type arguments, so of which
- // may be nil (unknown).
+ // First, make sure we have a "full" list of type arguments, some of which
+ // may be nil (unknown). Make a copy so as to not clobber the incoming slice.
if len(targs) < n {
targs2 := make([]Type, n)
copy(targs2, targs)
@@ -90,7 +127,6 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
params = check.subst(nopos, params, smap, nil).(*Tuple)
}
- // --- 2 ---
// Unify parameter and argument types for generic parameters with typed arguments
// and collect the indices of generic parameters with untyped arguments.
// Terminology: generic parameter = function parameter with a type-parameterized type
@@ -167,11 +203,12 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
return targs
}
+ // --- 2 ---
// See how far we get with constraint type inference.
// Note that even if we don't have any type arguments, constraint type inference
// may produce results for constraints that explicitly specify a type.
if useConstraintTypeInference {
- targs, index = check.inferB(tparams, targs)
+ targs, index = check.inferB(pos, tparams, targs)
if targs == nil || index < 0 {
return targs
}
@@ -207,9 +244,10 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type,
return targs
}
+ // --- 4 ---
// Again, follow up with constraint type inference.
if useConstraintTypeInference {
- targs, index = check.inferB(tparams, targs)
+ targs, index = check.inferB(pos, tparams, targs)
if targs == nil || index < 0 {
return targs
}
@@ -360,7 +398,7 @@ func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
// first type argument in that list that couldn't be inferred (and thus is nil). If all
// type arguments were inferred successfully, index is < 0. The number of type arguments
// provided may be less than the number of type parameters, but there must be at least one.
-func (check *Checker) inferB(tparams []*TypeParam, targs []Type) (types []Type, index int) {
+func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type) (types []Type, index int) {
assert(len(tparams) >= len(targs) && len(targs) > 0)
// Setup bidirectional unification between constraints
@@ -378,7 +416,7 @@ func (check *Checker) inferB(tparams []*TypeParam, targs []Type) (types []Type,
// If a constraint has a structural type, unify the corresponding type parameter with it.
for _, tpar := range tparams {
- sbound := structure(tpar)
+ sbound := structuralType(tpar)
if sbound != nil {
// If the structural type is the underlying type of a single
// defined type in the constraint, use that defined type instead.
@@ -388,7 +426,7 @@ func (check *Checker) inferB(tparams []*TypeParam, targs []Type) (types []Type,
if !u.unify(tpar, sbound) {
// TODO(gri) improve error message by providing the type arguments
// which we know already
- check.errorf(tpar.obj, "%s does not match %s", tpar, sbound)
+ check.errorf(pos, "%s does not match %s", tpar, sbound)
return nil, 0
}
}
diff --git a/src/cmd/compile/internal/types2/initorder.go b/src/cmd/compile/internal/types2/initorder.go
index 4081627666..cf6110baa9 100644
--- a/src/cmd/compile/internal/types2/initorder.go
+++ b/src/cmd/compile/internal/types2/initorder.go
@@ -7,6 +7,7 @@ package types2
import (
"container/heap"
"fmt"
+ "sort"
)
// initOrder computes the Info.InitOrder for package variables.
@@ -190,6 +191,12 @@ type graphNode struct {
ndeps int // number of outstanding dependencies before this object can be initialized
}
+// cost returns the cost of removing this node, which involves copying each
+// predecessor to each successor (and vice-versa).
+func (n *graphNode) cost() int {
+ return len(n.pred) * len(n.succ)
+}
+
type nodeSet map[*graphNode]bool
func (s *nodeSet) add(p *graphNode) {
@@ -227,35 +234,48 @@ func dependencyGraph(objMap map[Object]*declInfo) []*graphNode {
}
}
+ var G, funcG []*graphNode // separate non-functions and functions
+ for _, n := range M {
+ if _, ok := n.obj.(*Func); ok {
+ funcG = append(funcG, n)
+ } else {
+ G = append(G, n)
+ }
+ }
+
// remove function nodes and collect remaining graph nodes in G
// (Mutually recursive functions may introduce cycles among themselves
// which are permitted. Yet such cycles may incorrectly inflate the dependency
// count for variables which in turn may not get scheduled for initialization
// in correct order.)
- var G []*graphNode
- for obj, n := range M {
- if _, ok := obj.(*Func); ok {
- // connect each predecessor p of n with each successor s
- // and drop the function node (don't collect it in G)
- for p := range n.pred {
- // ignore self-cycles
- if p != n {
- // Each successor s of n becomes a successor of p, and
- // each predecessor p of n becomes a predecessor of s.
- for s := range n.succ {
- // ignore self-cycles
- if s != n {
- p.succ.add(s)
- s.pred.add(p)
- delete(s.pred, n) // remove edge to n
- }
+ //
+ // Note that because we recursively copy predecessors and successors
+ // throughout the function graph, the cost of removing a function at
+ // position X is proportional to cost * (len(funcG)-X). Therefore, we should
+ // remove high-cost functions last.
+ sort.Slice(funcG, func(i, j int) bool {
+ return funcG[i].cost() < funcG[j].cost()
+ })
+ for _, n := range funcG {
+ // connect each predecessor p of n with each successor s
+ // and drop the function node (don't collect it in G)
+ for p := range n.pred {
+ // ignore self-cycles
+ if p != n {
+ // Each successor s of n becomes a successor of p, and
+ // each predecessor p of n becomes a predecessor of s.
+ for s := range n.succ {
+ // ignore self-cycles
+ if s != n {
+ p.succ.add(s)
+ s.pred.add(p)
}
- delete(p.succ, n) // remove edge to n
}
+ delete(p.succ, n) // remove edge to n
}
- } else {
- // collect non-function nodes
- G = append(G, n)
+ }
+ for s := range n.succ {
+ delete(s.pred, n) // remove edge to n
}
}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
index 44cf593ffb..e0f2d8abe1 100644
--- a/src/cmd/compile/internal/types2/instantiate.go
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -13,94 +13,108 @@ import (
"fmt"
)
-// Instantiate instantiates the type typ with the given type arguments targs.
-// typ must be a *Named or a *Signature type, and its number of type parameters
-// must match the number of provided type arguments. The result is a new,
-// instantiated (not parameterized) type of the same kind (either a *Named or a
-// *Signature). Any methods attached to a *Named are simply copied; they are
-// not instantiated.
+// Instantiate instantiates the type orig with the given type arguments targs.
+// orig must be a *Named or a *Signature type. If there is no error, the
+// resulting Type is a new, instantiated (not parameterized) type of the same
+// kind (either a *Named or a *Signature). Methods attached to a *Named type
+// are also instantiated, and associated with a new *Func that has the same
+// position as the original method, but nil function scope.
//
-// If ctxt is non-nil, it may be used to de-dupe the instance against previous
-// instances with the same identity.
+// If ctxt is non-nil, it may be used to de-duplicate the instance against
+// previous instances with the same identity. As a special case, generic
+// *Signature origin types are only considered identical if they are pointer
+// equivalent, so that instantiating distinct (but possibly identical)
+// signatures will yield different instances.
//
-// If verify is set and constraint satisfaction fails, the returned error may
-// be of dynamic type ArgumentError indicating which type argument did not
-// satisfy its corresponding type parameter constraint, and why.
+// If validate is set, Instantiate verifies that the number of type arguments
+// and parameters match, and that the type arguments satisfy their
+// corresponding type constraints. If verification fails, the resulting error
+// may wrap an *ArgumentError indicating which type argument did not satisfy
+// its corresponding type parameter constraint, and why.
//
-// TODO(rfindley): change this function to also return an error if lengths of
-// tparams and targs do not match.
-func Instantiate(ctxt *Context, typ Type, targs []Type, validate bool) (Type, error) {
- inst := (*Checker)(nil).instance(nopos, typ, targs, ctxt)
-
- var err error
+// If validate is not set, Instantiate does not verify the type argument count
+// or whether the type arguments satisfy their constraints. Instantiate is
+// guaranteed to not return an error, but may panic. Specifically, for
+// *Signature types, Instantiate will panic immediately if the type argument
+// count is incorrect; for *Named types, a panic may occur later inside the
+// *Named API.
+func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
if validate {
var tparams []*TypeParam
- switch t := typ.(type) {
+ switch t := orig.(type) {
case *Named:
tparams = t.TypeParams().list()
case *Signature:
tparams = t.TypeParams().list()
}
+ if len(targs) != len(tparams) {
+ return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
+ }
if i, err := (*Checker)(nil).verify(nopos, tparams, targs); err != nil {
- return inst, ArgumentError{i, err}
+ return nil, &ArgumentError{i, err}
}
}
- return inst, err
+ inst := (*Checker)(nil).instance(nopos, orig, targs, ctxt)
+ return inst, nil
}
// instance creates a type or function instance using the given original type
// typ and arguments targs. For Named types the resulting instance will be
// unexpanded.
-func (check *Checker) instance(pos syntax.Pos, typ Type, targs []Type, ctxt *Context) Type {
- switch t := typ.(type) {
- case *Named:
- var h string
- if ctxt != nil {
- h = ctxt.TypeHash(t, targs)
- // typ may already have been instantiated with identical type arguments. In
- // that case, re-use the existing instance.
- if named := ctxt.typeForHash(h, nil); named != nil {
- return named
- }
+func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Context) (res Type) {
+ var h string
+ if ctxt != nil {
+ h = ctxt.instanceHash(orig, targs)
+ // typ may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ if inst := ctxt.lookup(h, orig, targs); inst != nil {
+ return inst
}
- tname := NewTypeName(pos, t.obj.pkg, t.obj.name, nil)
- named := check.newNamed(tname, t, nil, nil, nil) // underlying, tparams, and methods are set when named is resolved
- named.targs = NewTypeList(targs)
- named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, []*Func) {
+ }
+
+ switch orig := orig.(type) {
+ case *Named:
+ tname := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ named := check.newNamed(tname, orig, nil, nil, nil) // underlying, tparams, and methods are set when named is resolved
+ named.targs = newTypeList(targs)
+ named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
return expandNamed(ctxt, n, pos)
}
- if ctxt != nil {
- // It's possible that we've lost a race to add named to the context.
- // In this case, use whichever instance is recorded in the context.
- named = ctxt.typeForHash(h, named)
- }
- return named
+ res = named
case *Signature:
- tparams := t.TypeParams()
+ tparams := orig.TypeParams()
if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
return Typ[Invalid]
}
if tparams.Len() == 0 {
- return typ // nothing to do (minor optimization)
+ return orig // nothing to do (minor optimization)
}
- sig := check.subst(pos, typ, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
+ sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
// If the signature doesn't use its type parameters, subst
// will not make a copy. In that case, make a copy now (so
// we can set tparams to nil w/o causing side-effects).
- if sig == t {
+ if sig == orig {
copy := *sig
sig = ©
}
// After instantiating a generic signature, it is not generic
// anymore; we need to set tparams to nil.
sig.tparams = nil
- return sig
+ res = sig
+ default:
+ // only types and functions can be generic
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
}
- // only types and functions can be generic
- panic(fmt.Sprintf("%v: cannot instantiate %v", pos, typ))
+ if ctxt != nil {
+ // It's possible that we've lost a race to add named to the context.
+ // In this case, use whichever instance is recorded in the context.
+ res = ctxt.update(h, orig, targs, res)
+ }
+
+ return res
}
// validateTArgLen verifies that the length of targs and tparams matches,
@@ -121,35 +135,31 @@ func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool
func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type) (int, error) {
smap := makeSubstMap(tparams, targs)
for i, tpar := range tparams {
- // stop checking bounds after the first failure
- if err := check.satisfies(pos, targs[i], tpar, smap); err != nil {
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiated
+ // the parameterized type.
+ bound := check.subst(pos, tpar.bound, smap, nil)
+ if err := check.implements(targs[i], bound); err != nil {
return i, err
}
}
return -1, nil
}
-// satisfies reports whether the type argument targ satisfies the constraint of type parameter
-// parameter tpar (after any of its type parameters have been substituted through smap).
-// A suitable error is reported if the result is false.
-// TODO(gri) This should be a method of interfaces or type sets.
-func (check *Checker) satisfies(pos syntax.Pos, targ Type, tpar *TypeParam, smap substMap) error {
- iface := tpar.iface()
-
- // Every type argument satisfies interface{}.
- if iface.Empty() {
- return nil
+// implements checks if V implements T and reports an error if it doesn't.
+// The receiver may be nil if implements is called through an exported
+// API call such as AssignableTo.
+func (check *Checker) implements(V, T Type) error {
+ Vu := under(V)
+ Tu := under(T)
+ if Vu == Typ[Invalid] || Tu == Typ[Invalid] {
+ return nil // avoid follow-on errors
+ }
+ if p, _ := Vu.(*Pointer); p != nil && under(p.base) == Typ[Invalid] {
+ return nil // avoid follow-on errors (see issue #49541 for an example)
}
- // A type argument that is a type parameter with an empty type set satisfies any constraint.
- // (The empty set is a subset of any set.)
- if targ := asTypeParam(targ); targ != nil && targ.iface().typeSet().IsEmpty() {
- return nil
- }
-
- // TODO(rfindley): it would be great if users could pass in a qualifier here,
- // rather than falling back to verbose qualification. Maybe this can be part
- // of the shared context.
var qf Qualifier
if check != nil {
qf = check.qualifier
@@ -158,74 +168,104 @@ func (check *Checker) satisfies(pos syntax.Pos, targ Type, tpar *TypeParam, smap
return errors.New(sprintf(qf, false, format, args...))
}
- // No type argument with non-empty type set satisfies the empty type set.
- if iface.typeSet().IsEmpty() {
- return errorf("%s does not satisfy %s (constraint type set is empty)", targ, tpar.bound)
+ Ti, _ := Tu.(*Interface)
+ if Ti == nil {
+ var cause string
+ if isInterfacePtr(Tu) {
+ cause = sprintf(qf, false, "type %s is pointer to interface, not interface", T)
+ } else {
+ cause = sprintf(qf, false, "%s is not an interface", T)
+ }
+ return errorf("%s does not implement %s (%s)", V, T, cause)
}
- // The type parameter bound is parameterized with the same type parameters
- // as the instantiated type; before we can use it for bounds checking we
- // need to instantiate it with the type arguments with which we instantiate
- // the parameterized type.
- iface = check.subst(pos, iface, smap, nil).(*Interface)
-
- // if iface is comparable, targ must be comparable
- // TODO(gri) the error messages needs to be better, here
- if iface.IsComparable() && !Comparable(targ) {
- if tpar := asTypeParam(targ); tpar != nil && tpar.iface().typeSet().IsAll() {
- return errorf("%s has no constraints", targ)
- }
- return errorf("%s does not satisfy comparable", targ)
- }
-
- // targ must implement iface (methods)
- // - check only if we have methods
- if iface.NumMethods() > 0 {
- // If the type argument is a pointer to a type parameter, the type argument's
- // method set is empty.
- // TODO(gri) is this what we want? (spec question)
- if base, isPtr := deref(targ); isPtr && asTypeParam(base) != nil {
- return errorf("%s has no methods", targ)
- }
- if m, wrong := check.missingMethod(targ, iface, true); m != nil {
- // TODO(gri) needs to print updated name to avoid major confusion in error message!
- // (print warning for now)
- // Old warning:
- // check.softErrorf(pos, "%s does not satisfy %s (warning: name not updated) = %s (missing method %s)", targ, tpar.bound, iface, m)
- if wrong != nil {
- // TODO(gri) This can still report uninstantiated types which makes the error message
- // more difficult to read then necessary.
- return errorf("%s does not satisfy %s: wrong method signature\n\tgot %s\n\twant %s",
- targ, tpar.bound, wrong, m,
- )
- }
- return errorf("%s does not satisfy %s (missing method %s)", targ, tpar.bound, m.name)
- }
- }
-
- // targ must also be in the set of types of iface, if any.
- // Constraints with empty type sets were already excluded above.
- if !iface.typeSet().hasTerms() {
- return nil // nothing to do
- }
-
- // If targ is itself a type parameter, each of its possible types must be in the set
- // of iface types (i.e., the targ type set must be a subset of the iface type set).
- // Type arguments with empty type sets were already excluded above.
- if targ := asTypeParam(targ); targ != nil {
- targBound := targ.iface()
- if !targBound.typeSet().subsetOf(iface.typeSet()) {
- // TODO(gri) report which type is missing
- return errorf("%s does not satisfy %s", targ, tpar.bound)
- }
+ // Every type satisfies the empty interface.
+ if Ti.Empty() {
return nil
}
+ // T is not the empty interface (i.e., the type set of T is restricted)
- // Otherwise, targ's type must be included in the iface type set.
- if !iface.typeSet().includes(targ) {
- // TODO(gri) report which type is missing
- return errorf("%s does not satisfy %s", targ, tpar.bound)
+ // An interface V with an empty type set satisfies any interface.
+ // (The empty set is a subset of any set.)
+ Vi, _ := Vu.(*Interface)
+ if Vi != nil && Vi.typeSet().IsEmpty() {
+ return nil
+ }
+ // type set of V is not empty
+
+ // No type with non-empty type set satisfies the empty type set.
+ if Ti.typeSet().IsEmpty() {
+ return errorf("cannot implement %s (empty type set)", T)
}
- return nil
+ // V must implement T's methods, if any.
+ if Ti.NumMethods() > 0 {
+ if m, wrong := check.missingMethod(V, Ti, true); m != nil /* !Implements(V, Ti) */ {
+ if check != nil && check.conf.CompilerErrorMessages {
+ return errorf("%s does not implement %s %s", V, T, check.missingMethodReason(V, T, m, wrong))
+ }
+ var cause string
+ if wrong != nil {
+ if Identical(m.typ, wrong.typ) {
+ cause = fmt.Sprintf("missing method %s (%s has pointer receiver)", m.name, m.name)
+ } else {
+ cause = fmt.Sprintf("wrong type for method %s (have %s, want %s)", m.Name(), wrong.typ, m.typ)
+ }
+ } else {
+ cause = "missing method " + m.Name()
+ }
+ return errorf("%s does not implement %s: %s", V, T, cause)
+ }
+ }
+
+ // If T is comparable, V must be comparable.
+ // Remember as a pending error and report only if we don't have a more specific error.
+ var pending error
+ if Ti.IsComparable() && ((Vi != nil && !Vi.IsComparable()) || (Vi == nil && !Comparable(V))) {
+ pending = errorf("%s does not implement comparable", V)
+ }
+
+ // V must also be in the set of types of T, if any.
+ // Constraints with empty type sets were already excluded above.
+ if !Ti.typeSet().hasTerms() {
+ return pending // nothing to do
+ }
+
+ // If V is itself an interface, each of its possible types must be in the set
+ // of T types (i.e., the V type set must be a subset of the T type set).
+ // Interfaces V with empty type sets were already excluded above.
+ if Vi != nil {
+ if !Vi.typeSet().subsetOf(Ti.typeSet()) {
+ // TODO(gri) report which type is missing
+ return errorf("%s does not implement %s", V, T)
+ }
+ return pending
+ }
+
+ // Otherwise, V's type must be included in the iface type set.
+ var alt Type
+ if Ti.typeSet().is(func(t *term) bool {
+ if !t.includes(V) {
+ // If V ∉ t.typ but V ∈ ~t.typ then remember this type
+ // so we can suggest it as an alternative in the error
+ // message.
+ if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) {
+ tt := *t
+ tt.tilde = true
+ if tt.includes(V) {
+ alt = t.typ
+ }
+ }
+ return true
+ }
+ return false
+ }) {
+ if alt != nil {
+ return errorf("%s does not implement %s (possibly missing ~ for %s in constraint %s)", V, T, alt, T)
+ } else {
+ return errorf("%s does not implement %s", V, T)
+ }
+ }
+
+ return pending
}
diff --git a/src/cmd/compile/internal/types2/instantiate_test.go b/src/cmd/compile/internal/types2/instantiate_test.go
index a99fc5d032..591b467a2e 100644
--- a/src/cmd/compile/internal/types2/instantiate_test.go
+++ b/src/cmd/compile/internal/types2/instantiate_test.go
@@ -10,29 +10,132 @@ import (
)
func TestInstantiateEquality(t *testing.T) {
- const src = genericPkg + "p; type T[P any] int"
- pkg, err := pkgFor(".", src, nil)
- if err != nil {
- t.Fatal(err)
+ emptySignature := NewSignatureType(nil, nil, nil, nil, nil, false)
+ tests := []struct {
+ src string
+ name1 string
+ targs1 []Type
+ name2 string
+ targs2 []Type
+ wantEqual bool
+ }{
+ {
+ "package basictype; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differenttypeargs; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package typeslice; type T[P any] int",
+ "T", []Type{NewSlice(Typ[Int])},
+ "T", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ // interface{interface{...}} is equivalent to interface{...}
+ "package equivalentinterfaces; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ "T", []Type{
+ NewInterfaceType(
+ nil,
+ []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ ),
+ },
+ true,
+ },
+ {
+ // int|string is equivalent to string|int
+ "package equivalenttypesets; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[Int]), NewTerm(false, Typ[String])}),
+ }),
+ },
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[String]), NewTerm(false, Typ[Int])}),
+ }),
+ },
+ true,
+ },
+ {
+ "package basicfunc; func F[P any]() {}",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package funcslice; func F[P any]() {}",
+ "F", []Type{NewSlice(Typ[Int])},
+ "F", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ "package funcwithparams; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differentfuncargs; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package funcequality; func F1[P any](x int) {}; func F2[Q any](x int) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
+ {
+ "package funcsymmetry; func F1[P any](x P) {}; func F2[Q any](x Q) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
}
- T := pkg.Scope().Lookup("T").Type().(*Named)
- // Instantiating the same type twice should result in pointer-equivalent
- // instances.
- ctxt := NewContext()
- res1, err := Instantiate(ctxt, T, []Type{Typ[Int]}, false)
- if err != nil {
- t.Fatal(err)
- }
- res2, err := Instantiate(ctxt, T, []Type{Typ[Int]}, false)
- if err != nil {
- t.Fatal(err)
- }
- if res1 != res2 {
- t.Errorf("first instance (%s) not pointer-equivalent to second instance (%s)", res1, res2)
+
+ for _, test := range tests {
+ pkg, err := pkgFor(".", test.src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run(pkg.Name(), func(t *testing.T) {
+ ctxt := NewContext()
+
+ T1 := pkg.Scope().Lookup(test.name1).Type()
+ res1, err := Instantiate(ctxt, T1, test.targs1, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ T2 := pkg.Scope().Lookup(test.name2).Type()
+ res2, err := Instantiate(ctxt, T2, test.targs2, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if gotEqual := res1 == res2; gotEqual != test.wantEqual {
+ t.Errorf("%s == %s: %t, want %t", res1, res2, gotEqual, test.wantEqual)
+ }
+ })
}
}
+
func TestInstantiateNonEquality(t *testing.T) {
- const src = genericPkg + "p; type T[P any] int"
+ const src = "package p; type T[P any] int"
pkg1, err := pkgFor(".", src, nil)
if err != nil {
t.Fatal(err)
@@ -63,7 +166,7 @@ func TestInstantiateNonEquality(t *testing.T) {
}
func TestMethodInstantiation(t *testing.T) {
- const prefix = genericPkg + `p
+ const prefix = `package p
type T[P any] struct{}
@@ -102,7 +205,7 @@ var X T[int]
}
func TestImmutableSignatures(t *testing.T) {
- const src = genericPkg + `p
+ const src = `package p
type T[P any] struct{}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
index 96c92ccaec..ca5140d092 100644
--- a/src/cmd/compile/internal/types2/interface.go
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -86,7 +86,7 @@ func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
func (t *Interface) Empty() bool { return t.typeSet().IsAll() }
// IsComparable reports whether each type in interface t's type set is comparable.
-func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable() }
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable(nil) }
// IsMethodSet reports whether the interface t is fully described by its method set.
func (t *Interface) IsMethodSet() bool { return t.typeSet().IsMethodSet() }
@@ -111,7 +111,7 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
for _, f := range iface.MethodList {
if f.Name == nil {
- addEmbedded(posFor(f.Type), parseUnion(check, flattenUnion(nil, f.Type)))
+ addEmbedded(posFor(f.Type), parseUnion(check, f.Type))
continue
}
// f.Name != nil
@@ -136,13 +136,6 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
continue // ignore
}
- // Always type-check method type parameters but complain if they are not enabled.
- // (This extra check is needed here because interface method signatures don't have
- // a receiver specification.)
- if sig.tparams != nil && !acceptMethodTypeParams {
- check.error(f.Type, "methods cannot have type parameters")
- }
-
// use named receiver type if available (for better error messages)
var recvTyp Type = ityp
if def != nil {
@@ -182,11 +175,3 @@ func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType
ityp.check = nil
}).describef(iface, "compute type set for %s", ityp)
}
-
-func flattenUnion(list []syntax.Expr, x syntax.Expr) []syntax.Expr {
- if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
- list = flattenUnion(list, o.X)
- x = o.Y
- }
- return append(list, x)
-}
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
index 9890b79323..697a73525c 100644
--- a/src/cmd/compile/internal/types2/issues_test.go
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -611,3 +611,29 @@ func TestIssue43124(t *testing.T) {
t.Errorf("type checking error for c does not disambiguate package template: %q", err)
}
}
+
+func TestIssue50646(t *testing.T) {
+ anyType := Universe.Lookup("any").Type()
+ comparableType := Universe.Lookup("comparable").Type()
+
+ if !Comparable(anyType) {
+ t.Errorf("any is not a comparable type")
+ }
+ if !Comparable(comparableType) {
+ t.Errorf("comparable is not a comparable type")
+ }
+
+ if Implements(anyType, comparableType.Underlying().(*Interface)) {
+ t.Errorf("any implements comparable")
+ }
+ if !Implements(comparableType, anyType.(*Interface)) {
+ t.Errorf("comparable does not implement any")
+ }
+
+ if AssignableTo(anyType, comparableType) {
+ t.Errorf("any assignable to comparable")
+ }
+ if !AssignableTo(comparableType, anyType) {
+ t.Errorf("comparable not assignable to any")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
index e0fd74482a..a71dd409e1 100644
--- a/src/cmd/compile/internal/types2/lookup.go
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -6,6 +6,10 @@
package types2
+import (
+ "strings"
+)
+
// Internal use of LookupFieldOrMethod: If the obj result is a method
// associated with a concrete (non-interface) type, the method's signature
// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
@@ -15,7 +19,7 @@ package types2
// in T and returns the corresponding *Var or *Func, an index sequence, and a
// bool indicating if there were any pointer indirections on the path to the
// field or method. If addressable is set, T is the type of an addressable
-// variable (only matters for method lookups).
+// variable (only matters for method lookups). T must not be nil.
//
// The last index entry is the field or method index in the (possibly embedded)
// type where the entry was found, either:
@@ -38,16 +42,20 @@ package types2
// the method's formal receiver base type, nor was the receiver addressable.
//
func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
- // Methods cannot be associated to a named pointer type
+ if T == nil {
+ panic("LookupFieldOrMethod on nil type")
+ }
+
+ // Methods cannot be associated to a named pointer type.
// (spec: "The type denoted by T is called the receiver base type;
// it must not be a pointer or interface type and it must be declared
// in the same package as the method.").
// Thus, if we have a named pointer type, proceed with the underlying
// pointer type but discard the result if it is a method since we would
// not have found it for T (see also issue 8590).
- if t := asNamed(T); t != nil {
- if p, _ := safeUnderlying(t).(*Pointer); p != nil {
- obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name)
+ if t, _ := T.(*Named); t != nil {
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, false, pkg, name)
if _, ok := obj.(*Func); ok {
return nil, nil, false
}
@@ -55,7 +63,21 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
}
}
- return lookupFieldOrMethod(T, addressable, pkg, name)
+ obj, index, indirect = lookupFieldOrMethod(T, addressable, false, pkg, name)
+
+ // If we didn't find anything and if we have a type parameter with a structural constraint,
+ // see if there is a matching field (but not a method, those need to be declared explicitly
+ // in the constraint). If the structural constraint is a named pointer type (see above), we
+ // are ok here because only fields are accepted as results.
+ if obj == nil && isTypeParam(T) {
+ if t := structuralType(T); t != nil {
+ obj, index, indirect = lookupFieldOrMethod(t, addressable, false, pkg, name)
+ if _, ok := obj.(*Var); !ok {
+ obj, index, indirect = nil, nil, false // accept fields (variables) only
+ }
+ }
+ }
+ return
}
// TODO(gri) The named type consolidation and seen maps below must be
@@ -64,7 +86,11 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// indirectly via different packages.)
// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
-func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+// If checkFold is true, the lookup for methods will include looking for any method
+// which case-folds to the same as 'name' (used for giving helpful error messages).
+//
+// The resulting object may not be fully type-checked.
+func lookupFieldOrMethod(T Type, addressable, checkFold bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
// WARNING: The code in this function is extremely subtle - do not modify casually!
if name == "_" {
@@ -73,12 +99,8 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
typ, isPtr := deref(T)
- // *typ where typ is an interface or type parameter has no methods.
+ // *typ where typ is an interface (incl. a type parameter) has no methods.
if isPtr {
- // don't look at under(typ) here - was bug (issue #47747)
- if _, ok := typ.(*TypeParam); ok {
- return
- }
if _, ok := under(typ).(*Interface); ok {
return
}
@@ -101,13 +123,12 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
var next []embeddedType // embedded types found at current depth
// look for (pkg, name) in all types at current depth
- var tpar *TypeParam // set if obj receiver is a type parameter
for _, e := range current {
typ := e.typ
// If we have a named type, we may have associated methods.
// Look for those first.
- if named := asNamed(typ); named != nil {
+ if named, _ := typ.(*Named); named != nil {
if seen[named] {
// We have seen this type before, at a more shallow depth
// (note that multiples of this type at the current depth
@@ -123,7 +144,7 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// look for a matching attached method
named.resolve(nil)
- if i, m := lookupMethod(named.methods, pkg, name); m != nil {
+ if i, m := named.lookupMethodFold(pkg, name, checkFold); m != nil {
// potential match
// caution: method may not have a proper signature yet
index = concat(e.index, i)
@@ -134,17 +155,9 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
indirect = e.indirect
continue // we can't have a matching field or interface method
}
-
- // continue with underlying type, but only if it's not a type parameter
- // TODO(gri) is this what we want to do for type parameters? (spec question)
- typ = named.under()
- if asTypeParam(typ) != nil {
- continue
- }
}
- tpar = nil
- switch t := typ.(type) {
+ switch t := under(typ).(type) {
case *Struct:
// look for a matching field and collect embedded types
for i, f := range t.fields {
@@ -177,8 +190,8 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
}
case *Interface:
- // look for a matching method
- if i, m := t.typeSet().LookupMethod(pkg, name); m != nil {
+ // look for a matching method (interface may be a type parameter)
+ if i, m := lookupMethodFold(t.typeSet().methods, pkg, name, checkFold); m != nil {
assert(m.typ != nil)
index = concat(e.index, i)
if obj != nil || e.multiples {
@@ -187,24 +200,6 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
obj = m
indirect = e.indirect
}
-
- case *TypeParam:
- if i, m := t.iface().typeSet().LookupMethod(pkg, name); m != nil {
- assert(m.typ != nil)
- index = concat(e.index, i)
- if obj != nil || e.multiples {
- return nil, index, false // collision
- }
- tpar = t
- obj = m
- indirect = e.indirect
- }
- if obj == nil {
- // At this point we're not (yet) looking into methods
- // that any underlying type of the types in the type list
- // might have.
- // TODO(gri) Do we want to specify the language that way?
- }
}
}
@@ -216,8 +211,7 @@ func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o
// is shorthand for (&x).m()".
if f, _ := obj.(*Func); f != nil {
// determine if method has a pointer receiver
- hasPtrRecv := tpar == nil && f.hasPtrRecv()
- if hasPtrRecv && !indirect && !addressable {
+ if f.hasPtrRecv() && !indirect && !addressable {
return nil, nil, true // pointer/addressable receiver required
}
}
@@ -291,6 +285,11 @@ func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType b
return m, typ != nil
}
+// If we accept type parameters for methods, (at least) the code
+// guarded with this constant will need to be adjusted when such
+// methods are used (not just parsed).
+const acceptMethodTypeParams = false
+
// missingMethod is like MissingMethod but accepts a *Checker as
// receiver and an addressable flag.
// The receiver may be nil if missingMethod is invoked through
@@ -306,7 +305,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
return
}
- if ityp := asInterface(V); ityp != nil {
+ if ityp, _ := under(V).(*Interface); ityp != nil {
// TODO(gri) the methods are sorted - could do this more efficiently
for _, m := range T.typeSet().methods {
_, f := ityp.typeSet().LookupMethod(m.pkg, m.name)
@@ -315,6 +314,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
if !static {
continue
}
+ // We don't do any case-fold check if V is an interface.
return m, f
}
@@ -328,14 +328,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
panic("method with type parameters")
}
- // If the methods have type parameters we don't care whether they
- // are the same or not, as long as they match up. Use unification
- // to see if they can be made to match.
- // TODO(gri) is this always correct? what about type bounds?
- // (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(true)
- u.x.init(ftyp.TypeParams().list())
- if !u.unify(ftyp, mtyp) {
+ if !Identical(ftyp, mtyp) {
return m, f
}
}
@@ -345,14 +338,24 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
// A concrete type implements T if it implements all methods of T.
for _, m := range T.typeSet().methods {
- // TODO(gri) should this be calling lookupFieldOrMethod instead (and why not)?
- obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name)
+ // TODO(gri) should this be calling LookupFieldOrMethod instead (and why not)?
+ obj, _, _ := lookupFieldOrMethod(V, false, false, m.pkg, m.name)
// Check if *V implements this method of T.
if obj == nil {
ptr := NewPointer(V)
- obj, _, _ = lookupFieldOrMethod(ptr, false, m.pkg, m.name)
+ obj, _, _ = lookupFieldOrMethod(ptr, false, false, m.pkg, m.name)
+ if obj == nil {
+ // If we didn't find the exact method (even with pointer
+ // receiver), look to see if there is a method that
+ // matches m.name with case-folding.
+ obj, _, _ = lookupFieldOrMethod(V, false, true, m.pkg, m.name)
+ }
if obj != nil {
+ // methods may not have a fully set up signature yet
+ if check != nil {
+ check.objDecl(obj, nil)
+ }
return m, obj.(*Func)
}
}
@@ -378,27 +381,7 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
panic("method with type parameters")
}
- // If the methods have type parameters we don't care whether they
- // are the same or not, as long as they match up. Use unification
- // to see if they can be made to match.
- // TODO(gri) is this always correct? what about type bounds?
- // (Alternative is to rename/subst type parameters and compare.)
- u := newUnifier(true)
- if ftyp.TypeParams().Len() > 0 {
- // We reach here only if we accept method type parameters.
- // In this case, unification must consider any receiver
- // and method type parameters as "free" type parameters.
- assert(acceptMethodTypeParams)
- // We don't have a test case for this at the moment since
- // we can't parse method type parameters. Keeping the
- // unimplemented call so that we test this code if we
- // enable method type parameters.
- unimplemented()
- u.x.init(append(ftyp.RecvTypeParams().list(), ftyp.TypeParams().list()...))
- } else {
- u.x.init(ftyp.RecvTypeParams().list())
- }
- if !u.unify(ftyp, mtyp) {
+ if !Identical(ftyp, mtyp) {
return m, f
}
}
@@ -406,6 +389,65 @@ func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method,
return
}
+// missingMethodReason returns a string giving the detailed reason for a missing method m,
+// where m is missing from V, but required by T. It puts the reason in parentheses,
+// and may include more have/want info after that. If non-nil, wrongType is a relevant
+// method that matches in some way. It may have the correct name, but wrong type, or
+// it may have a pointer receiver, or it may have the correct name except wrong case.
+func (check *Checker) missingMethodReason(V, T Type, m, wrongType *Func) string {
+ var r string
+ var mname string
+ if check.conf.CompilerErrorMessages {
+ mname = m.Name() + " method"
+ } else {
+ mname = "method " + m.Name()
+ }
+ if wrongType != nil {
+ if m.Name() != wrongType.Name() {
+ r = check.sprintf("(missing %s)\n\t\thave %s^^%s\n\t\twant %s^^%s",
+ mname, wrongType.Name(), wrongType.typ, m.Name(), m.typ)
+ } else if Identical(m.typ, wrongType.typ) {
+ r = check.sprintf("(%s has pointer receiver)", mname)
+ } else {
+ if check.conf.CompilerErrorMessages {
+ r = check.sprintf("(wrong type for %s)\n\t\thave %s^^%s\n\t\twant %s^^%s",
+ mname, wrongType.Name(), wrongType.typ, m.Name(), m.typ)
+ } else {
+ r = check.sprintf("(wrong type for %s)\n\thave %s\n\twant %s",
+ mname, wrongType.typ, m.typ)
+ }
+ }
+ // This is a hack to print the function type without the leading
+ // 'func' keyword in the have/want printouts. We could change to have
+ // an extra formatting option for types2.Type that doesn't print out
+ // 'func'.
+ r = strings.Replace(r, "^^func", "", -1)
+ } else if IsInterface(T) {
+ if isInterfacePtr(V) {
+ r = "(" + check.interfacePtrError(V) + ")"
+ }
+ } else if isInterfacePtr(T) {
+ r = "(" + check.interfacePtrError(T) + ")"
+ }
+ if r == "" {
+ r = check.sprintf("(missing %s)", mname)
+ }
+ return r
+}
+
+func isInterfacePtr(T Type) bool {
+ p, _ := under(T).(*Pointer)
+ return p != nil && IsInterface(p.base)
+}
+
+func (check *Checker) interfacePtrError(T Type) string {
+ assert(isInterfacePtr(T))
+ if p, _ := under(T).(*Pointer); isTypeParam(p.base) {
+ return check.sprintf("type %s is pointer to type parameter, not type parameter", T)
+ }
+ return check.sprintf("type %s is pointer to interface, not interface", T)
+}
+
// assertableTo reports whether a value of type V can be asserted to have type T.
// It returns (nil, false) as affirmative answer. Otherwise it returns a missing
// method required by V and whether it is missing or just has the wrong type.
@@ -417,7 +459,7 @@ func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Fun
// no static check is required if T is an interface
// spec: "If T is an interface type, x.(T) asserts that the
// dynamic type of x implements the interface T."
- if asInterface(T) != nil && !forceStrict {
+ if IsInterface(T) && !forceStrict {
return
}
return check.missingMethod(T, V, false)
@@ -427,6 +469,13 @@ func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Fun
// Otherwise it returns (typ, false).
func deref(typ Type) (Type, bool) {
if p, _ := typ.(*Pointer); p != nil {
+ // p.base should never be nil, but be conservative
+ if p.base == nil {
+ if debug {
+ panic("pointer with nil base type (possibly due to an invalid cyclic declaration)")
+ }
+ return Typ[Invalid], true
+ }
return p.base, true
}
return typ, false
@@ -435,8 +484,8 @@ func deref(typ Type) (Type, bool) {
// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a
// (named or unnamed) struct and returns its base. Otherwise it returns typ.
func derefStructPtr(typ Type) Type {
- if p := asPointer(typ); p != nil {
- if asStruct(p.base) != nil {
+ if p, _ := under(typ).(*Pointer); p != nil {
+ if _, ok := under(p.base).(*Struct); ok {
return p.base
}
}
@@ -474,3 +523,21 @@ func lookupMethod(methods []*Func, pkg *Package, name string) (int, *Func) {
}
return -1, nil
}
+
+// lookupMethodFold is like lookupMethod, but if checkFold is true, it matches a method
+// name if the names are equal with case folding.
+func lookupMethodFold(methods []*Func, pkg *Package, name string, checkFold bool) (int, *Func) {
+ if name != "_" {
+ for i, m := range methods {
+ if m.name != name && !(checkFold && strings.EqualFold(m.name, name)) {
+ continue
+ }
+ // Use m.name, since we've already checked that m.name and
+ // name are equal with folding.
+ if m.sameId(pkg, m.name) {
+ return i, m
+ }
+ }
+ }
+ return -1, nil
+}
diff --git a/src/cmd/compile/internal/types2/methodlist.go b/src/cmd/compile/internal/types2/methodlist.go
new file mode 100644
index 0000000000..ba10159ea2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/methodlist.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "sync"
+
+// methodList holds a list of methods that may be lazily resolved by a provided
+// resolution method.
+type methodList struct {
+ methods []*Func
+
+ // guards synchronizes the instantiation of lazy methods. For lazy method
+ // lists, guards is non-nil and of the length passed to newLazyMethodList.
+ // For non-lazy method lists, guards is nil.
+ guards *[]sync.Once
+}
+
+// newMethodList creates a non-lazy method list holding the given methods.
+func newMethodList(methods []*Func) *methodList {
+ return &methodList{methods: methods}
+}
+
+// newLazyMethodList creates a lazy method list of the given length. Methods
+// may be resolved lazily for a given index by providing a resolver function.
+func newLazyMethodList(length int) *methodList {
+ guards := make([]sync.Once, length)
+ return &methodList{
+ methods: make([]*Func, length),
+ guards: &guards,
+ }
+}
+
+// isLazy reports whether the receiver is a lazy method list.
+func (l *methodList) isLazy() bool {
+ return l != nil && l.guards != nil
+}
+
+// Add appends a method to the method list if not already present. Add
+// panics if the receiver is lazy.
+func (l *methodList) Add(m *Func) {
+ assert(!l.isLazy())
+ if i, _ := lookupMethod(l.methods, m.pkg, m.name); i < 0 {
+ l.methods = append(l.methods, m)
+ }
+}
+
+// LookupFold looks up the method identified by pkg and name in the receiver.
+// LookupFold panics if the receiver is lazy. If checkFold is true, it matches
+// a method name if the names are equal with case folding.
+func (l *methodList) LookupFold(pkg *Package, name string, checkFold bool) (int, *Func) {
+ assert(!l.isLazy())
+ if l == nil {
+ return -1, nil
+ }
+ return lookupMethodFold(l.methods, pkg, name, checkFold)
+}
+
+// Len returns the length of the method list.
+func (l *methodList) Len() int {
+ if l == nil {
+ return 0
+ }
+ return len(l.methods)
+}
+
+// At returns the i'th method of the method list. At panics if i is out of
+// bounds, or if the receiver is lazy and resolve is nil.
+func (l *methodList) At(i int, resolve func() *Func) *Func {
+ if !l.isLazy() {
+ return l.methods[i]
+ }
+ assert(resolve != nil)
+ (*l.guards)[i].Do(func() {
+ l.methods[i] = resolve()
+ })
+ return l.methods[i]
+}
diff --git a/src/cmd/compile/internal/types2/methodlist_test.go b/src/cmd/compile/internal/types2/methodlist_test.go
new file mode 100644
index 0000000000..7a183ac7f9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/methodlist_test.go
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "testing"
+)
+
+func TestLazyMethodList(t *testing.T) {
+ l := newLazyMethodList(2)
+
+ if got := l.Len(); got != 2 {
+ t.Fatalf("Len() = %d, want 2", got)
+ }
+
+ f0 := NewFunc(nopos, nil, "f0", nil)
+ f1 := NewFunc(nopos, nil, "f1", nil)
+
+ // Verify that methodList.At is idempotent, by calling it repeatedly with a
+ // resolve func that returns different pointer values (f0 or f1).
+ steps := []struct {
+ index int
+ resolve *Func // the *Func returned by the resolver
+ want *Func // the actual *Func returned by methodList.At
+ }{
+ {0, f0, f0},
+ {0, f1, f0},
+ {1, f1, f1},
+ {1, f0, f1},
+ }
+
+ for i, step := range steps {
+ got := l.At(step.index, func() *Func { return step.resolve })
+ if got != step.want {
+ t.Errorf("step %d: At(%d, ...) = %s, want %s", i, step.index, got.Name(), step.want.Name())
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go
index 39c4d4fbef..7bd79f4282 100644
--- a/src/cmd/compile/internal/types2/mono.go
+++ b/src/cmd/compile/internal/types2/mono.go
@@ -168,11 +168,11 @@ func (w *monoGraph) recordCanon(mpar, tpar *TypeParam) {
// recordInstance records that the given type parameters were
// instantiated with the corresponding type arguments.
-func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*TypeParam, targs []Type, posList []syntax.Pos) {
+func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*TypeParam, targs []Type, xlist []syntax.Expr) {
for i, tpar := range tparams {
pos := pos
- if i < len(posList) {
- pos = posList[i]
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
}
w.assign(pkg, pos, tpar, targs[i])
}
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
index 6ebad8fbb5..5248893a4a 100644
--- a/src/cmd/compile/internal/types2/named.go
+++ b/src/cmd/compile/internal/types2/named.go
@@ -12,17 +12,22 @@ import (
// A Named represents a named (defined) type.
type Named struct {
check *Checker
- info typeInfo // for cycle detection
obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
orig *Named // original, uninstantiated type
fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
underlying Type // possibly a *Named during setup; never a *Named once set up completely
tparams *TypeParamList // type parameters, or nil
targs *TypeList // type arguments (after instantiation), or nil
- methods []*Func // methods declared for this type (not the method set of this type); signatures are type-checked lazily
+
+ // methods declared for this type (not the method set of this type).
+ // Signatures are type-checked lazily.
+ // For non-instantiated types, this is a fully populated list of methods. For
+ // instantiated types, this is a 'lazy' list, and methods are instantiated
+ // when they are first accessed.
+ methods *methodList
// resolver may be provided to lazily resolve type parameters, underlying, and methods.
- resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods []*Func)
+ resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods *methodList)
once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
}
@@ -33,7 +38,7 @@ func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
if _, ok := underlying.(*Named); ok {
panic("underlying type must not be *Named")
}
- return (*Checker)(nil).newNamed(obj, nil, underlying, nil, methods)
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, newMethodList(methods))
}
func (t *Named) resolve(ctxt *Context) *Named {
@@ -57,7 +62,7 @@ func (t *Named) resolve(ctxt *Context) *Named {
}
// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
-func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParamList, methods []*Func) *Named {
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParamList, methods *methodList) *Named {
typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
if typ.orig == nil {
typ.orig = typ
@@ -88,19 +93,87 @@ func (t *Named) Origin() *Named { return t.orig }
func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
// SetTypeParams sets the type parameters of the named type t.
-func (t *Named) SetTypeParams(tparams []*TypeParam) { t.resolve(nil).tparams = bindTParams(tparams) }
+// t must not have type arguments.
+func (t *Named) SetTypeParams(tparams []*TypeParam) {
+ assert(t.targs.Len() == 0)
+ t.resolve(nil).tparams = bindTParams(tparams)
+}
// TypeArgs returns the type arguments used to instantiate the named type t.
func (t *Named) TypeArgs() *TypeList { return t.targs }
// NumMethods returns the number of explicit methods whose receiver is named type t.
-func (t *Named) NumMethods() int { return len(t.resolve(nil).methods) }
+func (t *Named) NumMethods() int { return t.resolve(nil).methods.Len() }
// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
-func (t *Named) Method(i int) *Func { return t.resolve(nil).methods[i] }
+func (t *Named) Method(i int) *Func {
+ t.resolve(nil)
+ return t.methods.At(i, func() *Func {
+ return t.instantiateMethod(i)
+ })
+}
+
+// instantiateMethod instantiates the i'th method for an instantiated receiver.
+func (t *Named) instantiateMethod(i int) *Func {
+ assert(t.TypeArgs().Len() > 0) // t must be an instance
+
+ // t.orig.methods is not lazy. origm is the method instantiated with its
+ // receiver type parameters (the "origin" method).
+ origm := t.orig.Method(i)
+ assert(origm != nil)
+
+ check := t.check
+ // Ensure that the original method is type-checked.
+ if check != nil {
+ check.objDecl(origm, nil)
+ }
+
+ origSig := origm.typ.(*Signature)
+ rbase, _ := deref(origSig.Recv().Type())
+
+ // If rbase is t, then origm is already the instantiated method we're looking
+ // for. In this case, we return origm to preserve the invariant that
+ // traversing Method->Receiver Type->Method should get back to the same
+ // method.
+ //
+ // This occurs if t is instantiated with the receiver type parameters, as in
+ // the use of m in func (r T[_]) m() { r.m() }.
+ if rbase == t {
+ return origm
+ }
+
+ sig := origSig
+ // We can only substitute if we have a correspondence between type arguments
+ // and type parameters. This check is necessary in the presence of invalid
+ // code.
+ if origSig.RecvTypeParams().Len() == t.targs.Len() {
+ ctxt := check.bestContext(nil)
+ smap := makeSubstMap(origSig.RecvTypeParams().list(), t.targs.list())
+ sig = check.subst(origm.pos, origSig, smap, ctxt).(*Signature)
+ }
+
+ if sig == origSig {
+ // No substitution occurred, but we still need to create a new signature to
+ // hold the instantiated receiver.
+ copy := *origSig
+		sig = &copy
+ }
+
+ var rtyp Type
+ if origm.hasPtrRecv() {
+ rtyp = NewPointer(t)
+ } else {
+ rtyp = t
+ }
+
+ sig.recv = NewParam(origSig.recv.pos, origSig.recv.pkg, origSig.recv.name, rtyp)
+ return NewFunc(origm.pos, origm.pkg, origm.name, sig)
+}
// SetUnderlying sets the underlying type and marks t as complete.
+// t must not have type arguments.
func (t *Named) SetUnderlying(underlying Type) {
+ assert(t.targs.Len() == 0)
if underlying == nil {
panic("underlying type must not be nil")
}
@@ -108,14 +181,20 @@ func (t *Named) SetUnderlying(underlying Type) {
panic("underlying type must not be *Named")
}
t.resolve(nil).underlying = underlying
+ if t.fromRHS == nil {
+ t.fromRHS = underlying // for cycle detection
+ }
}
// AddMethod adds method m unless it is already in the method list.
+// t must not have type arguments.
func (t *Named) AddMethod(m *Func) {
+ assert(t.targs.Len() == 0)
t.resolve(nil)
- if i, _ := lookupMethod(t.methods, m.pkg, m.name); i < 0 {
- t.methods = append(t.methods, m)
+ if t.methods == nil {
+ t.methods = newMethodList(nil)
}
+ t.methods.Add(m)
}
func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
@@ -218,24 +297,39 @@ func (n *Named) setUnderlying(typ Type) {
}
}
+func (n *Named) lookupMethodFold(pkg *Package, name string, checkFold bool) (int, *Func) {
+ n.resolve(nil)
+ // If n is an instance, we may not have yet instantiated all of its methods.
+ // Look up the method index in orig, and only instantiate method at the
+ // matching index (if any).
+ i, _ := n.orig.methods.LookupFold(pkg, name, checkFold)
+ if i < 0 {
+ return -1, nil
+ }
+ // For instances, m.Method(i) will be different from the orig method.
+ return i, n.Method(i)
+}
+
// bestContext returns the best available context. In order of preference:
// - the given ctxt, if non-nil
-// - check.Config.Context, if check is non-nil
+// - check.ctxt, if check is non-nil
// - a new Context
func (check *Checker) bestContext(ctxt *Context) *Context {
if ctxt != nil {
return ctxt
}
if check != nil {
- assert(check.conf.Context != nil)
- return check.conf.Context
+ if check.ctxt == nil {
+ check.ctxt = NewContext()
+ }
+ return check.ctxt
}
return NewContext()
}
// expandNamed ensures that the underlying type of n is instantiated.
// The underlying type will be Typ[Invalid] if there was an error.
-func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypeParamList, underlying Type, methods []*Func) {
+func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypeParamList, underlying Type, methods *methodList) {
n.orig.resolve(ctxt)
assert(n.orig.underlying != nil)
@@ -251,86 +345,36 @@ func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypePara
if n.orig.tparams.Len() == n.targs.Len() {
// We must always have a context, to avoid infinite recursion.
ctxt = check.bestContext(ctxt)
- h := ctxt.TypeHash(n.orig, n.targs.list())
+ h := ctxt.instanceHash(n.orig, n.targs.list())
// ensure that an instance is recorded for h to avoid infinite recursion.
- ctxt.typeForHash(h, n)
+ ctxt.update(h, n.orig, n.TypeArgs().list(), n)
smap := makeSubstMap(n.orig.tparams.list(), n.targs.list())
underlying = n.check.subst(instPos, n.orig.underlying, smap, ctxt)
-
- for i := 0; i < n.orig.NumMethods(); i++ {
- origm := n.orig.Method(i)
-
- // During type checking origm may not have a fully set up type, so defer
- // instantiation of its signature until later.
- m := NewFunc(origm.pos, origm.pkg, origm.name, nil)
- m.hasPtrRecv_ = origm.hasPtrRecv()
- // Setting instRecv here allows us to complete later (we need the
- // instRecv to get targs and the original method).
- m.instRecv = n
-
- methods = append(methods, m)
+ // If the underlying of n is an interface, we need to set the receiver of
+ // its methods accurately -- we set the receiver of interface methods on
+ // the RHS of a type declaration to the defined type.
+ if iface, _ := underlying.(*Interface); iface != nil {
+ if methods, copied := replaceRecvType(iface.methods, n.orig, n); copied {
+ // If the underlying doesn't actually use type parameters, it's possible
+ // that it wasn't substituted. In this case we need to create a new
+ // *Interface before modifying receivers.
+ if iface == n.orig.underlying {
+ iface = &Interface{
+ embeddeds: iface.embeddeds,
+ complete: iface.complete,
+ implicit: iface.implicit, // should be false but be conservative
+ }
+ underlying = iface
+ }
+ iface.methods = methods
+ }
}
} else {
underlying = Typ[Invalid]
}
- // Methods should not escape the type checker API without being completed. If
- // we're in the context of a type checking pass, we need to defer this until
- // later (not all methods may have types).
- completeMethods := func() {
- for _, m := range methods {
- if m.instRecv != nil {
- check.completeMethod(ctxt, m)
- }
- }
- }
- if check != nil {
- check.later(completeMethods)
- } else {
- completeMethods()
- }
-
- return n.orig.tparams, underlying, methods
-}
-
-func (check *Checker) completeMethod(ctxt *Context, m *Func) {
- assert(m.instRecv != nil)
- rbase := m.instRecv
- m.instRecv = nil
- m.setColor(black)
-
- assert(rbase.TypeArgs().Len() > 0)
-
- // Look up the original method.
- _, orig := lookupMethod(rbase.orig.methods, rbase.obj.pkg, m.name)
- assert(orig != nil)
- if check != nil {
- check.objDecl(orig, nil)
- }
- origSig := orig.typ.(*Signature)
- if origSig.RecvTypeParams().Len() != rbase.targs.Len() {
- m.typ = origSig // or new(Signature), but we can't use Typ[Invalid]: Funcs must have Signature type
- return // error reported elsewhere
- }
-
- smap := makeSubstMap(origSig.RecvTypeParams().list(), rbase.targs.list())
- sig := check.subst(orig.pos, origSig, smap, ctxt).(*Signature)
- if sig == origSig {
- // No substitution occurred, but we still need to create a new signature to
- // hold the instantiated receiver.
- copy := *origSig
-	sig = &copy
- }
- var rtyp Type
- if m.hasPtrRecv() {
- rtyp = NewPointer(rbase)
- } else {
- rtyp = rbase
- }
- sig.recv = NewParam(origSig.recv.pos, origSig.recv.pkg, origSig.recv.name, rtyp)
-
- m.typ = sig
+ return n.orig.tparams, underlying, newLazyMethodList(n.orig.methods.Len())
}
// safeUnderlying returns the underlying of typ without expanding instances, to
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
index c7d6709c26..08d37cb256 100644
--- a/src/cmd/compile/internal/types2/object.go
+++ b/src/cmd/compile/internal/types2/object.go
@@ -281,7 +281,7 @@ func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName
func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
obj := NewTypeName(pos, pkg, name, nil)
- resolve := func(_ *Context, t *Named) (*TypeParamList, Type, []*Func) {
+ resolve := func(_ *Context, t *Named) (*TypeParamList, Type, *methodList) {
tparams, underlying, methods := load(t)
switch underlying.(type) {
@@ -289,7 +289,7 @@ func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named
panic(fmt.Sprintf("invalid underlying type %T", t.underlying))
}
- return bindTParams(tparams), underlying, methods
+ return bindTParams(tparams), underlying, newMethodList(methods)
}
NewNamed(obj, nil, nil).resolver = resolve
@@ -365,8 +365,7 @@ func (*Var) isDependency() {} // a variable may be a dependency of an initializa
// An abstract method may belong to many interfaces due to embedding.
type Func struct {
object
- instRecv *Named // if non-nil, the receiver type for an incomplete instance method
- hasPtrRecv_ bool // only valid for methods that don't have a type yet; use hasPtrRecv() to read
+ hasPtrRecv_ bool // only valid for methods that don't have a type yet; use hasPtrRecv() to read
}
// NewFunc returns a new function with the given signature, representing
@@ -377,7 +376,7 @@ func NewFunc(pos syntax.Pos, pkg *Package, name string, sig *Signature) *Func {
if sig != nil {
typ = sig
}
- return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, nil, false}
+ return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false}
}
// FullName returns the package- or receiver-type-qualified name of
@@ -389,6 +388,8 @@ func (obj *Func) FullName() string {
}
// Scope returns the scope of the function's body block.
+// The result is nil for imported or instantiated functions and methods
+// (but there is also no mechanism to get to an instantiated function).
func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
// hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
@@ -458,6 +459,9 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
case *TypeName:
tname = obj
buf.WriteString("type")
+ if isTypeParam(typ) {
+ buf.WriteString(" parameter")
+ }
case *Var:
if obj.isField {
@@ -503,22 +507,34 @@ func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
}
if tname != nil {
- // We have a type object: Don't print anything more for
- // basic types since there's no more information (names
- // are the same; see also comment in TypeName.IsAlias).
- if _, ok := typ.(*Basic); ok {
+ switch t := typ.(type) {
+ case *Basic:
+ // Don't print anything more for basic types since there's
+ // no more information.
return
- }
- if named, _ := typ.(*Named); named != nil && named.TypeParams().Len() > 0 {
- newTypeWriter(buf, qf).tParamList(named.TypeParams().list())
+ case *Named:
+ if t.TypeParams().Len() > 0 {
+ newTypeWriter(buf, qf).tParamList(t.TypeParams().list())
+ }
}
if tname.IsAlias() {
buf.WriteString(" =")
+ } else if t, _ := typ.(*TypeParam); t != nil {
+ typ = t.bound
} else {
+ // TODO(gri) should this be fromRHS for *Named?
typ = under(typ)
}
}
+ // Special handling for any: because WriteType will format 'any' as 'any',
+ // resulting in the object string `type any = any` rather than `type any =
+ // interface{}`. To avoid this, swap in a different empty interface.
+ if obj == universeAny {
+ assert(Identical(typ, &emptyInterface))
+ typ = &emptyInterface
+ }
+
buf.WriteByte(' ')
WriteType(buf, typ, qf)
}
diff --git a/src/cmd/compile/internal/types2/object_test.go b/src/cmd/compile/internal/types2/object_test.go
index ed3c123023..8f0303d4b2 100644
--- a/src/cmd/compile/internal/types2/object_test.go
+++ b/src/cmd/compile/internal/types2/object_test.go
@@ -2,17 +2,16 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package types2
+package types2_test
import (
"cmd/compile/internal/syntax"
+ "internal/testenv"
"strings"
"testing"
-)
-func parseSrc(path, src string) (*syntax.File, error) {
- return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), nil, nil, 0)
-}
+ . "cmd/compile/internal/types2"
+)
func TestIsAlias(t *testing.T) {
check := func(obj *TypeName, want bool) {
@@ -42,12 +41,12 @@ func TestIsAlias(t *testing.T) {
{NewTypeName(nopos, nil, "t0", nil), false}, // no type yet
{NewTypeName(nopos, pkg, "t0", nil), false}, // no type yet
{t1, false}, // type name refers to named type and vice versa
- {NewTypeName(nopos, nil, "t2", &emptyInterface), true}, // type name refers to unnamed type
- {NewTypeName(nopos, pkg, "t3", n1), true}, // type name refers to named type with different type name
- {NewTypeName(nopos, nil, "t4", Typ[Int32]), true}, // type name refers to basic type with different name
- {NewTypeName(nopos, nil, "int32", Typ[Int32]), false}, // type name refers to basic type with same name
- {NewTypeName(nopos, pkg, "int32", Typ[Int32]), true}, // type name is declared in user-defined package (outside Universe)
- {NewTypeName(nopos, nil, "rune", Typ[Rune]), true}, // type name refers to basic type rune which is an alias already
+ {NewTypeName(nopos, nil, "t2", NewInterfaceType(nil, nil)), true}, // type name refers to unnamed type
+ {NewTypeName(nopos, pkg, "t3", n1), true}, // type name refers to named type with different type name
+ {NewTypeName(nopos, nil, "t4", Typ[Int32]), true}, // type name refers to basic type with different name
+ {NewTypeName(nopos, nil, "int32", Typ[Int32]), false}, // type name refers to basic type with same name
+ {NewTypeName(nopos, pkg, "int32", Typ[Int32]), true}, // type name is declared in user-defined package (outside Universe)
+ {NewTypeName(nopos, nil, "rune", Typ[Rune]), true}, // type name refers to basic type rune which is an alias already
{t5, false}, // type name refers to type parameter and vice versa
} {
check(test.name, test.alias)
@@ -89,3 +88,80 @@ func TestEmbeddedMethod(t *testing.T) {
t.Fatalf("%s (%p) != %s (%p)", orig, orig, embed, embed)
}
}
+
+var testObjects = []struct {
+ src string
+ obj string
+ want string
+}{
+ {"import \"io\"; var r io.Reader", "r", "var p.r io.Reader"},
+
+ {"const c = 1.2", "c", "const p.c untyped float"},
+ {"const c float64 = 3.14", "c", "const p.c float64"},
+
+ {"type t struct{f int}", "t", "type p.t struct{f int}"},
+ {"type t func(int)", "t", "type p.t func(int)"},
+ {"type t[P any] struct{f P}", "t", "type p.t[P any] struct{f P}"},
+ {"type t[P any] struct{f P}", "t.P", "type parameter P any"},
+ {"type C interface{m()}; type t[P C] struct{}", "t.P", "type parameter P p.C"},
+
+ {"type t = struct{f int}", "t", "type p.t = struct{f int}"},
+ {"type t = func(int)", "t", "type p.t = func(int)"},
+
+ {"var v int", "v", "var p.v int"},
+
+ {"func f(int) string", "f", "func p.f(int) string"},
+ {"func g[P any](x P){}", "g", "func p.g[P any](x P)"},
+ {"func g[P interface{~int}](x P){}", "g.P", "type parameter P interface{~int}"},
+ {"", "any", "type any = interface{}"},
+}
+
+func TestObjectString(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ for _, test := range testObjects {
+ src := "package p; " + test.src
+ pkg, err := makePkg(src)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+
+ names := strings.Split(test.obj, ".")
+ if len(names) != 1 && len(names) != 2 {
+ t.Errorf("%s: invalid object path %s", test.src, test.obj)
+ continue
+ }
+ _, obj := pkg.Scope().LookupParent(names[0], nopos)
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, names[0])
+ continue
+ }
+ if len(names) == 2 {
+ if typ, ok := obj.Type().(interface{ TypeParams() *TypeParamList }); ok {
+ obj = lookupTypeParamObj(typ.TypeParams(), names[1])
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, test.obj)
+ continue
+ }
+ } else {
+ t.Errorf("%s: %s has no type parameters", test.src, names[0])
+ continue
+ }
+ }
+
+ if got := obj.String(); got != test.want {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.want)
+ }
+ }
+}
+
+func lookupTypeParamObj(list *TypeParamList, name string) Object {
+ for i := 0; i < list.Len(); i++ {
+ tpar := list.At(i)
+ if tpar.Obj().Name() == name {
+ return tpar.Obj()
+ }
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
index 2f85802701..fce9a11ffa 100644
--- a/src/cmd/compile/internal/types2/operand.go
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -116,7 +116,7 @@ func operandString(x *operand, qf Qualifier) string {
case nil, Typ[Invalid]:
return "nil (with invalid type)"
case Typ[UntypedNil]:
- return "untyped nil"
+ return "nil"
default:
return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf))
}
@@ -183,7 +183,7 @@ func operandString(x *operand, qf Qualifier) string {
}
buf.WriteString(intro)
WriteType(&buf, x.typ, qf)
- if tpar := asTypeParam(x.typ); tpar != nil {
+ if tpar, _ := x.typ.(*TypeParam); tpar != nil {
buf.WriteString(" constrained by ")
WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
}
@@ -256,8 +256,8 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
Vu := under(V)
Tu := under(T)
- Vp, _ := Vu.(*TypeParam)
- Tp, _ := Tu.(*TypeParam)
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
// x is an untyped value representable by a value of type T.
if isUntyped(Vu) {
@@ -282,32 +282,35 @@ func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, er
// Vu is typed
// x's type V and T have identical underlying types
- // and at least one of V or T is not a named type.
- if Identical(Vu, Tu) && (!hasName(V) || !hasName(T)) {
+ // and at least one of V or T is not a named type
+ // and neither V nor T is a type parameter.
+ if Identical(Vu, Tu) && (!hasName(V) || !hasName(T)) && Vp == nil && Tp == nil {
return true, 0
}
- // T is an interface type and x implements T and T is not a type parameter
- if Ti, ok := Tu.(*Interface); ok {
- if m, wrongType := check.missingMethod(V, Ti, true); m != nil /* Implements(V, Ti) */ {
+ // T is an interface type and x implements T and T is not a type parameter.
+ // Also handle the case where T is a pointer to an interface.
+ if _, ok := Tu.(*Interface); ok && Tp == nil || isInterfacePtr(Tu) {
+ if err := check.implements(V, T); err != nil {
if reason != nil {
- // TODO(gri) the error messages here should follow the style in Checker.typeAssertion (factor!)
- if wrongType != nil {
- if Identical(m.typ, wrongType.typ) {
- *reason = fmt.Sprintf("missing method %s (%s has pointer receiver)", m.name, m.name)
- } else {
- *reason = fmt.Sprintf("wrong type for method %s (have %s, want %s)", m.Name(), wrongType.typ, m.typ)
- }
-
- } else {
- *reason = "missing method " + m.Name()
- }
+ *reason = err.Error()
}
return false, _InvalidIfaceAssign
}
return true, 0
}
+ // If V is an interface, check if a missing type assertion is the problem.
+ if Vi, _ := Vu.(*Interface); Vi != nil && Vp == nil {
+ if check.implements(T, V) == nil {
+ // T implements V, so give hint about type assertion.
+ if reason != nil {
+ *reason = "need type assertion"
+ }
+ return false, _IncompatibleAssign
+ }
+ }
+
// x is a bidirectional channel value, T is a channel
// type, x's type V and T have identical element types,
// and at least one of V or T is not a named type.
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
index 7fbb91eb61..003e58db38 100644
--- a/src/cmd/compile/internal/types2/predicates.go
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -31,7 +31,7 @@ func isBasic(t Type, info BasicInfo) bool {
// The allX predicates below report whether t is an X.
// If t is a type parameter the result is true if isX is true
// for all specified types of the type parameter's type set.
-// allX is an optimized version of isX(structure(t)) (which
+// allX is an optimized version of isX(structuralType(t)) (which
// is the same as underIs(t, isX)).
func allBoolean(t Type) bool { return allBasic(t, IsBoolean) }
@@ -45,15 +45,12 @@ func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
// allBasic reports whether under(t) is a basic type with the specified info.
// If t is a type parameter, the result is true if isBasic(t, info) is true
// for all specific types of the type parameter's type set.
-// allBasic(t, info) is an optimized version of isBasic(structure(t), info).
+// allBasic(t, info) is an optimized version of isBasic(structuralType(t), info).
func allBasic(t Type, info BasicInfo) bool {
- switch u := under(t).(type) {
- case *Basic:
- return u.info&info != 0
- case *TypeParam:
- return u.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
+ if tpar, _ := t.(*TypeParam); tpar != nil {
+ return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
}
- return false
+ return isBasic(t, info)
}
// hasName reports whether t has a name. This includes
@@ -72,7 +69,7 @@ func hasName(t Type) bool {
// are not fully set up.
func isTyped(t Type) bool {
// isTyped is called with types that are not fully
- // set up. Must not call asBasic()!
+ // set up. Must not call under()!
b, _ := t.(*Basic)
return b == nil || b.info&IsUntyped == 0
}
@@ -84,12 +81,13 @@ func isUntyped(t Type) bool {
// IsInterface reports whether t is an interface type.
func IsInterface(t Type) bool {
- return asInterface(t) != nil
+ _, ok := under(t).(*Interface)
+ return ok
}
// isTypeParam reports whether t is a type parameter.
func isTypeParam(t Type) bool {
- _, ok := under(t).(*TypeParam)
+ _, ok := t.(*TypeParam)
return ok
}
@@ -121,7 +119,7 @@ func comparable(T Type, seen map[Type]bool) bool {
// assume invalid types to be comparable
// to avoid follow-up errors
return t.kind != UntypedNil
- case *Pointer, *Interface, *Chan:
+ case *Pointer, *Chan:
return true
case *Struct:
for _, f := range t.fields {
@@ -132,8 +130,8 @@ func comparable(T Type, seen map[Type]bool) bool {
return true
case *Array:
return comparable(t.elem, seen)
- case *TypeParam:
- return t.iface().IsComparable()
+ case *Interface:
+ return !isTypeParam(T) || t.typeSet().IsComparable(seen)
}
return false
}
@@ -143,10 +141,12 @@ func hasNil(t Type) bool {
switch u := under(t).(type) {
case *Basic:
return u.kind == UnsafePointer
- case *Slice, *Pointer, *Signature, *Interface, *Map, *Chan:
+ case *Slice, *Pointer, *Signature, *Map, *Chan:
return true
- case *TypeParam:
- return u.underIs(hasNil)
+ case *Interface:
+ return !isTypeParam(t) || u.typeSet().underIs(func(u Type) bool {
+ return u != nil && hasNil(u)
+ })
}
return false
}
@@ -235,23 +235,63 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
}
case *Signature:
- // Two function types are identical if they have the same number of parameters
- // and result values, corresponding parameter and result types are identical,
- // and either both functions are variadic or neither is. Parameter and result
- // names are not required to match.
- // Generic functions must also have matching type parameter lists, but for the
- // parameter names.
- if y, ok := y.(*Signature); ok {
- return x.variadic == y.variadic &&
- identicalTParams(x.TypeParams().list(), y.TypeParams().list(), cmpTags, p) &&
- identical(x.params, y.params, cmpTags, p) &&
- identical(x.results, y.results, cmpTags, p)
+ y, _ := y.(*Signature)
+ if y == nil {
+ return false
}
+ // Two function types are identical if they have the same number of
+ // parameters and result values, corresponding parameter and result types
+ // are identical, and either both functions are variadic or neither is.
+ // Parameter and result names are not required to match, and type
+ // parameters are considered identical modulo renaming.
+
+ if x.TypeParams().Len() != y.TypeParams().Len() {
+ return false
+ }
+
+ // In the case of generic signatures, we will substitute in yparams and
+ // yresults.
+ yparams := y.params
+ yresults := y.results
+
+ if x.TypeParams().Len() > 0 {
+ // We must ignore type parameter names when comparing x and y. The
+ // easiest way to do this is to substitute x's type parameters for y's.
+ xtparams := x.TypeParams().list()
+ ytparams := y.TypeParams().list()
+
+ var targs []Type
+ for i := range xtparams {
+ targs = append(targs, x.TypeParams().At(i))
+ }
+ smap := makeSubstMap(ytparams, targs)
+
+ var check *Checker // ok to call subst on a nil *Checker
+
+ // Constraints must be pair-wise identical, after substitution.
+ for i, xtparam := range xtparams {
+ ybound := check.subst(nopos, ytparams[i].bound, smap, nil)
+ if !identical(xtparam.bound, ybound, cmpTags, p) {
+ return false
+ }
+ }
+
+ yparams = check.subst(nopos, y.params, smap, nil).(*Tuple)
+ yresults = check.subst(nopos, y.results, smap, nil).(*Tuple)
+ }
+
+ return x.variadic == y.variadic &&
+ identical(x.params, yparams, cmpTags, p) &&
+ identical(x.results, yresults, cmpTags, p)
+
case *Union:
if y, _ := y.(*Union); y != nil {
- xset := computeUnionTypeSet(nil, nopos, x)
- yset := computeUnionTypeSet(nil, nopos, y)
+ // TODO(rfindley): can this be reached during type checking? If so,
+ // consider passing a type set map.
+ unionSets := make(map[*Union]*_TypeSet)
+ xset := computeUnionTypeSet(nil, unionSets, nopos, x)
+ yset := computeUnionTypeSet(nil, unionSets, nopos, y)
return xset.terms.equal(yset.terms)
}
@@ -266,6 +306,9 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
if y, ok := y.(*Interface); ok {
xset := x.typeSet()
yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
if !xset.terms.equal(yset.terms) {
return false
}
@@ -372,17 +415,21 @@ func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
return false
}
-func identicalTParams(x, y []*TypeParam, cmpTags bool, p *ifacePair) bool {
- if len(x) != len(y) {
+// identicalInstance reports if two type instantiations are identical.
+// Instantiations are identical if their origin and type arguments are
+// identical.
+func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool {
+ if len(xargs) != len(yargs) {
return false
}
- for i, x := range x {
- y := y[i]
- if !identical(x.bound, y.bound, cmpTags, p) {
+
+ for i, xa := range xargs {
+ if !Identical(xa, yargs[i]) {
return false
}
}
- return true
+
+ return Identical(xorig, yorig)
}
// Default returns the default "typed" type for an "untyped" type;
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
index a8cb244c55..05755f8cfd 100644
--- a/src/cmd/compile/internal/types2/resolver.go
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -448,15 +448,10 @@ func (check *Checker) collectObjects() {
} else {
// method
// d.Recv != nil
- if !acceptMethodTypeParams && len(s.TParamList) != 0 {
- //check.error(d.TParamList.Pos(), invalidAST + "method must have no type parameters")
- check.error(s.TParamList[0], invalidAST+"method must have no type parameters")
- hasTParamError = true
- }
ptr, recv, _ := check.unpackRecv(s.Recv.Type, false)
- // (Methods with invalid receiver cannot be associated to a type, and
+ // Methods with invalid receiver cannot be associated to a type, and
// methods with blank _ names are never found; no need to collect any
- // of them. They will still be type-checked with all the other functions.)
+ // of them. They will still be type-checked with all the other functions.
if recv != nil && name != "_" {
methods = append(methods, methodInfo{obj, ptr, recv})
}
@@ -661,25 +656,31 @@ func (check *Checker) packageObjects() {
}
}
- // We process non-alias declarations first, in order to avoid situations where
- // the type of an alias declaration is needed before it is available. In general
- // this is still not enough, as it is possible to create sufficiently convoluted
- // recursive type definitions that will cause a type alias to be needed before it
- // is available (see issue #25838 for examples).
- // As an aside, the cmd/compiler suffers from the same problem (#25838).
+ // We process non-alias type declarations first, followed by alias declarations,
+ // and then everything else. This appears to avoid most situations where the type
+ // of an alias is needed before it is available.
+ // There may still be cases where this is not good enough (see also issue #25838).
+ // In those cases Checker.ident will report an error ("invalid use of type alias").
var aliasList []*TypeName
- // phase 1
+ var othersList []Object // everything that's not a type
+ // phase 1: non-alias type declarations
for _, obj := range objList {
- // If we have a type alias, collect it for the 2nd phase.
- if tname, _ := obj.(*TypeName); tname != nil && check.objMap[tname].tdecl.Alias {
- aliasList = append(aliasList, tname)
- continue
+ if tname, _ := obj.(*TypeName); tname != nil {
+ if check.objMap[tname].tdecl.Alias {
+ aliasList = append(aliasList, tname)
+ } else {
+ check.objDecl(obj, nil)
+ }
+ } else {
+ othersList = append(othersList, obj)
}
-
+ }
+ // phase 2: alias type declarations
+ for _, obj := range aliasList {
check.objDecl(obj, nil)
}
- // phase 2
- for _, obj := range aliasList {
+ // phase 3: all other declarations
+ for _, obj := range othersList {
check.objDecl(obj, nil)
}
diff --git a/src/cmd/compile/internal/types2/self_test.go b/src/cmd/compile/internal/types2/self_test.go
index e0d2e1b07a..9a01ccdf7a 100644
--- a/src/cmd/compile/internal/types2/self_test.go
+++ b/src/cmd/compile/internal/types2/self_test.go
@@ -33,6 +33,7 @@ func BenchmarkCheck(b *testing.B) {
filepath.Join("src", "net", "http"),
filepath.Join("src", "go", "parser"),
filepath.Join("src", "go", "constant"),
+ filepath.Join("src", "runtime"),
filepath.Join("src", "go", "internal", "gcimporter"),
} {
b.Run(path.Base(p), func(b *testing.B) {
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
index 4541435587..c87fab749c 100644
--- a/src/cmd/compile/internal/types2/signature.go
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -18,7 +18,7 @@ type Signature struct {
// We then unpack the *Signature and use the scope for the literal body.
rparams *TypeParamList // receiver type parameters from left to right, or nil
tparams *TypeParamList // type parameters from left to right, or nil
- scope *Scope // function scope, present for package-local signatures
+ scope *Scope // function scope for package-local and non-instantiated signatures; nil otherwise
recv *Var // nil if not a method
params *Tuple // (incoming) parameters from left to right; or nil
results *Tuple // (outgoing) results from left to right; or nil
@@ -73,9 +73,6 @@ func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParam
// RecvTypeParams returns the receiver type parameters of signature s, or nil.
func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams }
-// SetRecvTypeParams sets the receiver type params of signature s.
-func (s *Signature) SetRecvTypeParams(rparams []*TypeParam) { s.rparams = bindTParams(rparams) }
-
// Params returns the parameters of signature s, or nil.
func (s *Signature) Params() *Tuple { return s.params }
@@ -91,9 +88,6 @@ func (s *Signature) String() string { return TypeString(s, nil) }
// ----------------------------------------------------------------------------
// Implementation
-// Disabled by default, but enabled when running tests (via types_test.go).
-var acceptMethodTypeParams bool
-
// funcType type-checks a function or method type.
func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
check.openScope(ftyp, "function")
@@ -163,13 +157,8 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
}
if tparams != nil {
+ // The parser will complain about invalid type parameters for methods.
check.collectTypeParams(&sig.tparams, tparams)
- // Always type-check method type parameters but complain if they are not enabled.
- // (A separate check is needed when type-checking interface method signatures because
- // they don't have a receiver specification.)
- if recvPar != nil && !acceptMethodTypeParams {
- check.error(ftyp, "methods cannot have type parameters")
- }
}
// Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
@@ -216,7 +205,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []
var err string
switch T := rtyp.(type) {
case *Named:
- T.resolve(check.conf.Context)
+ T.resolve(check.bestContext(nil))
// The receiver type may be an instantiated type referred to
// by an alias (which cannot have receiver parameters for now).
if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
index 99b846b80b..14020050a9 100644
--- a/src/cmd/compile/internal/types2/sizeof_test.go
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -27,11 +27,11 @@ func TestSizeof(t *testing.T) {
{Pointer{}, 8, 16},
{Tuple{}, 12, 24},
{Signature{}, 28, 56},
- {Union{}, 16, 32},
+ {Union{}, 12, 24},
{Interface{}, 44, 88},
{Map{}, 16, 32},
{Chan{}, 12, 24},
- {Named{}, 68, 128},
+ {Named{}, 56, 104},
{TypeParam{}, 28, 48},
{term{}, 12, 24},
@@ -40,7 +40,7 @@ func TestSizeof(t *testing.T) {
{Const{}, 64, 104},
{TypeName{}, 56, 88},
{Var{}, 60, 96},
- {Func{}, 64, 104},
+ {Func{}, 60, 96},
{Label{}, 60, 96},
{Builtin{}, 60, 96},
{Nil{}, 56, 88},
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
index 6a3d19d8ea..6f981964be 100644
--- a/src/cmd/compile/internal/types2/sizes.go
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -67,6 +67,9 @@ func (s *StdSizes) Alignof(T Type) int64 {
case *Slice, *Interface:
// Multiword data structures are effectively structs
// in which each element has size WordSize.
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Alignof won't be called for them.
+ assert(!isTypeParam(T))
return s.WordSize
case *Basic:
// Strings are like slices and interfaces.
@@ -151,6 +154,9 @@ func (s *StdSizes) Sizeof(T Type) int64 {
offsets := s.Offsetsof(t.fields)
return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
case *Interface:
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Sizeof won't be called for them.
+ assert(!isTypeParam(T))
return s.WordSize * 2
case *TypeParam, *Union:
unreachable()
@@ -243,7 +249,7 @@ func (conf *Config) offsetsof(T *Struct) []int64 {
func (conf *Config) offsetof(typ Type, index []int) int64 {
var o int64
for _, i := range index {
- s := asStruct(typ)
+ s := under(typ).(*Struct)
o += conf.offsetsof(s)[i]
typ = s.fields[i].typ
}
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
index 9c22f01673..551611da55 100644
--- a/src/cmd/compile/internal/types2/stdlib_test.go
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -165,9 +165,11 @@ func TestStdTest(t *testing.T) {
testTestDir(t, filepath.Join(runtime.GOROOT(), "test"),
"cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
"directive.go", // tests compiler rejection of bad directive placement - ignore
+ "directive2.go", // tests compiler rejection of bad directive placement - ignore
"embedfunc.go", // tests //go:embed
"embedvers.go", // tests //go:embed
"linkname2.go", // types2 doesn't check validity of //go:xxx directives
+ "linkname3.go", // types2 doesn't check validity of //go:xxx directives
)
}
@@ -194,6 +196,8 @@ func TestStdFixed(t *testing.T) {
"issue42058b.go", // types2 does not have constraints on channel element size
"issue48097.go", // go/types doesn't check validity of //go:xxx directives, and non-init bodyless function
"issue48230.go", // go/types doesn't check validity of //go:xxx directives
+ "issue49767.go", // go/types does not have constraints on channel element size
+ "issue49814.go", // go/types does not have constraints on array size
)
}
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
index eaf420aca7..b23d7aeef2 100644
--- a/src/cmd/compile/internal/types2/stmt.go
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -28,13 +28,13 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body
sig.scope.pos = body.Pos()
sig.scope.end = syntax.EndPos(body)
- // save/restore current context and setup function context
+ // save/restore current environment and set up function environment
// (and use 0 indentation at function start)
- defer func(ctxt context, indent int) {
- check.context = ctxt
+ defer func(env environment, indent int) {
+ check.environment = env
check.indent = indent
- }(check.context, check.indent)
- check.context = context{
+ }(check.environment, check.indent)
+ check.environment = environment{
decl: decl,
scope: sig.scope,
iota: iota,
@@ -305,7 +305,7 @@ L:
}
}
seen[T] = e
- if T != nil {
+ if T != nil && xtyp != nil {
check.typeAssertion(e, x, xtyp, T, true)
}
}
@@ -408,27 +408,21 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
if ch.mode == invalid || val.mode == invalid {
return
}
- var elem Type
- if !underIs(ch.typ, func(u Type) bool {
- uch, _ := u.(*Chan)
- if uch == nil {
- check.errorf(s, invalidOp+"cannot send to non-channel %s", &ch)
- return false
- }
- if uch.dir == RecvOnly {
- check.errorf(s, invalidOp+"cannot send to receive-only channel %s", &ch)
- return false
- }
- if elem != nil && !Identical(uch.elem, elem) {
- check.errorf(s, invalidOp+"channels of %s must have the same element type", &ch)
- return false
- }
- elem = uch.elem
- return true
- }) {
+ u := structuralType(ch.typ)
+ if u == nil {
+ check.errorf(s, invalidOp+"cannot send to %s: no structural type", &ch)
return
}
- check.assignment(&val, elem, "send")
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(s, invalidOp+"cannot send to non-channel %s", &ch)
+ return
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(s, invalidOp+"cannot send to receive-only channel %s", &ch)
+ return
+ }
+ check.assignment(&val, uch.elem, "send")
case *syntax.AssignStmt:
lhs := unpackExpr(s.Lhs)
@@ -480,30 +474,28 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
case *syntax.ReturnStmt:
res := check.sig.results
+ // Return with implicit results allowed for function with named results.
+ // (If one is named, all are named.)
results := unpackExpr(s.Results)
- if res.Len() > 0 {
- // function returns results
- // (if one, say the first, result parameter is named, all of them are named)
- if len(results) == 0 && res.vars[0].name != "" {
- // spec: "Implementation restriction: A compiler may disallow an empty expression
- // list in a "return" statement if a different entity (constant, type, or variable)
- // with the same name as a result parameter is in scope at the place of the return."
- for _, obj := range res.vars {
- if alt := check.lookup(obj.name); alt != nil && alt != obj {
- var err error_
- err.errorf(s, "result parameter %s not in scope at return", obj.name)
- err.errorf(alt, "inner declaration of %s", obj)
- check.report(&err)
- // ok to continue
- }
+ if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" {
+ // spec: "Implementation restriction: A compiler may disallow an empty expression
+ // list in a "return" statement if a different entity (constant, type, or variable)
+ // with the same name as a result parameter is in scope at the place of the return."
+ for _, obj := range res.vars {
+ if alt := check.lookup(obj.name); alt != nil && alt != obj {
+ var err error_
+ err.errorf(s, "result parameter %s not in scope at return", obj.name)
+ err.errorf(alt, "inner declaration of %s", obj)
+ check.report(&err)
+ // ok to continue
}
- } else {
- // return has results or result parameters are unnamed
- check.initVars(res.vars, results, s.Pos())
}
- } else if len(results) > 0 {
- check.error(results[0], "no result values expected")
- check.use(results...)
+ } else {
+ var lhs []*Var
+ if res.Len() > 0 {
+ lhs = res.vars
+ }
+ check.initVars(lhs, results, s)
}
case *syntax.BranchStmt:
@@ -739,14 +731,16 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu
if x.mode == invalid {
return
}
- // Caution: We're not using asInterface here because we don't want
- // to switch on a suitably constrained type parameter (for
- // now).
- // TODO(gri) Need to revisit this.
- xtyp, _ := under(x.typ).(*Interface)
- if xtyp == nil {
- check.errorf(&x, "%s is not an interface type", &x)
- return
+
+ // TODO(gri) we may want to permit type switches on type parameter values at some point
+ var xtyp *Interface
+ if isTypeParam(x.typ) {
+ check.errorf(&x, "cannot use type switch on type parameter value %s", &x)
+ } else {
+ xtyp, _ = under(x.typ).(*Interface)
+ if xtyp == nil {
+ check.errorf(&x, "%s is not an interface", &x)
+ }
}
check.multipleSwitchDefaults(s.Body)
@@ -815,32 +809,34 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu
func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
// scope already opened
+ // determine lhs, if any
+ sKey := rclause.Lhs // possibly nil
+ var sValue, sExtra syntax.Expr
+ if p, _ := sKey.(*syntax.ListExpr); p != nil {
+ if len(p.ElemList) < 2 {
+ check.error(s, invalidAST+"invalid lhs in range clause")
+ return
+ }
+ // len(p.ElemList) >= 2
+ sKey = p.ElemList[0]
+ sValue = p.ElemList[1]
+ if len(p.ElemList) > 2 {
+ // delay error reporting until we know more
+ sExtra = p.ElemList[2]
+ }
+ }
+
// check expression to iterate over
var x operand
check.expr(&x, rclause.X)
- // determine lhs, if any
- sKey := rclause.Lhs // possibly nil
- var sValue syntax.Expr
- if p, _ := sKey.(*syntax.ListExpr); p != nil {
- if len(p.ElemList) != 2 {
- check.error(s, invalidAST+"invalid lhs in range clause")
- return
- }
- sKey = p.ElemList[0]
- sValue = p.ElemList[1]
- }
-
// determine key/value types
var key, val Type
if x.mode != invalid {
- // Ranging over a type parameter is permitted if it has a single underlying type.
+ // Ranging over a type parameter is permitted if it has a structural type.
var cause string
- u := structure(x.typ)
- switch t := u.(type) {
- case nil:
- cause = "type set has no single underlying type"
- case *Chan:
+ u := structuralType(x.typ)
+ if t, _ := u.(*Chan); t != nil {
if sValue != nil {
check.softErrorf(sValue, "range over %s permits only one iteration variable", &x)
// ok to continue
@@ -848,6 +844,14 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s
if t.dir == SendOnly {
cause = "receive from send-only channel"
}
+ } else {
+ if sExtra != nil {
+ check.softErrorf(sExtra, "range clause permits at most two iteration variables")
+ // ok to continue
+ }
+ if u == nil {
+ cause = check.sprintf("%s has no structural type", x.typ)
+ }
}
key, val = rangeKeyVal(u)
if key == nil || cause != "" {
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
index 933d7ef947..31a3b1af5b 100644
--- a/src/cmd/compile/internal/types2/struct.go
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -14,7 +14,7 @@ import (
// A Struct represents a struct type.
type Struct struct {
- fields []*Var
+ fields []*Var // fields != nil indicates the struct is set up (possibly with len(fields) == 0)
tags []string // field tags; nil if there are no tags
}
@@ -32,7 +32,9 @@ func NewStruct(fields []*Var, tags []string) *Struct {
if len(tags) > len(fields) {
panic("more tags than fields")
}
- return &Struct{fields: fields, tags: tags}
+ s := &Struct{fields: fields, tags: tags}
+ s.markComplete()
+ return s
}
// NumFields returns the number of fields in the struct (including blank and embedded fields).
@@ -55,8 +57,15 @@ func (s *Struct) String() string { return TypeString(s, nil) }
// ----------------------------------------------------------------------------
// Implementation
+func (s *Struct) markComplete() {
+ if s.fields == nil {
+ s.fields = make([]*Var, 0)
+ }
+}
+
func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
if e.FieldList == nil {
+ styp.markComplete()
return
}
@@ -135,21 +144,23 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
embeddedPos := pos
check.later(func() {
t, isPtr := deref(embeddedTyp)
- switch t := under(t).(type) {
+ switch u := under(t).(type) {
case *Basic:
if t == Typ[Invalid] {
// error was reported before
return
}
// unsafe.Pointer is treated like a regular pointer
- if t.kind == UnsafePointer {
+ if u.kind == UnsafePointer {
check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
}
case *Pointer:
check.error(embeddedPos, "embedded field type cannot be a pointer")
- case *TypeParam:
- check.error(embeddedPos, "embedded field type cannot be a (pointer to a) type parameter")
case *Interface:
+ if isTypeParam(t) {
+ check.error(embeddedPos, "embedded field type cannot be a (pointer to a) type parameter")
+ break
+ }
if isPtr {
check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
}
@@ -160,6 +171,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
styp.fields = fields
styp.tags = tags
+ styp.markComplete()
}
func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
index 269b284ac4..f2e8fecc05 100644
--- a/src/cmd/compile/internal/types2/subst.go
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -91,7 +91,9 @@ func (subst *subster) typ(typ Type) Type {
case *Struct:
if fields, copied := subst.varList(t.fields); copied {
- return &Struct{fields: fields, tags: t.tags}
+ s := &Struct{fields: fields, tags: t.tags}
+ s.markComplete()
+ return s
}
case *Pointer:
@@ -104,17 +106,29 @@ func (subst *subster) typ(typ Type) Type {
return subst.tuple(t)
case *Signature:
- // TODO(gri) rethink the recv situation with respect to methods on parameterized types
- // recv := subst.var_(t.recv) // TODO(gri) this causes a stack overflow - explain
+ // Preserve the receiver: it is handled during *Interface and *Named type
+ // substitution.
+ //
+ // Naively doing the substitution here can lead to an infinite recursion in
+ // the case where the receiver is an interface. For example, consider the
+ // following declaration:
+ //
+ // type T[A any] struct { f interface{ m() } }
+ //
+ // In this case, the type of f is an interface that is itself the receiver
+ // type of all of its methods. Because we have no type name to break
+ // cycles, substituting in the recv results in an infinite loop of
+ // recv->interface->recv->interface->...
recv := t.recv
+
params := subst.tuple(t.params)
results := subst.tuple(t.results)
- if recv != t.recv || params != t.params || results != t.results {
+ if params != t.params || results != t.results {
return &Signature{
rparams: t.rparams,
// TODO(gri) why can't we nil out tparams here, rather than in instantiate?
- tparams: t.tparams,
- scope: t.scope,
+ tparams: t.tparams,
+ // instantiated signatures have a nil scope
recv: recv,
params: params,
results: results,
@@ -128,14 +142,28 @@ func (subst *subster) typ(typ Type) Type {
// term list substitution may introduce duplicate terms (unlikely but possible).
// This is ok; lazy type set computation will determine the actual type set
// in normal form.
- return &Union{terms, nil}
+ return &Union{terms}
}
case *Interface:
methods, mcopied := subst.funcList(t.methods)
embeddeds, ecopied := subst.typeList(t.embeddeds)
if mcopied || ecopied {
- iface := &Interface{methods: methods, embeddeds: embeddeds, complete: t.complete}
+ iface := &Interface{embeddeds: embeddeds, implicit: t.implicit, complete: t.complete}
+ // If we've changed the interface type, we may need to replace its
+ // receiver if the receiver type is the original interface. Receivers of
+ // *Named type are replaced during named type expansion.
+ //
+ // Notably, it's possible to reach here and not create a new *Interface,
+ // even though the receiver type may be parameterized. For example:
+ //
+ // type T[P any] interface{ m() }
+ //
+ // In this case the interface will not be substituted here, because its
+ // method signatures do not depend on the type parameter P, but we still
+ // need to create new interface methods to hold the instantiated
+ // receiver. This is handled by expandNamed.
+ iface.methods, _ = replaceRecvType(methods, t, iface)
return iface
}
@@ -205,9 +233,9 @@ func (subst *subster) typ(typ Type) Type {
}
// before creating a new named type, check if we have this one already
- h := subst.ctxt.TypeHash(t.orig, newTArgs)
+ h := subst.ctxt.instanceHash(t.orig, newTArgs)
dump(">>> new type hash: %s", h)
- if named := subst.ctxt.typeForHash(h, nil); named != nil {
+ if named := subst.ctxt.lookup(h, t.orig, newTArgs); named != nil {
dump(">>> found %s", named)
return named
}
@@ -347,3 +375,31 @@ func (subst *subster) termlist(in []*Term) (out []*Term, copied bool) {
}
return
}
+
+// replaceRecvType updates any function receivers that have type old to have
+// type new. It does not modify the input slice; if modifications are required,
+// the input slice and any affected signatures will be copied before mutating.
+//
+// The resulting out slice contains the updated functions, and copied reports
+// if anything was modified.
+func replaceRecvType(in []*Func, old, new Type) (out []*Func, copied bool) {
+ out = in
+ for i, method := range in {
+ sig := method.Type().(*Signature)
+ if sig.recv != nil && sig.recv.Type() == old {
+ if !copied {
+ // Allocate a new methods slice before mutating for the first time.
+ // This is defensive, as we may share methods across instantiations of
+ // a given interface type if they do not get substituted.
+ out = make([]*Func, len(in))
+ copy(out, in)
+ copied = true
+ }
+ newsig := *sig
+ sig = &newsig
+ sig.recv = NewVar(sig.recv.pos, sig.recv.pkg, "", new)
+ out[i] = NewFunc(method.pos, method.pkg, method.name, sig)
+ }
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
index d1067a190f..48a39891bf 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
@@ -148,7 +148,7 @@ func _[
_ = make /* ERROR expects 2 or 3 arguments */ (S1)
_ = make(S1, 10, 20)
_ = make /* ERROR expects 2 or 3 arguments */ (S1, 10, 20, 30)
- _ = make(S2 /* ERROR cannot make .* no single underlying type */ , 10)
+ _ = make(S2 /* ERROR cannot make S2: no structural type */ , 10)
type M0 map[string]int
_ = make(map[string]int)
@@ -156,7 +156,7 @@ func _[
_ = make(M1)
_ = make(M1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(M1, 10, 20)
- _ = make(M2 /* ERROR cannot make .* no single underlying type */ )
+ _ = make(M2 /* ERROR cannot make M2: no structural type */ )
type C0 chan int
_ = make(chan int)
@@ -164,7 +164,7 @@ func _[
_ = make(C1)
_ = make(C1, 10)
_ = make/* ERROR expects 1 or 2 arguments */(C1, 10, 20)
- _ = make(C2 /* ERROR cannot make .* no single underlying type */ )
+ _ = make(C2 /* ERROR cannot make C2: no structural type */ )
_ = make(C3)
}
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.src b/src/cmd/compile/internal/types2/testdata/check/builtins.src
index 17e4068d65..de27f5c632 100644
--- a/src/cmd/compile/internal/types2/testdata/check/builtins.src
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.src
@@ -25,7 +25,7 @@ func append1() {
_ = append(s, b)
_ = append(s, x /* ERROR cannot use x */ )
_ = append(s, s /* ERROR cannot use s */ )
- _ = append(s... ) /* ERROR not enough arguments */
+ _ = append(s /* ERROR not enough arguments */ ...)
_ = append(s, b, s /* ERROR too many arguments */ ... )
_ = append(s, 1, 2, 3)
_ = append(s, 1, 2, 3, x /* ERROR cannot use x */ , 5, 6, 6)
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles.src b/src/cmd/compile/internal/types2/testdata/check/cycles.src
index b2ee8ecd5f..998f9f7da9 100644
--- a/src/cmd/compile/internal/types2/testdata/check/cycles.src
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles.src
@@ -45,6 +45,7 @@ type (
// pointers
P0 *P0
+ PP *struct{ PP.f /* ERROR no field or method f */ }
// functions
F0 func(F0)
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles5.src b/src/cmd/compile/internal/types2/testdata/check/cycles5.src
index 397adcce01..c932ef92d0 100644
--- a/src/cmd/compile/internal/types2/testdata/check/cycles5.src
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles5.src
@@ -135,7 +135,7 @@ type (
type (
a struct{ *b }
b = c
- c struct{ *b }
+ c struct{ *b /* ERROR invalid use of type alias */ }
)
// issue #24939
@@ -145,7 +145,7 @@ type (
}
M interface {
- F() P
+ F() P // ERROR invalid use of type alias
}
P = interface {
diff --git a/src/cmd/compile/internal/types2/testdata/check/errors.src b/src/cmd/compile/internal/types2/testdata/check/errors.src
index ff929217c4..5f09197bde 100644
--- a/src/cmd/compile/internal/types2/testdata/check/errors.src
+++ b/src/cmd/compile/internal/types2/testdata/check/errors.src
@@ -8,32 +8,38 @@ package errors
// (matching messages are regular expressions, hence the \'s).
func f(x int, m map[string]int) {
// no values
- _ = f /* ERROR "f\(0, m\) \(no value\) used as value" */ (0, m)
+ _ = f /* ERROR f\(0, m\) \(no value\) used as value */ (0, m)
// built-ins
- _ = println /* ERROR "println \(built-in\) must be called" */
+ _ = println // ERROR println \(built-in\) must be called
// types
- _ = complex128 /* ERROR "complex128 \(type\) is not an expression" */
+ _ = complex128 // ERROR complex128 \(type\) is not an expression
// constants
const c1 = 991
const c2 float32 = 0.5
- 0 /* ERROR "0 \(untyped int constant\) is not used" */
- c1 /* ERROR "c1 \(untyped int constant 991\) is not used" */
- c2 /* ERROR "c2 \(constant 0.5 of type float32\) is not used" */
- c1 /* ERROR "c1 \+ c2 \(constant 991.5 of type float32\) is not used" */ + c2
+ const c3 = "foo"
+ 0 // ERROR 0 \(untyped int constant\) is not used
+ 0.5 // ERROR 0.5 \(untyped float constant\) is not used
+ "foo" // ERROR "foo" \(untyped string constant\) is not used
+ c1 // ERROR c1 \(untyped int constant 991\) is not used
+ c2 // ERROR c2 \(constant 0.5 of type float32\) is not used
+ c1 /* ERROR c1 \+ c2 \(constant 991.5 of type float32\) is not used */ + c2
+ c3 // ERROR c3 \(untyped string constant "foo"\) is not used
// variables
- x /* ERROR "x \(variable of type int\) is not used" */
+ x // ERROR x \(variable of type int\) is not used
// values
- x /* ERROR "x != x \(untyped bool value\) is not used" */ != x
- x /* ERROR "x \+ x \(value of type int\) is not used" */ + x
+ nil // ERROR nil is not used
+ (*int)(nil) // ERROR \(\*int\)\(nil\) \(value of type \*int\) is not used
+ x /* ERROR x != x \(untyped bool value\) is not used */ != x
+ x /* ERROR x \+ x \(value of type int\) is not used */ + x
// value, ok's
const s = "foo"
- m /* ERROR "m\[s\] \(map index expression of type int\) is not used" */ [s]
+ m /* ERROR m\[s\] \(map index expression of type int\) is not used */ [s]
}
// Valid ERROR comments can have a variety of forms.
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr3.src b/src/cmd/compile/internal/types2/testdata/check/expr3.src
index df4cf6a840..646319e4c4 100644
--- a/src/cmd/compile/internal/types2/testdata/check/expr3.src
+++ b/src/cmd/compile/internal/types2/testdata/check/expr3.src
@@ -45,9 +45,9 @@ func indexes() {
_ = a[:10:10]
_ = a[:11 /* ERROR "index .* out of bounds" */ :10]
_ = a[:10:11 /* ERROR "index .* out of bounds" */ ]
- _ = a[10:0:10] /* ERROR "invalid slice indices" */
- _ = a[0:10:0] /* ERROR "invalid slice indices" */
- _ = a[10:0:0] /* ERROR "invalid slice indices" */
+ _ = a[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = a[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = a[10:0 /* ERROR "invalid slice indices" */:0]
_ = &a /* ERROR "cannot take address" */ [:10]
pa := &a
@@ -63,9 +63,9 @@ func indexes() {
_ = pa[:10:10]
_ = pa[:11 /* ERROR "index .* out of bounds" */ :10]
_ = pa[:10:11 /* ERROR "index .* out of bounds" */ ]
- _ = pa[10:0:10] /* ERROR "invalid slice indices" */
- _ = pa[0:10:0] /* ERROR "invalid slice indices" */
- _ = pa[10:0:0] /* ERROR "invalid slice indices" */
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = pa[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :0]
_ = &pa /* ERROR "cannot take address" */ [:10]
var b [0]int
@@ -90,9 +90,9 @@ func indexes() {
_ = s[1 /* ERROR "overflows" */ <<100 : 1 /* ERROR "overflows" */ <<100]
_ = s[: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
_ = s[:10:10]
- _ = s[10:0:10] /* ERROR "invalid slice indices" */
- _ = s[0:10:0] /* ERROR "invalid slice indices" */
- _ = s[10:0:0] /* ERROR "invalid slice indices" */
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = s[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :0]
_ = &s /* ERROR "cannot take address" */ [:10]
var m map[string]int
@@ -110,8 +110,8 @@ func indexes() {
_ = t[- /* ERROR "negative" */ 1]
_ = t[- /* ERROR "negative" */ 1 :]
_ = t[: - /* ERROR "negative" */ 1]
- _ = t /* ERROR "3-index slice of string" */ [1:2:3]
- _ = "foo" /* ERROR "3-index slice of string" */ [1:2:3]
+ _ = t[1:2:3 /* ERROR "3-index slice of string" */ ]
+ _ = "foo"[1:2:3 /* ERROR "3-index slice of string" */ ]
var t0 byte
t0 = t[0]
_ = t0
@@ -459,9 +459,9 @@ func type_asserts() {
var t I
_ = t /* ERROR "use of .* outside type switch" */ .(type)
- _ = t /* ERROR "m method has pointer receiver" */ .(T)
+ _ = t /* ERROR "method m has pointer receiver" */ .(T)
_ = t.(*T)
- _ = t /* ERROR "missing m method" */ .(T1)
+ _ = t /* ERROR "missing method m" */ .(T1)
_ = t /* ERROR "wrong type for method m" */ .(T2)
_ = t /* STRICT "wrong type for method m" */ .(I2) // only an error in strict mode (issue 8561)
@@ -494,23 +494,23 @@ func _calls() {
f1(0)
f1(x)
f1(10.0)
- f1() /* ERROR "not enough arguments" */
- f1(x, y /* ERROR "too many arguments" */ )
+ f1 /* ERROR "not enough arguments in call to f1\n\thave \(\)\n\twant \(int\)" */ ()
+ f1(x, y /* ERROR "too many arguments in call to f1\n\thave \(int, float32\)\n\twant \(int\)" */ )
f1(s /* ERROR "cannot use .* in argument" */ )
f1(x ... /* ERROR "cannot use ..." */ )
f1(g0 /* ERROR "used as value" */ ())
f1(g1())
- f1(g2 /* ERROR "too many arguments" */ ())
+ f1(g2 /* ERROR "too many arguments in call to f1\n\thave \(float32, string\)\n\twant \(int\)" */ ())
- f2() /* ERROR "not enough arguments" */
- f2(3.14) /* ERROR "not enough arguments" */
+ f2 /* ERROR "not enough arguments in call to f2\n\thave \(\)\n\twant \(float32, string\)" */ ()
+ f2(3.14 /* ERROR "not enough arguments in call to f2\n\thave \(number\)\n\twant \(float32, string\)" */ )
f2(3.14, "foo")
f2(x /* ERROR "cannot use .* in argument" */ , "foo")
f2(g0 /* ERROR "used as value" */ ())
- f2(g1()) /* ERROR "not enough arguments" */
+ f2(g1 /* ERROR "not enough arguments in call to f2\n\thave \(int\)\n\twant \(float32, string\)" */ ())
f2(g2())
- fs() /* ERROR "not enough arguments" */
+ fs /* ERROR "not enough arguments" */ ()
fs(g0 /* ERROR "used as value" */ ())
fs(g1 /* ERROR "cannot use .* in argument" */ ())
fs(g2 /* ERROR "too many arguments" */ ())
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.go2 b/src/cmd/compile/internal/types2/testdata/check/issues.go2
index 8608473135..1763550c04 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.go2
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.go2
@@ -9,19 +9,18 @@ package p
import "io"
import "context"
-// Interfaces are always comparable (though the comparison may panic at runtime).
func eql[T comparable](x, y T) bool {
return x == y
}
-func _() {
- var x interface{}
- var y interface{ m() }
+func _[X comparable, Y interface{comparable; m()}]() {
+ var x X
+ var y Y
eql(x, y /* ERROR does not match */ ) // interfaces of different types
eql(x, x)
eql(y, y)
- eql(y, nil)
- eql[io.Reader](nil, nil)
+ eql(y, nil /* ERROR cannot use nil as Y value in argument to eql */ )
+ eql[io /* ERROR does not implement comparable */ .Reader](nil, nil)
}
// If we have a receiver of pointer to type parameter type (below: *T)
@@ -47,7 +46,7 @@ func (T) m1()
func (*T) m2()
func _() {
- f2[T /* ERROR wrong method signature */ ]()
+ f2[T /* ERROR m2 has pointer receiver */ ]()
f2[*T]()
}
@@ -58,7 +57,7 @@ func _() {
type T1[P interface{~uint}] struct{}
func _[P any]() {
- _ = T1[P /* ERROR P has no constraints */ ]{}
+ _ = T1[P /* ERROR P does not implement interface{~uint} */ ]{}
}
// This is the original (simplified) program causing the same issue.
@@ -74,8 +73,8 @@ func (u T2[U]) Add1() U {
return u.s + 1
}
-func NewT2[U any]() T2[U /* ERROR U has no constraints */ ] {
- return T2[U /* ERROR U has no constraints */ ]{}
+func NewT2[U any]() T2[U /* ERROR U does not implement Unsigned */ ] {
+ return T2[U /* ERROR U does not implement Unsigned */ ]{}
}
func _() {
@@ -145,8 +144,8 @@ type List3[TElem any] struct {
}
// Infinite generic type declarations must lead to an error.
-type inf1 /* ERROR illegal cycle */ [T any] struct{ _ inf1[T] }
-type inf2 /* ERROR illegal cycle */ [T any] struct{ inf2[T] }
+type inf1[T any] struct{ _ inf1 /* ERROR illegal cycle */ [T] }
+type inf2[T any] struct{ inf2 /* ERROR illegal cycle */ [T] }
// The implementation of conversions T(x) between integers and floating-point
// numbers checks that both T and x have either integer or floating-point
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.src b/src/cmd/compile/internal/types2/testdata/check/issues.src
index dfd51006b9..a19f99b31a 100644
--- a/src/cmd/compile/internal/types2/testdata/check/issues.src
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.src
@@ -132,12 +132,12 @@ func issue10260() {
var x I1
x = T1 /* ERROR cannot use .*: missing method foo \(foo has pointer receiver\) */ {}
- _ = x. /* ERROR impossible type assertion: x.\(T1\)\n\tT1 does not implement I1 \(foo method has pointer receiver\) */ (T1)
+ _ = x. /* ERROR impossible type assertion: x.\(T1\)\n\tT1 does not implement I1 \(method foo has pointer receiver\) */ (T1)
T1{}.foo /* ERROR cannot call pointer method foo on T1 */ ()
x.Foo /* ERROR "x.Foo undefined \(type I1 has no field or method Foo, but does have foo\)" */ ()
- _ = i2. /* ERROR impossible type assertion: i2.\(\*T1\)\n\t\*T1 does not implement I2 \(wrong type for method foo: have func\(\), want func\(x int\)\) */ (*T1)
+ _ = i2. /* ERROR impossible type assertion: i2.\(\*T1\)\n\t\*T1 does not implement I2 \(wrong type for method foo\)\n\thave func\(\)\n\twant func\(x int\) */ (*T1)
i1 = i0 /* ERROR cannot use .* missing method foo */
i1 = t0 /* ERROR cannot use .* missing method foo */
@@ -165,8 +165,8 @@ func issue10260() {
_ = map[int]I1{0: i0 /* ERROR cannot use .* missing method foo */ }
_ = map[int]I1{0: i2 /* ERROR cannot use .* wrong type for method foo */ }
- make(chan I1) <- i0 /* ERROR cannot use .* in send: missing method foo */
- make(chan I1) <- i2 /* ERROR cannot use .* in send: wrong type for method foo */
+ make(chan I1) <- i0 /* ERROR I0 does not implement I1: missing method foo */
+ make(chan I1) <- i2 /* ERROR wrong type for method foo \(have func\(x int\), want func\(\)\) */
}
// Check that constants representable as integers are in integer form
diff --git a/src/cmd/compile/internal/types2/testdata/check/methodsets.src b/src/cmd/compile/internal/types2/testdata/check/methodsets.src
index 9fb10deb9a..b0eb14cf50 100644
--- a/src/cmd/compile/internal/types2/testdata/check/methodsets.src
+++ b/src/cmd/compile/internal/types2/testdata/check/methodsets.src
@@ -196,9 +196,9 @@ func issue5918() {
_ func(error) string = error.Error
perr = &err
- _ = perr.Error /* ERROR "no field or method" */ ()
- _ func() string = perr.Error /* ERROR "no field or method" */
- _ func(*error) string = (*error).Error /* ERROR "no field or method" */
+ _ = perr.Error /* ERROR "type \*error is pointer to interface, not interface" */ ()
+ _ func() string = perr.Error /* ERROR "type \*error is pointer to interface, not interface" */
+ _ func(*error) string = (*error).Error /* ERROR "type \*error is pointer to interface, not interface" */
)
type T *interface{ m() int }
@@ -207,8 +207,8 @@ func issue5918() {
_ = (*x).m()
_ = (*x).m
- _ = x.m /* ERROR "no field or method" */ ()
- _ = x.m /* ERROR "no field or method" */
- _ = T.m /* ERROR "no field or method" */
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */ ()
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */
+ _ = T.m /* ERROR "type T is pointer to interface, not interface" */
)
}
diff --git a/src/cmd/compile/internal/types2/testdata/check/shifts.src b/src/cmd/compile/internal/types2/testdata/check/shifts.src
index 60db731cf4..37bc84c0f6 100644
--- a/src/cmd/compile/internal/types2/testdata/check/shifts.src
+++ b/src/cmd/compile/internal/types2/testdata/check/shifts.src
@@ -381,7 +381,7 @@ func issue21727() {
var a = make([]int, 1<= 0 {
+ // The names of type parameters that are declared by the type being
+ // hashed are not part of the type identity. Replace them with a
+ // placeholder indicating their index.
+ w.string(fmt.Sprintf("$%d", i))
+ } else {
+ w.string(t.obj.name)
+ if w.debug || w.ctxt != nil {
+ w.string(subscript(t.id))
+ }
}
default:
@@ -285,12 +317,39 @@ func (w *typeWriter) typ(typ Type) {
}
}
-// If w.ctxt is non-nil, typePrefix writes a unique prefix for the named type t
-// based on the types already observed by w.ctxt. If w.ctxt is nil, it does
-// nothing.
-func (w *typeWriter) typePrefix(t *Named) {
- if w.ctxt != nil {
- w.string(strconv.Itoa(w.ctxt.idForType(t)))
+// typeSet writes a canonical hash for an interface type set.
+func (w *typeWriter) typeSet(s *_TypeSet) {
+ assert(w.ctxt != nil)
+ first := true
+ for _, m := range s.methods {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.string(m.name)
+ w.signature(m.typ.(*Signature))
+ }
+ switch {
+ case s.terms.isAll():
+ // nothing to do
+ case s.terms.isEmpty():
+ w.string(s.terms.String())
+ default:
+ var termHashes []string
+ for _, term := range s.terms {
+ // terms are not canonically sorted, so we sort their hashes instead.
+ var buf bytes.Buffer
+ if term.tilde {
+ buf.WriteByte('~')
+ }
+ newTypeHasher(&buf, w.ctxt).typ(term.typ)
+ termHashes = append(termHashes, buf.String())
+ }
+ sort.Strings(termHashes)
+ if !first {
+ w.byte(';')
+ }
+ w.string(strings.Join(termHashes, "|"))
}
}
@@ -361,7 +420,7 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
} else {
// special case:
// append(s, "foo"...) leads to signature func([]byte, string...)
- if t := asBasic(typ); t == nil || t.kind != String {
+ if t, _ := under(typ).(*Basic); t == nil || t.kind != String {
w.error("expected string type")
continue
}
@@ -378,6 +437,13 @@ func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
func (w *typeWriter) signature(sig *Signature) {
if sig.TypeParams().Len() != 0 {
+ if w.ctxt != nil {
+ assert(w.tparams == nil)
+ w.tparams = sig.TypeParams()
+ defer func() {
+ w.tparams = nil
+ }()
+ }
w.tParamList(sig.TypeParams().list())
}
diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go
index 0ed2934961..c0689e866c 100644
--- a/src/cmd/compile/internal/types2/typestring_test.go
+++ b/src/cmd/compile/internal/types2/typestring_test.go
@@ -93,6 +93,10 @@ var independentTestTypes = []testEntry{
dup(`interface{String() string; m(int) float32}`),
dup("interface{int|float32|complex128}"),
dup("interface{int|~float32|~complex128}"),
+ dup("any"),
+ dup("interface{comparable}"),
+ {"comparable", "interface{comparable}"},
+ {"error", "interface{Error() string}"},
// maps
dup("map[string]int"),
@@ -129,7 +133,12 @@ func TestTypeString(t *testing.T) {
t.Errorf("%s: %s", src, err)
continue
}
- typ := pkg.Scope().Lookup("T").Type().Underlying()
+ obj := pkg.Scope().Lookup("T")
+ if obj == nil {
+ t.Errorf("%s: T not found", test.src)
+ continue
+ }
+ typ := obj.Type().Underlying()
if got := typ.String(); got != test.str {
t.Errorf("%s: got %s, want %s", test.src, got, test.str)
}
diff --git a/src/cmd/compile/internal/types2/typeterm_test.go b/src/cmd/compile/internal/types2/typeterm_test.go
index 5a5c1fa447..6d9c8db034 100644
--- a/src/cmd/compile/internal/types2/typeterm_test.go
+++ b/src/cmd/compile/internal/types2/typeterm_test.go
@@ -99,7 +99,7 @@ func TestTermUnion(t *testing.T) {
"~int ~string ~int ~string",
"~int myInt ~int ∅",
- // union is symmetric, but the result order isn't - repeat symmetric cases explictly
+ // union is symmetric, but the result order isn't - repeat symmetric cases explicitly
"𝓤 ∅ 𝓤 ∅",
"int ∅ int ∅",
"~int ∅ ~int ∅",
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
index 95893fd1e1..de778fb010 100644
--- a/src/cmd/compile/internal/types2/typexpr.go
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -99,6 +99,10 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType boo
x.mode = constant_
case *TypeName:
+ if check.isBrokenAlias(obj) {
+ check.errorf(e, "invalid use of type alias %s in recursive type (see issue #50729)", obj.name)
+ return
+ }
x.mode = typexpr
case *Var:
@@ -144,11 +148,16 @@ func (check *Checker) typ(e syntax.Expr) Type {
func (check *Checker) varType(e syntax.Expr) Type {
typ := check.definedType(e, nil)
- // We don't want to call under() (via toInterface) or complete interfaces while we
- // are in the middle of type-checking parameter declarations that might belong to
- // interface methods. Delay this check to the end of type-checking.
+ // If we have a type parameter there's nothing to do.
+ if isTypeParam(typ) {
+ return typ
+ }
+
+ // We don't want to call under() or complete interfaces while we are in
+ // the middle of type-checking parameter declarations that might belong
+ // to interface methods. Delay this check to the end of type-checking.
check.later(func() {
- if t := asInterface(typ); t != nil {
+ if t, _ := under(typ).(*Interface); t != nil {
pos := syntax.StartPos(e)
tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
if !tset.IsMethodSet() {
@@ -206,7 +215,7 @@ func goTypeName(typ Type) string {
//
func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
if check.conf.Trace {
- check.trace(e0.Pos(), "type %s", e0)
+ check.trace(e0.Pos(), "-- type %s", e0)
check.indent++
defer func() {
check.indent--
@@ -264,7 +273,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.IndexExpr:
if !check.allowVersion(check.pkg, 1, 18) {
- check.softErrorf(e.Pos(), "type instantiation requires go1.18 or later")
+ check.versionErrorf(e.Pos(), "go1.18", "type instantiation")
}
return check.instantiatedType(e.X, unpackExpr(e.Index), def)
@@ -310,6 +319,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
case *syntax.Operation:
if e.Op == syntax.Mul && e.Y == nil {
typ := new(Pointer)
+ typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
def.setUnderlying(typ)
typ.base = check.varType(e.X)
// If typ.base is invalid, it's unlikely that *base is particularly
@@ -356,7 +366,7 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
check.later(func() {
if !Comparable(typ.key) {
var why string
- if asTypeParam(typ.key) != nil {
+ if isTypeParam(typ.key) {
why = " (missing comparable constraint)"
}
check.errorf(e.Key, "invalid map key type %s%s", typ.key, why)
@@ -396,9 +406,9 @@ func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
return typ
}
-func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def *Named) (res Type) {
+func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *Named) (res Type) {
if check.conf.Trace {
- check.trace(x.Pos(), "-- instantiating %s with %s", x, targsx)
+ check.trace(x.Pos(), "-- instantiating %s with %s", x, xlist)
check.indent++
defer func() {
check.indent--
@@ -412,42 +422,38 @@ func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def
return gtyp // error already reported
}
- origin, _ := gtyp.(*Named)
- if origin == nil {
+ orig, _ := gtyp.(*Named)
+ if orig == nil {
panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
}
// evaluate arguments
- targs := check.typeList(targsx)
+ targs := check.typeList(xlist)
if targs == nil {
def.setUnderlying(Typ[Invalid]) // avoid later errors due to lazy instantiation
return Typ[Invalid]
}
- // determine argument positions
- posList := make([]syntax.Pos, len(targs))
- for i, arg := range targsx {
- posList[i] = arg.Pos()
- }
-
// create the instance
- h := check.conf.Context.TypeHash(origin, targs)
+ ctxt := check.bestContext(nil)
+ h := ctxt.instanceHash(orig, targs)
// targs may be incomplete, and require inference. In any case we should de-duplicate.
- inst := check.conf.Context.typeForHash(h, nil)
+ inst, _ := ctxt.lookup(h, orig, targs).(*Named)
// If inst is non-nil, we can't just return here. Inst may have been
// constructed via recursive substitution, in which case we wouldn't do the
// validation below. Ensure that the validation (and resulting errors) runs
// for each instantiated type in the source.
if inst == nil {
- tname := NewTypeName(x.Pos(), origin.obj.pkg, origin.obj.name, nil)
- inst = check.newNamed(tname, origin, nil, nil, nil) // underlying, methods and tparams are set when named is resolved
- inst.targs = NewTypeList(targs)
- inst = check.conf.Context.typeForHash(h, inst)
+ // x may be a selector for an imported type; use its start pos rather than x.Pos().
+ tname := NewTypeName(syntax.StartPos(x), orig.obj.pkg, orig.obj.name, nil)
+ inst = check.newNamed(tname, orig, nil, nil, nil) // underlying, methods and tparams are set when named is resolved
+ inst.targs = newTypeList(targs)
+ inst = ctxt.update(h, orig, targs, inst).(*Named)
}
def.setUnderlying(inst)
- inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, []*Func) {
- tparams := origin.TypeParams().list()
+ inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
+ tparams := orig.TypeParams().list()
inferred := targs
if len(targs) < len(tparams) {
@@ -455,7 +461,7 @@ func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def
// be set to Typ[Invalid] in expandNamed.
inferred = check.infer(x.Pos(), tparams, targs, nil, nil)
if len(inferred) > len(targs) {
- inst.targs = NewTypeList(inferred)
+ inst.targs = newTypeList(inferred)
}
}
@@ -463,12 +469,12 @@ func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def
return expandNamed(ctxt, n, x.Pos())
}
- // origin.tparams may not be set up, so we need to do expansion later.
+ // orig.tparams may not be set up, so we need to do expansion later.
check.later(func() {
// This is an instance from the source, not from recursive substitution,
// and so it must be resolved during type-checking so that we can report
// errors.
- inst.resolve(check.conf.Context)
+ inst.resolve(ctxt)
// Since check is non-nil, we can still mutate inst. Unpinning the resolver
// frees some memory.
inst.resolver = nil
@@ -477,16 +483,16 @@ func (check *Checker) instantiatedType(x syntax.Expr, targsx []syntax.Expr, def
if i, err := check.verify(x.Pos(), inst.tparams.list(), inst.targs.list()); err != nil {
// best position for error reporting
pos := x.Pos()
- if i < len(posList) {
- pos = posList[i]
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
}
- check.softErrorf(pos, err.Error())
+ check.softErrorf(pos, "%s", err)
} else {
- check.mono.recordInstance(check.pkg, x.Pos(), inst.tparams.list(), inst.targs.list(), posList)
+ check.mono.recordInstance(check.pkg, x.Pos(), inst.tparams.list(), inst.targs.list(), xlist)
}
}
- check.validType(inst, nil)
+ check.validType(inst)
})
return inst
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
index 7f636c30d3..079db3276c 100644
--- a/src/cmd/compile/internal/types2/unify.go
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -33,6 +33,16 @@ import (
// by setting up one of them (using init) and then assigning its value
// to the other.
+const (
+ // Upper limit for recursion depth. Used to catch infinite recursions
+ // due to implementation issues (e.g., see issues #48619, #48656).
+ unificationDepthLimit = 50
+
+ // Whether to panic when unificationDepthLimit is reached. Turn on when
+ // investigating infinite recursion.
+ panicAtUnificationDepthLimit = false
+)
+
// A unifier maintains the current type parameters for x and y
// and the respective types inferred for each type parameter.
// A unifier is created by calling newUnifier.
@@ -40,6 +50,7 @@ type unifier struct {
exact bool
x, y tparamsList // x and y must initialized via tparamsList.init
types []Type // inferred types, shared by x and y
+ depth int // recursion depth during unification
}
// newUnifier returns a new unifier.
@@ -161,7 +172,13 @@ func (d *tparamsList) index(typ Type) int {
// If tpar is a type parameter in list, tparamIndex returns the type parameter index.
// Otherwise, the result is < 0. tpar must not be nil.
func tparamIndex(list []*TypeParam, tpar *TypeParam) int {
- if i := tpar.index; i < len(list) && list[i] == tpar {
+ // Once a type parameter is bound its index is >= 0. However, there are some
+ // code paths (namely tracing and type hashing) by which it is possible to
+ // arrive here with a type parameter that has not been bound, hence the check
+ // for 0 <= i below.
+ // TODO(rfindley): investigate a better approach for guarding against using
+ // unbound type parameters.
+ if i := tpar.index; 0 <= i && i < len(list) && list[i] == tpar {
return i
}
return -1
@@ -231,18 +248,28 @@ func (u *unifier) nifyEq(x, y Type, p *ifacePair) bool {
// code the corresponding changes should be made here.
// Must not be called directly from outside the unifier.
func (u *unifier) nify(x, y Type, p *ifacePair) bool {
+ // Stop gap for cases where unification fails.
+ if u.depth >= unificationDepthLimit {
+ if panicAtUnificationDepthLimit {
+ panic("unification reached recursion depth limit")
+ }
+ return false
+ }
+ u.depth++
+ defer func() {
+ u.depth--
+ }()
+
if !u.exact {
// If exact unification is known to fail because we attempt to
// match a type name against an unnamed type literal, consider
// the underlying type of the named type.
- // (Subtle: We use hasName to include any type with a name (incl.
- // basic types and type parameters. We use asNamed because we only
- // want *Named types.)
- switch {
- case !hasName(x) && y != nil && asNamed(y) != nil:
- return u.nify(x, under(y), p)
- case x != nil && asNamed(x) != nil && !hasName(y):
- return u.nify(under(x), y, p)
+ // (We use !hasName to exclude any type with a name, including
+	// basic types and type parameters; the rest are unnamed types.)
+ if nx, _ := x.(*Named); nx != nil && !hasName(y) {
+ return u.nify(nx.under(), y, p)
+ } else if ny, _ := y.(*Named); ny != nil && !hasName(x) {
+ return u.nify(x, ny.under(), p)
}
}
@@ -366,6 +393,9 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
if y, ok := y.(*Interface); ok {
xset := x.typeSet()
yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
if !xset.terms.equal(yset.terms) {
return false
}
@@ -433,11 +463,14 @@ func (u *unifier) nify(x, y Type, p *ifacePair) bool {
xargs := x.targs.list()
yargs := y.targs.list()
+ if len(xargs) != len(yargs) {
+ return false
+ }
+
// TODO(gri) This is not always correct: two types may have the same names
// in the same package if one of them is nested in a function.
// Extremely unlikely but we need an always correct solution.
if x.obj.pkg == y.obj.pkg && x.obj.name == y.obj.name {
- assert(len(xargs) == len(yargs))
for i, x := range xargs {
if !u.nify(x, yargs[i], p) {
return false
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
index 5379bde02c..3c0df04ccd 100644
--- a/src/cmd/compile/internal/types2/union.go
+++ b/src/cmd/compile/internal/types2/union.go
@@ -11,8 +11,7 @@ import "cmd/compile/internal/syntax"
// A Union represents a union of terms embedded in an interface.
type Union struct {
- terms []*Term // list of syntactical terms (not a canonicalized termlist)
- tset *_TypeSet // type set described by this union, computed lazily
+ terms []*Term // list of syntactical terms (not a canonicalized termlist)
}
// NewUnion returns a new Union type with the given terms.
@@ -21,7 +20,7 @@ func NewUnion(terms []*Term) *Union {
if len(terms) == 0 {
panic("empty union")
}
- return &Union{terms, nil}
+ return &Union{terms}
}
func (u *Union) Len() int { return len(u.terms) }
@@ -46,24 +45,40 @@ func (t *Term) String() string { return (*term)(t).String() }
// Avoid excessive type-checking times due to quadratic termlist operations.
const maxTermCount = 100
-// parseUnion parses the given list of type expressions tlist as a union of
-// those expressions. The result is a Union type, or Typ[Invalid] for some
-// errors.
-func parseUnion(check *Checker, tlist []syntax.Expr) Type {
+// parseUnion parses uexpr as a union of expressions.
+// The result is a Union type, or Typ[Invalid] for some errors.
+func parseUnion(check *Checker, uexpr syntax.Expr) Type {
+ blist, tlist := flattenUnion(nil, uexpr)
+ assert(len(blist) == len(tlist)-1)
+
var terms []*Term
- for _, x := range tlist {
- tilde, typ := parseTilde(check, x)
- if len(tlist) == 1 && !tilde {
+
+ var u Type
+ for i, x := range tlist {
+ term := parseTilde(check, x)
+ if len(tlist) == 1 && !term.tilde {
// Single type. Ok to return early because all relevant
// checks have been performed in parseTilde (no need to
// run through term validity check below).
- return typ
+ return term.typ // typ already recorded through check.typ in parseTilde
}
if len(terms) >= maxTermCount {
- check.errorf(x, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
- return Typ[Invalid]
+ if u != Typ[Invalid] {
+ check.errorf(x, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ u = Typ[Invalid]
+ }
+ } else {
+ terms = append(terms, term)
+ u = &Union{terms}
}
- terms = append(terms, NewTerm(tilde, typ))
+
+ if i > 0 {
+ check.recordTypeAndValue(blist[i-1], typexpr, u, nil)
+ }
+ }
+
+ if u == Typ[Invalid] {
+ return u
}
// Check validity of terms.
@@ -93,7 +108,16 @@ func parseUnion(check *Checker, tlist []syntax.Expr) Type {
// in the beginning. Embedded interfaces with tilde are excluded above. If we reach
// here, we must have at least two terms in the union.
if f != nil && !f.typeSet().IsTypeSet() {
- check.errorf(tlist[i], "cannot use %s in union (interface contains methods)", t)
+ switch {
+ case f.typeSet().NumMethods() != 0:
+ check.errorf(tlist[i], "cannot use %s in union (%s contains methods)", t, t)
+ case t.typ == universeComparable.Type():
+ check.error(tlist[i], "cannot use comparable in union")
+ case f.typeSet().comparable:
+ check.errorf(tlist[i], "cannot use %s in union (%s embeds comparable)", t, t)
+ default:
+ panic("not a type set but no methods and not comparable")
+ }
continue // don't report another error for t
}
@@ -105,26 +129,31 @@ func parseUnion(check *Checker, tlist []syntax.Expr) Type {
}
})
- return &Union{terms, nil}
+ return u
}
-func parseTilde(check *Checker, x syntax.Expr) (tilde bool, typ Type) {
+func parseTilde(check *Checker, tx syntax.Expr) *Term {
+ x := tx
+ var tilde bool
if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
x = op.X
tilde = true
}
- typ = check.typ(x)
+ typ := check.typ(x)
// Embedding stand-alone type parameters is not permitted (issue #47127).
- // Do this check later because it requires computation of the underlying type (see also issue #46461).
- // Note: If an underlying type cannot be a type parameter, the call to
- // under() will not be needed and then we don't need to delay this
- // check to later and could return Typ[Invalid] instead.
- check.later(func() {
- if _, ok := under(typ).(*TypeParam); ok {
- check.error(x, "cannot embed a type parameter")
- }
- })
- return
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if we embed a lone type parameter, we will
+ // simply use its underlying type (like we do for other named, embedded interfaces),
+ // and since the underlying type is an interface the embedding is well defined.
+ if isTypeParam(typ) {
+ check.error(x, "cannot embed a type parameter")
+ typ = Typ[Invalid]
+ }
+ term := NewTerm(tilde, typ)
+ if tilde {
+ check.recordTypeAndValue(tx, typexpr, &Union{[]*Term{term}}, nil)
+ }
+ return term
}
// overlappingTerm reports the index of the term x in terms which is
@@ -144,3 +173,14 @@ func overlappingTerm(terms []*Term, y *Term) int {
}
return -1
}
+
+// flattenUnion walks a union type expression of the form A | B | C | ...,
+// extracting both the binary exprs (blist) and leaf types (tlist).
+func flattenUnion(list []syntax.Expr, x syntax.Expr) (blist, tlist []syntax.Expr) {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ blist, tlist = flattenUnion(list, o.X)
+ blist = append(blist, o)
+ x = o.Y
+ }
+ return blist, append(tlist, x)
+}
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
index 92fa32524c..6ee5dbdca3 100644
--- a/src/cmd/compile/internal/types2/universe.go
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -79,28 +79,41 @@ func defPredeclaredTypes() {
}
// type any = interface{}
- def(NewTypeName(nopos, nil, "any", &emptyInterface))
+ // Note: don't use &emptyInterface for the type of any. Using a unique
+ // pointer allows us to detect any and format it as "any" rather than
+ // interface{}, which clarifies user-facing error messages significantly.
+ def(NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}))
// type error interface{ Error() string }
{
obj := NewTypeName(nopos, nil, "error", nil)
obj.setColor(black)
+ typ := NewNamed(obj, nil, nil)
+
+ // error.Error() string
+ recv := NewVar(nopos, nil, "", typ)
res := NewVar(nopos, nil, "", Typ[String])
- sig := NewSignatureType(nil, nil, nil, nil, NewTuple(res), false)
+ sig := NewSignatureType(recv, nil, nil, nil, NewTuple(res), false)
err := NewFunc(nopos, nil, "Error", sig)
- ityp := &Interface{nil, obj, []*Func{err}, nil, nil, false, true, nil}
+
+ // interface{ Error() string }
+ ityp := &Interface{obj: obj, methods: []*Func{err}, complete: true}
computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
- typ := NewNamed(obj, ityp, nil)
- sig.recv = NewVar(nopos, nil, "", typ)
+
+ typ.SetUnderlying(ityp)
def(obj)
}
- // type comparable interface{ /* type set marked comparable */ }
+ // type comparable interface{} // marked as comparable
{
obj := NewTypeName(nopos, nil, "comparable", nil)
obj.setColor(black)
- ityp := &Interface{nil, obj, nil, nil, nil, false, true, &_TypeSet{true, nil, allTermlist}}
- NewNamed(obj, ityp, nil)
+ typ := NewNamed(obj, nil, nil)
+
+ // interface{} // marked as comparable
+ ityp := &Interface{obj: obj, complete: true, tset: &_TypeSet{true, nil, allTermlist}}
+
+ typ.SetUnderlying(ityp)
def(obj)
}
}
@@ -240,7 +253,7 @@ func def(obj Object) {
return // nothing to do
}
// fix Obj link for named types
- if typ := asNamed(obj.Type()); typ != nil {
+ if typ, _ := obj.Type().(*Named); typ != nil {
typ.obj = obj.(*TypeName)
}
// exported identifiers go into package unsafe
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
new file mode 100644
index 0000000000..c508eadc7c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -0,0 +1,147 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// validType verifies that the given type does not "expand" indefinitely
+// producing a cycle in the type graph. Cycles are detected by marking
+// defined types.
+// (Cycles involving alias types, as in "type A = [10]A", are detected
+// earlier, via the objDecl cycle detection mechanism.)
+func (check *Checker) validType(typ *Named) {
+ check.validType0(typ, nil, nil)
+}
+
+type typeInfo uint
+
+// validType0 checks if the given type is valid. If typ is a type parameter
+// its value is looked up in the provided environment. The environment is
+// nil if typ is not part of (the RHS of) an instantiated type, in that case
+// any type parameter encountered must be from an enclosing function and can
+// be ignored. The path is the list of type names that lead to the current typ.
+func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeInfo {
+ const (
+ unknown typeInfo = iota
+ marked
+ valid
+ invalid
+ )
+
+ switch t := typ.(type) {
+ case nil:
+ // We should never see a nil type but be conservative and panic
+ // only in debug mode.
+ if debug {
+ panic("validType0(nil)")
+ }
+
+ case *Array:
+ return check.validType0(t.elem, env, path)
+
+ case *Struct:
+ for _, f := range t.fields {
+ if check.validType0(f.typ, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Union:
+ for _, t := range t.terms {
+ if check.validType0(t.typ, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Interface:
+ for _, etyp := range t.embeddeds {
+ if check.validType0(etyp, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Named:
+ // Don't report a 2nd error if we already know the type is invalid
+ // (e.g., if a cycle was detected earlier, via under).
+ if t.underlying == Typ[Invalid] {
+ check.infoMap[t] = invalid
+ return invalid
+ }
+
+ switch check.infoMap[t] {
+ case unknown:
+ check.infoMap[t] = marked
+ check.infoMap[t] = check.validType0(t.orig.fromRHS, env.push(t), append(path, t.obj))
+ case marked:
+ // We have seen type t before and thus must have a cycle.
+ check.infoMap[t] = invalid
+ // t cannot be in an imported package otherwise that package
+ // would have reported a type cycle and couldn't have been
+ // imported in the first place.
+ assert(t.obj.pkg == check.pkg)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
+ // Find the starting point of the cycle and report it.
+ for i, tn := range path {
+ if tn == t.obj {
+ check.cycleError(path[i:])
+ return invalid
+ }
+ }
+ panic("cycle start not found")
+ }
+ return check.infoMap[t]
+
+ case *TypeParam:
+ // A type parameter stands for the type (argument) it was instantiated with.
+ // Check the corresponding type argument for validity if we have one.
+ if env != nil {
+ if targ := env.tmap[t]; targ != nil {
+ // Type arguments found in targ must be looked
+ // up in the enclosing environment env.link.
+ return check.validType0(targ, env.link, path)
+ }
+ }
+ }
+
+ return valid
+}
+
+// A tparamEnv provides the environment for looking up the type arguments
+// with which type parameters for a given instance were instantiated.
+// If we don't have an instance, the corresponding tparamEnv is nil.
+type tparamEnv struct {
+ tmap substMap
+ link *tparamEnv
+}
+
+func (env *tparamEnv) push(typ *Named) *tparamEnv {
+ // If typ is not an instantiated type there are no typ-specific
+ // type parameters to look up and we don't need an environment.
+ targs := typ.TypeArgs()
+ if targs == nil {
+ return nil // no instance => nil environment
+ }
+
+ // Populate tmap: remember the type argument for each type parameter.
+ // We cannot use makeSubstMap because the number of type parameters
+ // and arguments may not match due to errors in the source (too many
+ // or too few type arguments). Populate tmap "manually".
+ tparams := typ.TypeParams()
+ n, m := targs.Len(), tparams.Len()
+ if n > m {
+ n = m // too many targs
+ }
+ tmap := make(substMap, n)
+ for i := 0; i < n; i++ {
+ tmap[tparams.At(i)] = targs.At(i)
+ }
+
+ return &tparamEnv{tmap: tmap, link: env}
+}
+
+// TODO(gri) Alternative implementation:
+// We may not need to build a stack of environments to
+// look up the type arguments for type parameters. The
+// same information should be available via the path:
+// We should be able to just walk the path backwards
+// and find the type arguments in the instance objects.
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
index d9d18b6f7a..b649f09c3a 100644
--- a/src/cmd/compile/internal/types2/version.go
+++ b/src/cmd/compile/internal/types2/version.go
@@ -21,7 +21,7 @@ func (check *Checker) langCompat(lit *syntax.BasicLit) {
}
// len(s) > 2
if strings.Contains(s, "_") {
- check.error(lit, "underscores in numeric literals requires go1.13 or later")
+ check.versionErrorf(lit, "go1.13", "underscores in numeric literals")
return
}
if s[0] != '0' {
@@ -29,15 +29,15 @@ func (check *Checker) langCompat(lit *syntax.BasicLit) {
}
radix := s[1]
if radix == 'b' || radix == 'B' {
- check.error(lit, "binary literals requires go1.13 or later")
+ check.versionErrorf(lit, "go1.13", "binary literals")
return
}
if radix == 'o' || radix == 'O' {
- check.error(lit, "0o/0O-style octal literals requires go1.13 or later")
+ check.versionErrorf(lit, "go1.13", "0o/0O-style octal literals")
return
}
if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
- check.error(lit, "hexadecimal floating-point literals requires go1.13 or later")
+ check.versionErrorf(lit, "go1.13", "hexadecimal floating-point literals")
}
}
diff --git a/src/cmd/cover/testdata/test.go b/src/cmd/cover/testdata/test.go
index 703fba57a4..0e1dbc6194 100644
--- a/src/cmd/cover/testdata/test.go
+++ b/src/cmd/cover/testdata/test.go
@@ -151,7 +151,7 @@ func testSwitch() {
}
func testTypeSwitch() {
- var x = []interface{}{1, 2.0, "hi"}
+ var x = []any{1, 2.0, "hi"}
for _, v := range x {
switch func() { check(LINE, 3) }(); v.(type) {
case int:
@@ -215,7 +215,7 @@ func testEmptySwitches() {
switch 3 {
}
check(LINE, 1)
- switch i := (interface{})(3).(int); i {
+ switch i := (any)(3).(int); i {
}
check(LINE, 1)
c := make(chan int)
diff --git a/src/cmd/dist/buildtag_test.go b/src/cmd/dist/buildtag_test.go
new file mode 100644
index 0000000000..f64abfd1f1
--- /dev/null
+++ b/src/cmd/dist/buildtag_test.go
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+var buildParserTests = []struct {
+ x string
+ matched bool
+ err error
+}{
+ {"gc", true, nil},
+ {"gccgo", false, nil},
+ {"!gc", false, nil},
+ {"gc && gccgo", false, nil},
+ {"gc || gccgo", true, nil},
+ {"gc || (gccgo && !gccgo)", true, nil},
+ {"gc && (gccgo || !gccgo)", true, nil},
+ {"!(gc && (gccgo || !gccgo))", false, nil},
+ {"gccgo || gc", true, nil},
+ {"!(!(!(gccgo || gc)))", false, nil},
+ {"compiler_bootstrap", false, nil},
+ {"cmd_go_bootstrap", true, nil},
+ {"syntax(error", false, fmt.Errorf("parsing //go:build line: unexpected (")},
+ {"(gc", false, fmt.Errorf("parsing //go:build line: missing )")},
+ {"gc gc", false, fmt.Errorf("parsing //go:build line: unexpected tag")},
+ {"(gc))", false, fmt.Errorf("parsing //go:build line: unexpected )")},
+}
+
+func TestBuildParser(t *testing.T) {
+ for _, tt := range buildParserTests {
+ matched, err := matchexpr(tt.x)
+ if matched != tt.matched || !reflect.DeepEqual(err, tt.err) {
+ t.Errorf("matchexpr(%q) = %v, %v; want %v, %v", tt.x, matched, err, tt.matched, tt.err)
+ }
+ }
+}
diff --git a/src/cmd/dist/buildtool.go b/src/cmd/dist/buildtool.go
index 75f04a975c..036f8c52fa 100644
--- a/src/cmd/dist/buildtool.go
+++ b/src/cmd/dist/buildtool.go
@@ -15,6 +15,7 @@ import (
"fmt"
"os"
"path/filepath"
+ "regexp"
"runtime"
"strings"
)
@@ -93,10 +94,21 @@ var ignoreSuffixes = []string{
"_test.go",
}
+var tryDirs = []string{
+ "sdk/go1.17",
+ "go1.17",
+}
+
func bootstrapBuildTools() {
goroot_bootstrap := os.Getenv("GOROOT_BOOTSTRAP")
if goroot_bootstrap == "" {
- goroot_bootstrap = pathf("%s/go1.4", os.Getenv("HOME"))
+ home := os.Getenv("HOME")
+ goroot_bootstrap = pathf("%s/go1.4", home)
+ for _, d := range tryDirs {
+ if p := pathf("%s/%s", home, d); isdir(p) {
+ goroot_bootstrap = p
+ }
+ }
}
xprintf("Building Go toolchain1 using %s.\n", goroot_bootstrap)
@@ -277,7 +289,11 @@ func rewriteBlock%s(b *Block) bool { panic("unused during bootstrap") }
}
func bootstrapFixImports(srcFile string) string {
- lines := strings.SplitAfter(readfile(srcFile), "\n")
+ text := readfile(srcFile)
+ if !strings.Contains(srcFile, "/cmd/") && !strings.Contains(srcFile, `\cmd\`) {
+ text = regexp.MustCompile(`\bany\b`).ReplaceAllString(text, "interface{}")
+ }
+ lines := strings.SplitAfter(text, "\n")
inBlock := false
for i, line := range lines {
if strings.HasPrefix(line, "import (") {
diff --git a/src/cmd/dist/main.go b/src/cmd/dist/main.go
index 37de1acc31..212d5cbe45 100644
--- a/src/cmd/dist/main.go
+++ b/src/cmd/dist/main.go
@@ -94,7 +94,15 @@ func main() {
if gohostarch == "" {
// Default Unix system.
out := run("", CheckExit, "uname", "-m")
+ outAll := run("", CheckExit, "uname", "-a")
switch {
+ case strings.Contains(outAll, "RELEASE_ARM64"):
+ // MacOS prints
+ // Darwin p1.local 21.1.0 Darwin Kernel Version 21.1.0: Wed Oct 13 17:33:01 PDT 2021; root:xnu-8019.41.5~1/RELEASE_ARM64_T6000 x86_64
+ // on ARM64 laptops when there is an x86 parent in the
+ // process tree. Look for the RELEASE_ARM64 to avoid being
+ // confused into building an x86 toolchain.
+ gohostarch = "arm64"
case strings.Contains(out, "x86_64"), strings.Contains(out, "amd64"):
gohostarch = "amd64"
case strings.Contains(out, "86"):
diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go
index 98e30a158f..50a2e5936c 100644
--- a/src/cmd/dist/test.go
+++ b/src/cmd/dist/test.go
@@ -499,17 +499,6 @@ func (t *tester) registerTests() {
})
}
- if t.iOS() && !t.compileOnly {
- t.tests = append(t.tests, distTest{
- name: "x509omitbundledroots",
- heading: "crypto/x509 without bundled roots",
- fn: func(dt *distTest) error {
- t.addCmd(dt, "src", t.goTest(), t.timeout(300), "-tags=x509omitbundledroots", "-run=OmitBundledRoots", "crypto/x509")
- return nil
- },
- })
- }
-
// Test ios/amd64 for the iOS simulator.
if goos == "darwin" && goarch == "amd64" && t.cgoEnabled {
t.tests = append(t.tests, distTest{
@@ -1054,10 +1043,8 @@ func (t *tester) supportedBuildmode(mode string) bool {
}
return false
case "plugin":
- // linux-arm64 is missing because it causes the external linker
- // to crash, see https://golang.org/issue/17138
switch pair {
- case "linux-386", "linux-amd64", "linux-arm", "linux-s390x", "linux-ppc64le":
+ case "linux-386", "linux-amd64", "linux-arm", "linux-arm64", "linux-s390x", "linux-ppc64le":
return true
case "darwin-amd64", "darwin-arm64":
return true
diff --git a/src/cmd/dist/util.go b/src/cmd/dist/util.go
index 28fe5e1d8d..8856f467d5 100644
--- a/src/cmd/dist/util.go
+++ b/src/cmd/dist/util.go
@@ -172,6 +172,9 @@ func bgwait(wg *sync.WaitGroup) {
select {
case <-done:
case <-dying:
+ // Don't return to the caller, to avoid reporting additional errors
+ // to the user.
+ select {}
}
}
diff --git a/src/cmd/doc/doc_test.go b/src/cmd/doc/doc_test.go
index af7793133e..0ff9edcde3 100644
--- a/src/cmd/doc/doc_test.go
+++ b/src/cmd/doc/doc_test.go
@@ -7,6 +7,7 @@ package main
import (
"bytes"
"flag"
+ "log"
"os"
"path/filepath"
"regexp"
@@ -125,6 +126,9 @@ var tests = []test{
`func MultiLineFunc\(x interface{ ... }\) \(r struct{ ... }\)`, // Multi line function.
`var LongLine = newLongLine\(("someArgument[1-4]", ){4}...\)`, // Long list of arguments.
`type T1 = T2`, // Type alias
+ `type SimpleConstraint interface{ ... }`,
+ `type TildeConstraint interface{ ... }`,
+ `type StructConstraint interface{ ... }`,
},
[]string{
`const internalConstant = 2`, // No internal constants.
@@ -199,6 +203,9 @@ var tests = []test{
`Comment about exported method`,
`type T1 = T2`,
`type T2 int`,
+ `type SimpleConstraint interface {`,
+ `type TildeConstraint interface {`,
+ `type StructConstraint interface {`,
},
[]string{
`constThree`,
@@ -822,13 +829,19 @@ var tests = []test{
func TestDoc(t *testing.T) {
maybeSkip(t)
+ defer log.SetOutput(log.Writer())
for _, test := range tests {
var b bytes.Buffer
var flagSet flag.FlagSet
+ var logbuf bytes.Buffer
+ log.SetOutput(&logbuf)
err := do(&b, &flagSet, test.args)
if err != nil {
t.Fatalf("%s %v: %s\n", test.name, test.args, err)
}
+ if logbuf.Len() > 0 {
+ t.Errorf("%s %v: unexpected log messages:\n%s", test.name, test.args, logbuf.Bytes())
+ }
output := b.Bytes()
failed := false
for j, yes := range test.yes {
diff --git a/src/cmd/doc/main.go b/src/cmd/doc/main.go
index 0499c40369..dee5d7bbcd 100644
--- a/src/cmd/doc/main.go
+++ b/src/cmd/doc/main.go
@@ -110,6 +110,13 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
if buildPackage == nil {
return fmt.Errorf("no such package: %s", userPath)
}
+
+ // The builtin package needs special treatment: its symbols are lower
+ // case but we want to see them, always.
+ if buildPackage.ImportPath == "builtin" {
+ unexported = true
+ }
+
symbol, method = parseSymbol(sym)
pkg := parsePackage(writer, buildPackage, userPath)
paths = append(paths, pkg.prettyPath())
@@ -128,12 +135,6 @@ func do(writer io.Writer, flagSet *flag.FlagSet, args []string) (err error) {
panic(e)
}()
- // The builtin package needs special treatment: its symbols are lower
- // case but we want to see them, always.
- if pkg.build.ImportPath == "builtin" {
- unexported = true
- }
-
// We have a package.
if showAll && symbol == "" {
pkg.allDoc()
diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go
index 2257c5c0eb..0266600730 100644
--- a/src/cmd/doc/pkg.go
+++ b/src/cmd/doc/pkg.go
@@ -122,7 +122,7 @@ func trim(path, prefix string) (string, bool) {
// main do function, so it doesn't cause an exit. Allows testing to work
// without running a subprocess. The log prefix will be added when
// logged in main; it is not added here.
-func (pkg *Package) Fatalf(format string, args ...interface{}) {
+func (pkg *Package) Fatalf(format string, args ...any) {
panic(PackageError(fmt.Sprintf(format, args...)))
}
@@ -209,7 +209,7 @@ func parsePackage(writer io.Writer, pkg *build.Package, userPath string) *Packag
return p
}
-func (pkg *Package) Printf(format string, args ...interface{}) {
+func (pkg *Package) Printf(format string, args ...any) {
fmt.Fprintf(&pkg.buf, format, args...)
}
@@ -235,7 +235,7 @@ func (pkg *Package) newlines(n int) {
// clears the stuff we don't want to print anyway. It's a bit of a magic trick.
func (pkg *Package) emit(comment string, node ast.Node) {
if node != nil {
- var arg interface{} = node
+ var arg any = node
if showSrc {
// Need an extra little dance to get internal comments to appear.
arg = &printer.CommentedNode{
@@ -865,6 +865,7 @@ func trimUnexportedFields(fields *ast.FieldList, isInterface bool) *ast.FieldLis
if len(names) == 0 {
// Embedded type. Use the name of the type. It must be of the form ident or
// pkg.ident (for structs and interfaces), or *ident or *pkg.ident (structs only).
+ // Or a type embedded in a constraint.
// Nothing else is allowed.
ty := field.Type
if se, ok := field.Type.(*ast.StarExpr); !isInterface && ok {
@@ -872,6 +873,7 @@ func trimUnexportedFields(fields *ast.FieldList, isInterface bool) *ast.FieldLis
// embedded types in structs.
ty = se.X
}
+ constraint := false
switch ident := ty.(type) {
case *ast.Ident:
if isInterface && ident.Name == "error" && ident.Obj == nil {
@@ -885,8 +887,12 @@ func trimUnexportedFields(fields *ast.FieldList, isInterface bool) *ast.FieldLis
case *ast.SelectorExpr:
// An embedded type may refer to a type in another package.
names = []*ast.Ident{ident.Sel}
+ default:
+ // An approximation or union or type
+ // literal in an interface.
+ constraint = true
}
- if names == nil {
+ if names == nil && !constraint {
// Can only happen if AST is incorrect. Safe to continue with a nil list.
log.Print("invalid program: unexpected type for embedded field")
}
diff --git a/src/cmd/doc/testdata/nested/ignore.go b/src/cmd/doc/testdata/nested/ignore.go
index c497f1b5bc..5fa811d0a8 100644
--- a/src/cmd/doc/testdata/nested/ignore.go
+++ b/src/cmd/doc/testdata/nested/ignore.go
@@ -1,3 +1,4 @@
+//go:build ignore
// +build ignore
// Ignored package
diff --git a/src/cmd/doc/testdata/pkg.go b/src/cmd/doc/testdata/pkg.go
index 5ece832565..a693c74918 100644
--- a/src/cmd/doc/testdata/pkg.go
+++ b/src/cmd/doc/testdata/pkg.go
@@ -238,3 +238,15 @@ type ExportedFormattedType struct {
// Text after pre-formatted block.
ExportedField int
}
+
+type SimpleConstraint interface {
+ ~int | ~float64
+}
+
+type TildeConstraint interface {
+ ~int
+}
+
+type StructConstraint interface {
+ struct { F int }
+}
diff --git a/src/cmd/fix/cftype.go b/src/cmd/fix/cftype.go
index 3d292bdeba..27e4088aa9 100644
--- a/src/cmd/fix/cftype.go
+++ b/src/cmd/fix/cftype.go
@@ -45,8 +45,8 @@ func typefix(f *ast.File, badType func(string) bool) bool {
// step 1: Find all the nils with the offending types.
// Compute their replacement.
- badNils := map[interface{}]ast.Expr{}
- walk(f, func(n interface{}) {
+ badNils := map[any]ast.Expr{}
+ walk(f, func(n any) {
if i, ok := n.(*ast.Ident); ok && i.Name == "nil" && badType(typeof[n]) {
badNils[n] = &ast.BasicLit{ValuePos: i.NamePos, Kind: token.INT, Value: "0"}
}
@@ -58,7 +58,7 @@ func typefix(f *ast.File, badType func(string) bool) bool {
if len(badNils) > 0 {
exprType := reflect.TypeOf((*ast.Expr)(nil)).Elem()
exprSliceType := reflect.TypeOf(([]ast.Expr)(nil))
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
if n == nil {
return
}
@@ -99,7 +99,7 @@ func typefix(f *ast.File, badType func(string) bool) bool {
// Now we need unsafe.Pointer as an intermediate cast.
// (*unsafe.Pointer)(x) where x is type *bad -> (*unsafe.Pointer)(unsafe.Pointer(x))
// (*bad.type)(x) where x is type *unsafe.Pointer -> (*bad.type)(unsafe.Pointer(x))
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
if n == nil {
return
}
diff --git a/src/cmd/fix/fix.go b/src/cmd/fix/fix.go
index b9980c17b9..7abdab28a8 100644
--- a/src/cmd/fix/fix.go
+++ b/src/cmd/fix/fix.go
@@ -43,15 +43,15 @@ func register(f fix) {
// walk traverses the AST x, calling visit(y) for each node y in the tree but
// also with a pointer to each ast.Expr, ast.Stmt, and *ast.BlockStmt,
// in a bottom-up traversal.
-func walk(x interface{}, visit func(interface{})) {
+func walk(x any, visit func(any)) {
walkBeforeAfter(x, nop, visit)
}
-func nop(interface{}) {}
+func nop(any) {}
// walkBeforeAfter is like walk but calls before(x) before traversing
// x's children and after(x) afterward.
-func walkBeforeAfter(x interface{}, before, after func(interface{})) {
+func walkBeforeAfter(x any, before, after func(any)) {
before(x)
switch n := x.(type) {
@@ -390,7 +390,7 @@ func renameTop(f *ast.File, old, new string) bool {
// Rename top-level old to new, both unresolved names
// (probably defined in another file) and names that resolve
// to a declaration we renamed.
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
id, ok := n.(*ast.Ident)
if ok && isTopName(id, old) {
id.Name = new
diff --git a/src/cmd/fix/gotypes.go b/src/cmd/fix/gotypes.go
index 031f85c9cc..6085816ada 100644
--- a/src/cmd/fix/gotypes.go
+++ b/src/cmd/fix/gotypes.go
@@ -36,7 +36,7 @@ func fixGoExact(f *ast.File) bool {
// This one is harder because the import name changes.
// First find the import spec.
var importSpec *ast.ImportSpec
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
if importSpec != nil {
return
}
diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go
index b5f7b901d6..3229b71ec4 100644
--- a/src/cmd/fix/main.go
+++ b/src/cmd/fix/main.go
@@ -245,7 +245,7 @@ func processFile(filename string, useStdin bool) error {
return os.WriteFile(f.Name(), newSrc, 0)
}
-func gofmt(n interface{}) string {
+func gofmt(n any) string {
var gofmtBuf bytes.Buffer
if err := format.Node(&gofmtBuf, fset, n); err != nil {
return "<" + err.Error() + ">"
diff --git a/src/cmd/fix/netipv6zone.go b/src/cmd/fix/netipv6zone.go
index 3e502bda07..199fcf5bf5 100644
--- a/src/cmd/fix/netipv6zone.go
+++ b/src/cmd/fix/netipv6zone.go
@@ -26,7 +26,7 @@ func netipv6zone(f *ast.File) bool {
}
fixed := false
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
cl, ok := n.(*ast.CompositeLit)
if !ok {
return
diff --git a/src/cmd/fix/printerconfig.go b/src/cmd/fix/printerconfig.go
index 6d93996872..bad6953196 100644
--- a/src/cmd/fix/printerconfig.go
+++ b/src/cmd/fix/printerconfig.go
@@ -23,7 +23,7 @@ func printerconfig(f *ast.File) bool {
}
fixed := false
- walk(f, func(n interface{}) {
+ walk(f, func(n any) {
cl, ok := n.(*ast.CompositeLit)
if !ok {
return
diff --git a/src/cmd/fix/typecheck.go b/src/cmd/fix/typecheck.go
index b7ec72e116..8a18d61bf2 100644
--- a/src/cmd/fix/typecheck.go
+++ b/src/cmd/fix/typecheck.go
@@ -142,9 +142,9 @@ func (typ *Type) dot(cfg *TypeConfig, name string) string {
// typeof maps AST nodes to type information in gofmt string form.
// assign maps type strings to lists of expressions that were assigned
// to values of another type that were assigned to that type.
-func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, assign map[string][]interface{}) {
- typeof = make(map[interface{}]string)
- assign = make(map[string][]interface{})
+func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[any]string, assign map[string][]any) {
+ typeof = make(map[any]string)
+ assign = make(map[string][]any)
cfg1 := &TypeConfig{}
*cfg1 = *cfg // make copy so we can add locally
copied := false
@@ -296,7 +296,7 @@ func makeExprList(a []*ast.Ident) []ast.Expr {
// Typecheck1 is the recursive form of typecheck.
// It is like typecheck but adds to the information in typeof
// instead of allocating a new map.
-func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, assign map[string][]interface{}) {
+func typecheck1(cfg *TypeConfig, f any, typeof map[any]string, assign map[string][]any) {
// set sets the type of n to typ.
// If isDecl is true, n is being declared.
set := func(n ast.Expr, typ string, isDecl bool) {
@@ -368,7 +368,7 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a
// the curfn stack.
var curfn []*ast.FuncType
- before := func(n interface{}) {
+ before := func(n any) {
// push function type on stack
switch n := n.(type) {
case *ast.FuncDecl:
@@ -379,7 +379,7 @@ func typecheck1(cfg *TypeConfig, f interface{}, typeof map[interface{}]string, a
}
// After is the real type checker.
- after := func(n interface{}) {
+ after := func(n any) {
if n == nil {
return
}
diff --git a/src/cmd/go.mod b/src/cmd/go.mod
index f7802a1675..48fc888f94 100644
--- a/src/cmd/go.mod
+++ b/src/cmd/go.mod
@@ -4,16 +4,16 @@ go 1.18
require (
github.com/google/pprof v0.0.0-20211104044539-f987b9c94b31
- golang.org/x/arch v0.0.0-20210901143047-ebb09ed340f1
- golang.org/x/mod v0.5.1-0.20210913215816-37dd6891021a
+ golang.org/x/arch v0.0.0-20210923205945-b76863e36670
+ golang.org/x/mod v0.6.0-dev.0.20211102181907-3a5865c02020
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
- golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
- golang.org/x/tools v0.1.8-0.20211025211149-f916b54a1784
+ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
+ golang.org/x/tools v0.1.9-0.20220124164225-97de9ec46646
)
require (
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d // indirect
- golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 // indirect
- golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac // indirect
+ golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 // indirect
+ golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
)
diff --git a/src/cmd/go.sum b/src/cmd/go.sum
index 25c25d81bd..4a5479f881 100644
--- a/src/cmd/go.sum
+++ b/src/cmd/go.sum
@@ -5,19 +5,20 @@ github.com/google/pprof v0.0.0-20211104044539-f987b9c94b31 h1:YvpxjnjGhf/vDEeYOy
github.com/google/pprof v0.0.0-20211104044539-f987b9c94b31/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d h1:uGg2frlt3IcT7kbV6LEp5ONv4vmoO2FW4qSO+my/aoM=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
-golang.org/x/arch v0.0.0-20210901143047-ebb09ed340f1 h1:MwxAfiDvuwX8Nnnc6iRDhzyMyyc2tz5tYyCP/pZcPCg=
-golang.org/x/arch v0.0.0-20210901143047-ebb09ed340f1/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 h1:HWj/xjIHfjYU5nVXpTM0s39J9CbLn7Cc5a7IC5rwsMQ=
-golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/mod v0.5.1-0.20210913215816-37dd6891021a h1:55PVa91KndtPGH2lus5l2gDZqoO/x+Oa5CV0lVf8Ij8=
-golang.org/x/mod v0.5.1-0.20210913215816-37dd6891021a/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670 h1:18EFjUmQOcUvxNYSkA6jO9VAiXCnxFY6NyDX0bHDmkU=
+golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3 h1:0es+/5331RGQPcXlMfP+WrnIIS6dNnNRe0WB02W0F4M=
+golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/mod v0.6.0-dev.0.20211102181907-3a5865c02020 h1:HjtpZuJcnSa+yHlL4Y5aypjDvbHkJne5FS8JRmKI2+I=
+golang.org/x/mod v0.6.0-dev.0.20211102181907-3a5865c02020/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac h1:oN6lz7iLW/YC7un8pq+9bOLyXrprv2+DKfkJY+2LJJw=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
-golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
-golang.org/x/tools v0.1.8-0.20211025211149-f916b54a1784 h1:+xP+QoP2SEPgbn+07I/yJTzP+gavj0XKGS6+JU5tlck=
-golang.org/x/tools v0.1.8-0.20211025211149-f916b54a1784/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E=
+golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
+golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
+golang.org/x/tools v0.1.9-0.20220124164225-97de9ec46646 h1:f8aekWvlQQ8ZhD8SL7lOu18dtWslZYl029PN2F0VnS4=
+golang.org/x/tools v0.1.9-0.20220124164225-97de9ec46646/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go
index c9426801c5..13a3f00d6f 100644
--- a/src/cmd/go/alldocs.go
+++ b/src/cmd/go/alldocs.go
@@ -25,6 +25,7 @@
// install compile and install packages and dependencies
// list list packages or modules
// mod module maintenance
+// work workspace maintenance
// run compile and run Go program
// test test packages
// tool run specified go tool
@@ -134,11 +135,6 @@
//
// -asmflags '[pattern=]arg list'
// arguments to pass on each go tool asm invocation.
-// -buildinfo
-// Whether to stamp binaries with build flags. By default, the compiler name
-// (gc or gccgo), toolchain flags (like -gcflags), and environment variables
-// containing flags (like CGO_CFLAGS) are stamped into binaries. Use
-// -buildinfo=false to omit build information. See also -buildvcs.
// -buildmode mode
// build mode to use. See 'go help buildmode' for more.
// -buildvcs
@@ -146,7 +142,7 @@
// version control information is stamped into a binary if the main package
// and the main module containing it are in the repository containing the
// current directory (if there is a repository). Use -buildvcs=false to
-// omit version control information. See also -buildinfo.
+// omit version control information.
// -compiler name
// name of compiler to use, as in runtime.Compiler (gccgo or gc).
// -gccgoflags '[pattern=]arg list'
@@ -213,9 +209,8 @@
// -trimpath
// remove all file system paths from the resulting executable.
// Instead of absolute file system paths, the recorded file names
-// will begin with either "go" (for the standard library),
-// or a module path@version (when using modules),
-// or a plain import path (when using GOPATH).
+// will begin with either a module path@version (when using modules),
+// or a plain import path (when using the standard library, or GOPATH).
// -toolexec 'cmd args'
// a program to use to invoke toolchain programs like vet and asm.
// For example, instead of running asm, the go command will run
@@ -367,9 +362,8 @@
// path. The go tool's usual package mechanism does not apply: package path
// elements like . and ... are not implemented by go doc.
//
-// When run with two arguments, the first must be a full package path (not just a
-// suffix), and the second is a symbol, or symbol with method or struct field.
-// This is similar to the syntax accepted by godoc:
+// When run with two arguments, the first is a package path (full path or suffix),
+// and the second is a symbol, or symbol with method or struct field:
//
// go doc [.]
//
@@ -1055,10 +1049,8 @@
//
// download download modules to local cache
// edit edit go.mod from tools or scripts
-// editwork edit go.work from tools or scripts
// graph print module requirement graph
// init initialize new module in current directory
-// initwork initialize workspace file
// tidy add missing and remove unused modules
// vendor make vendored copy of dependencies
// verify verify dependencies have expected content
@@ -1218,77 +1210,6 @@
// See https://golang.org/ref/mod#go-mod-edit for more about 'go mod edit'.
//
//
-// Edit go.work from tools or scripts
-//
-// Usage:
-//
-// go mod editwork [editing flags] [go.work]
-//
-// Editwork provides a command-line interface for editing go.work,
-// for use primarily by tools or scripts. It only reads go.work;
-// it does not look up information about the modules involved.
-// If no file is specified, editwork looks for a go.work file in the current
-// directory and its parent directories
-//
-// The editing flags specify a sequence of editing operations.
-//
-// The -fmt flag reformats the go.work file without making other changes.
-// This reformatting is also implied by any other modifications that use or
-// rewrite the go.mod file. The only time this flag is needed is if no other
-// flags are specified, as in 'go mod editwork -fmt'.
-//
-// The -directory=path and -dropdirectory=path flags
-// add and drop a directory from the go.work files set of module directories.
-//
-// The -replace=old[@v]=new[@v] flag adds a replacement of the given
-// module path and version pair. If the @v in old@v is omitted, a
-// replacement without a version on the left side is added, which applies
-// to all versions of the old module path. If the @v in new@v is omitted,
-// the new path should be a local module root directory, not a module
-// path. Note that -replace overrides any redundant replacements for old[@v],
-// so omitting @v will drop existing replacements for specific versions.
-//
-// The -dropreplace=old[@v] flag drops a replacement of the given
-// module path and version pair. If the @v is omitted, a replacement without
-// a version on the left side is dropped.
-//
-// The -directory, -dropdirectory, -replace, and -dropreplace,
-// editing flags may be repeated, and the changes are applied in the order given.
-//
-// The -go=version flag sets the expected Go language version.
-//
-// The -print flag prints the final go.work in its text format instead of
-// writing it back to go.mod.
-//
-// The -json flag prints the final go.work file in JSON format instead of
-// writing it back to go.mod. The JSON output corresponds to these Go types:
-//
-// type Module struct {
-// Path string
-// Version string
-// }
-//
-// type GoWork struct {
-// Go string
-// Directory []Directory
-// Replace []Replace
-// }
-//
-// type Directory struct {
-// Path string
-// ModulePath string
-// }
-//
-// type Replace struct {
-// Old Module
-// New Module
-// }
-//
-// See the workspaces design proposal at
-// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
-// more information.
-//
-//
// Print module requirement graph
//
// Usage:
@@ -1328,23 +1249,6 @@
// See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'.
//
//
-// Initialize workspace file
-//
-// Usage:
-//
-// go mod initwork [moddirs]
-//
-// go mod initwork initializes and writes a new go.work file in the current
-// directory, in effect creating a new workspace at the current directory.
-//
-// go mod initwork optionally accepts paths to the workspace modules as arguments.
-// If the argument is omitted, an empty workspace with no modules will be created.
-//
-// See the workspaces design proposal at
-// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
-// more information.
-//
-//
// Add missing and remove unused modules
//
// Usage:
@@ -1384,7 +1288,7 @@
//
// Usage:
//
-// go mod vendor [-e] [-v]
+// go mod vendor [-e] [-v] [-o outdir]
//
// Vendor resets the main module's vendor directory to include all packages
// needed to build and test all the main module's packages.
@@ -1396,6 +1300,11 @@
// The -e flag causes vendor to attempt to proceed despite errors
// encountered while loading packages.
//
+// The -o flag causes vendor to create the vendor directory at the given
+// path instead of "vendor". The go command can only use a vendor directory
+// named "vendor" within the module root directory, so this flag is
+// primarily useful for other tools.
+//
// See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'.
//
//
@@ -1453,6 +1362,202 @@
// See https://golang.org/ref/mod#go-mod-why for more about 'go mod why'.
//
//
+// Workspace maintenance
+//
+// Go workspace provides access to operations on workspaces.
+//
+// Note that support for workspaces is built into many other commands, not
+// just 'go work'.
+//
+// See 'go help modules' for information about Go's module system of which
+// workspaces are a part.
+//
+// A workspace is specified by a go.work file that specifies a set of
+// module directories with the "use" directive. These modules are used as
+// root modules by the go command for builds and related operations. A
+// workspace that does not specify modules to be used cannot be used to do
+// builds from local modules.
+//
+// go.work files are line-oriented. Each line holds a single directive,
+// made up of a keyword followed by arguments. For example:
+//
+// go 1.18
+//
+// use ../foo/bar
+// use ./baz
+//
+// replace example.com/foo v1.2.3 => example.com/bar v1.4.5
+//
+// The leading keyword can be factored out of adjacent lines to create a block,
+// like in Go imports.
+//
+// use (
+// ../foo/bar
+// ./baz
+// )
+//
+// The use directive specifies a module to be included in the workspace's
+// set of main modules. The argument to the use directive is the directory
+// containing the module's go.mod file.
+//
+// The go directive specifies the version of Go the file was written at. It
+// is possible there may be future changes in the semantics of workspaces
+// that could be controlled by this version, but for now the version
+// specified has no effect.
+//
+// The replace directive has the same syntax as the replace directive in a
+// go.mod file and takes precedence over replaces in go.mod files. It is
+// primarily intended to override conflicting replaces in different workspace
+// modules.
+//
+// To determine whether the go command is operating in workspace mode, use
+// the "go env GOWORK" command. This will specify the workspace file being
+// used.
+//
+// Usage:
+//
+// go work [arguments]
+//
+// The commands are:
+//
+// edit edit go.work from tools or scripts
+// init initialize workspace file
+// sync sync workspace build list to modules
+// use add modules to workspace file
+//
+// Use "go help work <command>" for more information about a command.
+//
+// Edit go.work from tools or scripts
+//
+// Usage:
+//
+// go work edit [editing flags] [go.work]
+//
+// Edit provides a command-line interface for editing go.work,
+// for use primarily by tools or scripts. It only reads go.work;
+// it does not look up information about the modules involved.
+// If no file is specified, Edit looks for a go.work file in the current
+// directory and its parent directories.
+//
+// The editing flags specify a sequence of editing operations.
+//
+// The -fmt flag reformats the go.work file without making other changes.
+// This reformatting is also implied by any other modifications that use or
+// rewrite the go.mod file. The only time this flag is needed is if no other
+// flags are specified, as in 'go work edit -fmt'.
+//
+// The -use=path and -dropuse=path flags
+// add and drop a use directive from the go.work file's set of module directories.
+//
+// The -replace=old[@v]=new[@v] flag adds a replacement of the given
+// module path and version pair. If the @v in old@v is omitted, a
+// replacement without a version on the left side is added, which applies
+// to all versions of the old module path. If the @v in new@v is omitted,
+// the new path should be a local module root directory, not a module
+// path. Note that -replace overrides any redundant replacements for old[@v],
+// so omitting @v will drop existing replacements for specific versions.
+//
+// The -dropreplace=old[@v] flag drops a replacement of the given
+// module path and version pair. If the @v is omitted, a replacement without
+// a version on the left side is dropped.
+//
+// The -use, -dropuse, -replace, and -dropreplace,
+// editing flags may be repeated, and the changes are applied in the order given.
+//
+// The -go=version flag sets the expected Go language version.
+//
+// The -print flag prints the final go.work in its text format instead of
+// writing it back to go.mod.
+//
+// The -json flag prints the final go.work file in JSON format instead of
+// writing it back to go.mod. The JSON output corresponds to these Go types:
+//
+// type GoWork struct {
+// Go string
+// Use []Use
+// Replace []Replace
+// }
+//
+// type Use struct {
+// DiskPath string
+// ModulePath string
+// }
+//
+// type Replace struct {
+// Old Module
+// New Module
+// }
+//
+// type Module struct {
+// Path string
+// Version string
+// }
+//
+// See the workspaces design proposal at
+// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
+// more information.
+//
+//
+// Initialize workspace file
+//
+// Usage:
+//
+// go work init [moddirs]
+//
+// Init initializes and writes a new go.work file in the
+// current directory, in effect creating a new workspace at the current
+// directory.
+//
+// go work init optionally accepts paths to the workspace modules as
+// arguments. If the argument is omitted, an empty workspace with no
+// modules will be created.
+//
+// Each argument path is added to a use directive in the go.work file. The
+// current go version will also be listed in the go.work file.
+//
+//
+// Sync workspace build list to modules
+//
+// Usage:
+//
+// go work sync
+//
+// Sync syncs the workspace's build list back to the
+// workspace's modules
+//
+// The workspace's build list is the set of versions of all the
+// (transitive) dependency modules used to do builds in the workspace. go
+// work sync generates that build list using the Minimal Version Selection
+// algorithm, and then syncs those versions back to each of modules
+// specified in the workspace (with use directives).
+//
+// The syncing is done by sequentially upgrading each of the dependency
+// modules specified in a workspace module to the version in the build list
+// if the dependency module's version is not already the same as the build
+// list's version. Note that Minimal Version Selection guarantees that the
+// build list's version of each module is always the same or higher than
+// that in each workspace module.
+//
+//
+// Add modules to workspace file
+//
+// Usage:
+//
+// go work use [-r] [moddirs]
+//
+// Use provides a command-line interface for adding
+// directories, optionally recursively, to a go.work file.
+//
+// A use directive will be added to the go.work file for each argument
+// directory listed on the command line, if it exists on disk,
+// or removed from the go.work file if it does not exist on disk.
+//
+// The -r flag searches recursively for modules in the argument
+// directories, and the use command operates as if each of the directories
+// were specified as arguments: namely, use directives will be added for
+// directories that exist, and removed for directories that do not exist.
+//
+//
// Compile and run Go program
//
// Usage:
@@ -1512,7 +1617,7 @@
// 'Go test' recompiles each package along with any files with names matching
// the file pattern "*_test.go".
// These additional files can contain test functions, benchmark functions, fuzz
-// targets and example functions. See 'go help testfunc' for more.
+// tests and example functions. See 'go help testfunc' for more.
// Each listed package causes the execution of a separate test binary.
// Files whose names begin with "_" (including "_test.go") or "." are ignored.
//
@@ -1931,6 +2036,8 @@
// GOENV
// The location of the Go environment configuration file.
// Cannot be set using 'go env -w'.
+// Setting GOENV=off in the environment disables the use of the
+// default configuration file.
// GOFLAGS
// A space-separated list of -flag=value settings to apply
// to go commands by default, when the given flag is known by
@@ -2020,7 +2127,7 @@
// GOAMD64
// For GOARCH=amd64, the microarchitecture level for which to compile.
// Valid values are v1 (default), v2, v3, v4.
-// See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels.
+// See https://golang.org/wiki/MinimumRequirements#amd64
// GOMIPS
// For GOARCH=mips{,le}, whether to use floating point instructions.
// Valid values are hardfloat (default), softfloat.
@@ -2761,7 +2868,7 @@
// Run each test, benchmark, and fuzz seed n times (default 1).
// If -cpu is set, run n times for each GOMAXPROCS value.
// Examples are always run once. -count does not apply to
-// fuzz targets matched by -fuzz.
+// fuzz tests matched by -fuzz.
//
// -cover
// Enable coverage analysis.
@@ -2789,41 +2896,48 @@
//
// -cpu 1,2,4
// Specify a list of GOMAXPROCS values for which the tests, benchmarks or
-// fuzz targets should be executed. The default is the current value
-// of GOMAXPROCS. -cpu does not apply to fuzz targets matched by -fuzz.
+// fuzz tests should be executed. The default is the current value
+// of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz.
//
// -failfast
// Do not start new tests after the first test failure.
//
// -fuzz regexp
-// Run the fuzz target matching the regular expression. When specified,
+// Run the fuzz test matching the regular expression. When specified,
// the command line argument must match exactly one package within the
-// main module, and regexp must match exactly one fuzz target within
-// that package. After tests, benchmarks, seed corpora of other fuzz
-// targets, and examples have completed, the matching target will be
-// fuzzed. See the Fuzzing section of the testing package documentation
-// for details.
+// main module, and regexp must match exactly one fuzz test within
+// that package. Fuzzing will occur after tests, benchmarks, seed corpora
+// of other fuzz tests, and examples have completed. See the Fuzzing
+// section of the testing package documentation for details.
//
// -fuzztime t
-// Run enough iterations of the fuzz test to take t, specified as a
-// time.Duration (for example, -fuzztime 1h30s). The default is to run
-// forever.
-// The special syntax Nx means to run the fuzz test N times
-// (for example, -fuzztime 100x).
+// Run enough iterations of the fuzz target during fuzzing to take t,
+// specified as a time.Duration (for example, -fuzztime 1h30s).
+// The default is to run forever.
+// The special syntax Nx means to run the fuzz target N times
+// (for example, -fuzztime 1000x).
+//
+// -fuzzminimizetime t
+// Run enough iterations of the fuzz target during each minimization
+// attempt to take t, specified as a time.Duration (for example,
+// -fuzzminimizetime 30s).
+// The default is 60s.
+// The special syntax Nx means to run the fuzz target N times
+// (for example, -fuzzminimizetime 100x).
//
// -json
// Log verbose output and test results in JSON. This presents the
// same information as the -v flag in a machine-readable format.
//
// -list regexp
-// List tests, benchmarks, fuzz targets, or examples matching the regular
-// expression. No tests, benchmarks, fuzz targets, or examples will be run.
+// List tests, benchmarks, fuzz tests, or examples matching the regular
+// expression. No tests, benchmarks, fuzz tests, or examples will be run.
// This will only list top-level tests. No subtest or subbenchmarks will be
// shown.
//
// -parallel n
// Allow parallel execution of test functions that call t.Parallel, and
-// f.Fuzz functions that call t.Parallel when running the seed corpus.
+// fuzz targets that call t.Parallel when running the seed corpus.
// The value of this flag is the maximum number of tests to run
// simultaneously.
// While fuzzing, the value of this flag is the maximum number of
@@ -2838,7 +2952,7 @@
// (see 'go help build').
//
// -run regexp
-// Run only those tests, examples, and fuzz targets matching the regular
+// Run only those tests, examples, and fuzz tests matching the regular
// expression. For tests, the regular expression is split by unbracketed
// slash (/) characters into a sequence of regular expressions, and each
// part of a test's identifier must match the corresponding element in
@@ -3012,7 +3126,7 @@
//
// func BenchmarkXxx(b *testing.B) { ... }
//
-// A fuzz target is one named FuzzXxx and should have the signature,
+// A fuzz test is one named FuzzXxx and should have the signature,
//
// func FuzzXxx(f *testing.F) { ... }
//
@@ -3055,7 +3169,7 @@
//
// The entire test file is presented as the example when it contains a single
// example function, at least one other function, type, variable, or constant
-// declaration, and no fuzz targets or test or benchmark functions.
+// declaration, and no tests, benchmarks, or fuzz tests.
//
// See the documentation of the testing package for more information.
//
diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go
index 4a81c826a5..11ba733b38 100644
--- a/src/cmd/go/go_test.go
+++ b/src/cmd/go/go_test.go
@@ -133,7 +133,7 @@ func TestMain(m *testing.M) {
}
gotool, err := testenv.GoTool()
if err != nil {
- fmt.Fprintln(os.Stderr, err)
+ fmt.Fprintln(os.Stderr, "locating go tool: ", err)
os.Exit(2)
}
@@ -1128,11 +1128,11 @@ func TestGoListTest(t *testing.T) {
tg.grepStdoutNot(`^testing \[sort.test\]$`, "unexpected test copy of testing")
tg.grepStdoutNot(`^testing$`, "unexpected real copy of testing")
- tg.run("list", "-test", "cmd/dist", "cmd/doc")
- tg.grepStdout(`^cmd/dist$`, "missing cmd/dist")
+ tg.run("list", "-test", "cmd/buildid", "cmd/doc")
+ tg.grepStdout(`^cmd/buildid$`, "missing cmd/buildid")
tg.grepStdout(`^cmd/doc$`, "missing cmd/doc")
tg.grepStdout(`^cmd/doc\.test$`, "missing cmd/doc test")
- tg.grepStdoutNot(`^cmd/dist\.test$`, "unexpected cmd/dist test")
+ tg.grepStdoutNot(`^cmd/buildid\.test$`, "unexpected cmd/buildid test")
tg.grepStdoutNot(`^testing`, "unexpected testing")
tg.run("list", "-test", "runtime/cgo")
@@ -1387,7 +1387,7 @@ func TestLdFlagsLongArgumentsIssue42295(t *testing.T) {
for buf.Len() < sys.ExecArgLengthLimit+1 {
buf.WriteString(testStr)
}
- tg.run("run", "-buildinfo=false", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go"))
+ tg.run("run", "-ldflags", fmt.Sprintf(`-X "main.extern=%s"`, buf.String()), tg.path("main.go"))
if tg.stderr.String() != buf.String() {
t.Errorf("strings differ")
}
diff --git a/src/cmd/go/internal/base/base.go b/src/cmd/go/internal/base/base.go
index 954ce47a98..c2d4e6b258 100644
--- a/src/cmd/go/internal/base/base.go
+++ b/src/cmd/go/internal/base/base.go
@@ -117,12 +117,12 @@ func Exit() {
os.Exit(exitStatus)
}
-func Fatalf(format string, args ...interface{}) {
+func Fatalf(format string, args ...any) {
Errorf(format, args...)
Exit()
}
-func Errorf(format string, args ...interface{}) {
+func Errorf(format string, args ...any) {
log.Printf(format, args...)
SetExitStatus(1)
}
@@ -151,7 +151,7 @@ func GetExitStatus() int {
// Run runs the command, with stdout and stderr
// connected to the go command's own stdout and stderr.
// If the command fails, Run reports the error using Errorf.
-func Run(cmdargs ...interface{}) {
+func Run(cmdargs ...any) {
cmdline := str.StringList(cmdargs...)
if cfg.BuildN || cfg.BuildX {
fmt.Printf("%s\n", strings.Join(cmdline, " "))
diff --git a/src/cmd/go/internal/bug/bug.go b/src/cmd/go/internal/bug/bug.go
index a81ca7d8c3..702dc2a14a 100644
--- a/src/cmd/go/internal/bug/bug.go
+++ b/src/cmd/go/internal/bug/bug.go
@@ -106,8 +106,9 @@ func printGoEnv(w io.Writer) {
}
func printGoDetails(w io.Writer) {
- printCmdOut(w, "GOROOT/bin/go version: ", filepath.Join(runtime.GOROOT(), "bin/go"), "version")
- printCmdOut(w, "GOROOT/bin/go tool compile -V: ", filepath.Join(runtime.GOROOT(), "bin/go"), "tool", "compile", "-V")
+ gocmd := filepath.Join(runtime.GOROOT(), "bin/go")
+ printCmdOut(w, "GOROOT/bin/go version: ", gocmd, "version")
+ printCmdOut(w, "GOROOT/bin/go tool compile -V: ", gocmd, "tool", "compile", "-V")
}
func printOSDetails(w io.Writer) {
diff --git a/src/cmd/go/internal/cache/default.go b/src/cmd/go/internal/cache/default.go
index 0b1c1e0c20..426dddfb97 100644
--- a/src/cmd/go/internal/cache/default.go
+++ b/src/cmd/go/internal/cache/default.go
@@ -30,6 +30,7 @@ var (
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from the Go build system.
Run "go clean -cache" if the directory is getting too large.
+Run "go clean -fuzzcache" to delete the fuzz cache.
See golang.org to learn more about Go.
`
diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go
index 351c3ee6a5..7f68d7bb62 100644
--- a/src/cmd/go/internal/cfg/cfg.go
+++ b/src/cmd/go/internal/cfg/cfg.go
@@ -25,7 +25,6 @@ import (
// These are general "build flags" used by build and other commands.
var (
BuildA bool // -a flag
- BuildBuildinfo bool // -buildinfo flag
BuildBuildmode string // -buildmode flag
BuildBuildvcs bool // -buildvcs flag
BuildContext = defaultContext()
@@ -63,6 +62,8 @@ var (
// GoPathError is set when GOPATH is not set. it contains an
// explanation why GOPATH is unset.
GoPathError string
+
+ GOEXPERIMENT = envOr("GOEXPERIMENT", buildcfg.DefaultGOEXPERIMENT)
)
func defaultContext() build.Context {
@@ -89,7 +90,7 @@ func defaultContext() build.Context {
// The experiments flags are based on GOARCH, so they may
// need to change. TODO: This should be cleaned up.
- buildcfg.UpdateExperiments(ctxt.GOOS, ctxt.GOARCH, envOr("GOEXPERIMENT", buildcfg.DefaultGOEXPERIMENT))
+ buildcfg.UpdateExperiments(ctxt.GOOS, ctxt.GOARCH, GOEXPERIMENT)
ctxt.ToolTags = nil
for _, exp := range buildcfg.EnabledExperiments() {
ctxt.ToolTags = append(ctxt.ToolTags, "goexperiment."+exp)
diff --git a/src/cmd/go/internal/cmdflag/flag.go b/src/cmd/go/internal/cmdflag/flag.go
index 8abb7e559f..a634bc1ab8 100644
--- a/src/cmd/go/internal/cmdflag/flag.go
+++ b/src/cmd/go/internal/cmdflag/flag.go
@@ -92,7 +92,7 @@ func ParseOne(fs *flag.FlagSet, args []string) (f *flag.Flag, remainingArgs []st
// Use fs.Set instead of f.Value.Set below so that any subsequent call to
// fs.Visit will correctly visit the flags that have been set.
- failf := func(format string, a ...interface{}) (*flag.Flag, []string, error) {
+ failf := func(format string, a ...any) (*flag.Flag, []string, error) {
return f, args, fmt.Errorf(format, a...)
}
diff --git a/src/cmd/go/internal/doc/doc.go b/src/cmd/go/internal/doc/doc.go
index 8580a5dc4d..7741a9022c 100644
--- a/src/cmd/go/internal/doc/doc.go
+++ b/src/cmd/go/internal/doc/doc.go
@@ -60,9 +60,8 @@ The package path must be either a qualified path or a proper suffix of a
path. The go tool's usual package mechanism does not apply: package path
elements like . and ... are not implemented by go doc.
-When run with two arguments, the first must be a full package path (not just a
-suffix), and the second is a symbol, or symbol with method or struct field.
-This is similar to the syntax accepted by godoc:
+When run with two arguments, the first is a package path (full path or suffix),
+and the second is a symbol, or symbol with method or struct field:
go doc [.]
diff --git a/src/cmd/go/internal/fsys/fsys.go b/src/cmd/go/internal/fsys/fsys.go
index 0b806027e6..9a1bbf890e 100644
--- a/src/cmd/go/internal/fsys/fsys.go
+++ b/src/cmd/go/internal/fsys/fsys.go
@@ -499,7 +499,7 @@ func (f fakeFile) Size() int64 { return f.real.Size() }
func (f fakeFile) Mode() fs.FileMode { return f.real.Mode() }
func (f fakeFile) ModTime() time.Time { return f.real.ModTime() }
func (f fakeFile) IsDir() bool { return f.real.IsDir() }
-func (f fakeFile) Sys() interface{} { return f.real.Sys() }
+func (f fakeFile) Sys() any { return f.real.Sys() }
// missingFile provides an fs.FileInfo for an overlaid file where the
// destination file in the overlay doesn't exist. It returns zero values
@@ -512,7 +512,7 @@ func (f missingFile) Size() int64 { return 0 }
func (f missingFile) Mode() fs.FileMode { return fs.ModeIrregular }
func (f missingFile) ModTime() time.Time { return time.Unix(0, 0) }
func (f missingFile) IsDir() bool { return false }
-func (f missingFile) Sys() interface{} { return nil }
+func (f missingFile) Sys() any { return nil }
// fakeDir provides an fs.FileInfo implementation for directories that are
// implicitly created by overlaid files. Each directory in the
@@ -524,7 +524,7 @@ func (f fakeDir) Size() int64 { return 0 }
func (f fakeDir) Mode() fs.FileMode { return fs.ModeDir | 0500 }
func (f fakeDir) ModTime() time.Time { return time.Unix(0, 0) }
func (f fakeDir) IsDir() bool { return true }
-func (f fakeDir) Sys() interface{} { return nil }
+func (f fakeDir) Sys() any { return nil }
// Glob is like filepath.Glob but uses the overlay file system.
func Glob(pattern string) (matches []string, err error) {
diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go
index a3873d1138..54ccfe78f2 100644
--- a/src/cmd/go/internal/generate/generate.go
+++ b/src/cmd/go/internal/generate/generate.go
@@ -25,8 +25,8 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/modload"
- "cmd/go/internal/work"
"cmd/go/internal/str"
+ "cmd/go/internal/work"
)
var CmdGenerate = &base.Command{
@@ -408,7 +408,7 @@ var stop = fmt.Errorf("error in generation")
// errorf logs an error message prefixed with the file and line number.
// It then exits the program (with exit status 1) because generation stops
// at the first error.
-func (g *Generator) errorf(format string, args ...interface{}) {
+func (g *Generator) errorf(format string, args ...any) {
fmt.Fprintf(os.Stderr, "%s:%d: %s\n", base.ShortPath(g.path), g.lineNum,
fmt.Sprintf(format, args...))
panic(stop)
diff --git a/src/cmd/go/internal/get/get.go b/src/cmd/go/internal/get/get.go
index f46313dcff..8cf8fe6645 100644
--- a/src/cmd/go/internal/get/get.go
+++ b/src/cmd/go/internal/get/get.go
@@ -17,10 +17,10 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/search"
+ "cmd/go/internal/str"
"cmd/go/internal/vcs"
"cmd/go/internal/web"
"cmd/go/internal/work"
- "cmd/go/internal/str"
"golang.org/x/mod/module"
)
diff --git a/src/cmd/go/internal/help/help.go b/src/cmd/go/internal/help/help.go
index 7a730fc8eb..2a07d2423b 100644
--- a/src/cmd/go/internal/help/help.go
+++ b/src/cmd/go/internal/help/help.go
@@ -162,7 +162,7 @@ func (w *errWriter) Write(b []byte) (int, error) {
}
// tmpl executes the given template text on data, writing the result to w.
-func tmpl(w io.Writer, text string, data interface{}) {
+func tmpl(w io.Writer, text string, data any) {
t := template.New("top")
t.Funcs(template.FuncMap{"trim": strings.TrimSpace, "capitalize": capitalize})
template.Must(t.Parse(text))
diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go
index 035235fe1b..d1eaad1c12 100644
--- a/src/cmd/go/internal/help/helpdoc.go
+++ b/src/cmd/go/internal/help/helpdoc.go
@@ -506,6 +506,8 @@ General-purpose environment variables:
GOENV
The location of the Go environment configuration file.
Cannot be set using 'go env -w'.
+ Setting GOENV=off in the environment disables the use of the
+ default configuration file.
GOFLAGS
A space-separated list of -flag=value settings to apply
to go commands by default, when the given flag is known by
@@ -595,7 +597,7 @@ Architecture-specific environment variables:
GOAMD64
For GOARCH=amd64, the microarchitecture level for which to compile.
Valid values are v1 (default), v2, v3, v4.
- See https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels.
+ See https://golang.org/wiki/MinimumRequirements#amd64
GOMIPS
For GOARCH=mips{,le}, whether to use floating point instructions.
Valid values are hardfloat (default), softfloat.
diff --git a/src/cmd/go/internal/imports/testdata/android/e.go b/src/cmd/go/internal/imports/testdata/android/e.go
index d9b2db769b..f1b9c888c2 100644
--- a/src/cmd/go/internal/imports/testdata/android/e.go
+++ b/src/cmd/go/internal/imports/testdata/android/e.go
@@ -1,3 +1,4 @@
+//go:build android
// +build android
package android
diff --git a/src/cmd/go/internal/imports/testdata/android/f.go b/src/cmd/go/internal/imports/testdata/android/f.go
index 281e4dd6b9..bb0ff7b73f 100644
--- a/src/cmd/go/internal/imports/testdata/android/f.go
+++ b/src/cmd/go/internal/imports/testdata/android/f.go
@@ -1,3 +1,4 @@
+//go:build linux
// +build linux
package android
diff --git a/src/cmd/go/internal/imports/testdata/android/g.go b/src/cmd/go/internal/imports/testdata/android/g.go
index 66a789c0ad..ee19424890 100644
--- a/src/cmd/go/internal/imports/testdata/android/g.go
+++ b/src/cmd/go/internal/imports/testdata/android/g.go
@@ -1,3 +1,4 @@
+//go:build !android
// +build !android
package android
diff --git a/src/cmd/go/internal/imports/testdata/illumos/e.go b/src/cmd/go/internal/imports/testdata/illumos/e.go
index 5e1ed3cb9d..fddf2c4299 100644
--- a/src/cmd/go/internal/imports/testdata/illumos/e.go
+++ b/src/cmd/go/internal/imports/testdata/illumos/e.go
@@ -1,3 +1,4 @@
+//go:build illumos
// +build illumos
package illumos
diff --git a/src/cmd/go/internal/imports/testdata/illumos/f.go b/src/cmd/go/internal/imports/testdata/illumos/f.go
index f3e3f728bc..4b6d528e4c 100644
--- a/src/cmd/go/internal/imports/testdata/illumos/f.go
+++ b/src/cmd/go/internal/imports/testdata/illumos/f.go
@@ -1,3 +1,4 @@
+//go:build solaris
// +build solaris
package illumos
diff --git a/src/cmd/go/internal/imports/testdata/illumos/g.go b/src/cmd/go/internal/imports/testdata/illumos/g.go
index b30f1eb403..1bf826b815 100644
--- a/src/cmd/go/internal/imports/testdata/illumos/g.go
+++ b/src/cmd/go/internal/imports/testdata/illumos/g.go
@@ -1,3 +1,4 @@
+//go:build !illumos
// +build !illumos
package illumos
diff --git a/src/cmd/go/internal/imports/testdata/star/x1.go b/src/cmd/go/internal/imports/testdata/star/x1.go
index 6a9594aed0..eaaea979e9 100644
--- a/src/cmd/go/internal/imports/testdata/star/x1.go
+++ b/src/cmd/go/internal/imports/testdata/star/x1.go
@@ -1,8 +1,5 @@
-// +build blahblh
-// +build linux
-// +build !linux
-// +build windows
-// +build darwin
+//go:build blahblh && linux && !linux && windows && darwin
+// +build blahblh,linux,!linux,windows,darwin
package x
diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go
index 8c85ddcf21..d9a7078ccf 100644
--- a/src/cmd/go/internal/list/list.go
+++ b/src/cmd/go/internal/list/list.go
@@ -23,8 +23,8 @@ import (
"cmd/go/internal/load"
"cmd/go/internal/modinfo"
"cmd/go/internal/modload"
- "cmd/go/internal/work"
"cmd/go/internal/str"
+ "cmd/go/internal/work"
)
var CmdList = &base.Command{
@@ -358,9 +358,9 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
}
}
- var do func(interface{})
+ var do func(any)
if *listJson {
- do = func(x interface{}) {
+ do = func(x any) {
b, err := json.MarshalIndent(x, "", "\t")
if err != nil {
out.Flush()
@@ -386,7 +386,7 @@ func runList(ctx context.Context, cmd *base.Command, args []string) {
if err != nil {
base.Fatalf("%s", err)
}
- do = func(x interface{}) {
+ do = func(x any) {
if err := tmpl.Execute(out, x); err != nil {
out.Flush()
base.Fatalf("%s", err)
diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go
index f9051cce3d..aba1dfd1c1 100644
--- a/src/cmd/go/internal/load/pkg.go
+++ b/src/cmd/go/internal/load/pkg.go
@@ -504,7 +504,7 @@ type importError struct {
err error // created with fmt.Errorf
}
-func ImportErrorf(path, format string, args ...interface{}) ImportPathError {
+func ImportErrorf(path, format string, args ...any) ImportPathError {
err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
if errStr := err.Error(); !strings.Contains(errStr, path) {
panic(fmt.Sprintf("path %q not in error %q", path, errStr))
@@ -595,10 +595,10 @@ func ClearPackageCachePartial(args []string) {
delete(packageCache, arg)
}
}
- resolvedImportCache.DeleteIf(func(key interface{}) bool {
+ resolvedImportCache.DeleteIf(func(key any) bool {
return shouldDelete[key.(importSpec).path]
})
- packageDataCache.DeleteIf(func(key interface{}) bool {
+ packageDataCache.DeleteIf(func(key any) bool {
return shouldDelete[key.(string)]
})
}
@@ -611,7 +611,7 @@ func ReloadPackageNoFlags(arg string, stk *ImportStack) *Package {
p := packageCache[arg]
if p != nil {
delete(packageCache, arg)
- resolvedImportCache.DeleteIf(func(key interface{}) bool {
+ resolvedImportCache.DeleteIf(func(key any) bool {
return key.(importSpec).path == p.ImportPath
})
packageDataCache.Delete(p.ImportPath)
@@ -823,7 +823,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
parentIsStd: parentIsStd,
mode: mode,
}
- r := resolvedImportCache.Do(importKey, func() interface{} {
+ r := resolvedImportCache.Do(importKey, func() any {
var r resolvedImport
if build.IsLocalImport(path) {
r.dir = filepath.Join(parentDir, path)
@@ -850,7 +850,7 @@ func loadPackageData(ctx context.Context, path, parentPath, parentDir, parentRoo
// Load the package from its directory. If we already found the package's
// directory when resolving its import path, use that.
- data := packageDataCache.Do(r.path, func() interface{} {
+ data := packageDataCache.Do(r.path, func() any {
loaded = true
var data packageData
if r.dir != "" {
@@ -1069,7 +1069,7 @@ func cleanImport(path string) string {
var isDirCache par.Cache
func isDir(path string) bool {
- return isDirCache.Do(path, func() interface{} {
+ return isDirCache.Do(path, func() any {
fi, err := fsys.Stat(path)
return err == nil && fi.IsDir()
}).(bool)
@@ -1197,7 +1197,7 @@ var (
// goModPath returns the module path in the go.mod in dir, if any.
func goModPath(dir string) (path string) {
- return goModPathCache.Do(dir, func() interface{} {
+ return goModPathCache.Do(dir, func() any {
data, err := os.ReadFile(filepath.Join(dir, "go.mod"))
if err != nil {
return ""
@@ -2023,13 +2023,18 @@ func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[st
for _, pattern = range patterns {
pid++
+ glob := pattern
+ all := strings.HasPrefix(pattern, "all:")
+ if all {
+ glob = pattern[len("all:"):]
+ }
// Check pattern is valid for //go:embed.
- if _, err := path.Match(pattern, ""); err != nil || !validEmbedPattern(pattern) {
+ if _, err := path.Match(glob, ""); err != nil || !validEmbedPattern(glob) {
return nil, nil, fmt.Errorf("invalid pattern syntax")
}
// Glob to find matches.
- match, err := fsys.Glob(pkgdir + string(filepath.Separator) + filepath.FromSlash(pattern))
+ match, err := fsys.Glob(pkgdir + string(filepath.Separator) + filepath.FromSlash(glob))
if err != nil {
return nil, nil, err
}
@@ -2092,7 +2097,7 @@ func resolveEmbed(pkgdir string, patterns []string) (files []string, pmap map[st
}
rel := filepath.ToSlash(path[len(pkgdir)+1:])
name := info.Name()
- if path != file && (isBadEmbedName(name) || name[0] == '.' || name[0] == '_') {
+ if path != file && (isBadEmbedName(name) || ((name[0] == '.' || name[0] == '_') && !all)) {
// Ignore bad names, assuming they won't go into modules.
// Also avoid hidden files that user may not know about.
// See golang.org/issue/42328.
@@ -2204,6 +2209,10 @@ func (p *Package) collectDeps() {
}
}
+// vcsStatusCache maps repository directories (string)
+// to their VCS information (vcsStatusError).
+var vcsStatusCache par.Cache
+
// setBuildInfo gathers build information, formats it as a string to be
// embedded in the binary, then sets p.Internal.BuildInfo to that string.
// setBuildInfo should only be called on a main package with no errors.
@@ -2218,7 +2227,7 @@ func (p *Package) setBuildInfo() {
// executables always appear stale unless the user sets the same flags.
// Perhaps it's safe to omit those flags when GO_GCFLAGS and GO_LDFLAGS
// are not set?
- setPkgErrorf := func(format string, args ...interface{}) {
+ setPkgErrorf := func(format string, args ...any) {
if p.Error == nil {
p.Error = &PackageError{Err: fmt.Errorf(format, args...)}
}
@@ -2282,33 +2291,57 @@ func (p *Package) setBuildInfo() {
Deps: deps,
}
appendSetting := func(key, value string) {
+ value = strings.ReplaceAll(value, "\n", " ") // make value safe
info.Settings = append(info.Settings, debug.BuildSetting{Key: key, Value: value})
}
// Add command-line flags relevant to the build.
// This is informational, not an exhaustive list.
- if cfg.BuildBuildinfo && !p.Standard {
- appendSetting("compiler", cfg.BuildContext.Compiler)
+ // Please keep the list sorted.
+ if !p.Standard {
+ if cfg.BuildASan {
+ appendSetting("-asan", "true")
+ }
if BuildAsmflags.present {
- appendSetting("asmflags", BuildAsmflags.String())
+ appendSetting("-asmflags", BuildAsmflags.String())
+ }
+ appendSetting("-compiler", cfg.BuildContext.Compiler)
+ if BuildGccgoflags.present && cfg.BuildContext.Compiler == "gccgo" {
+ appendSetting("-gccgoflags", BuildGccgoflags.String())
}
if BuildGcflags.present && cfg.BuildContext.Compiler == "gc" {
- appendSetting("gcflags", BuildGcflags.String())
- }
- if BuildGccgoflags.present && cfg.BuildContext.Compiler == "gccgo" {
- appendSetting("gccgoflags", BuildGccgoflags.String())
+ appendSetting("-gcflags", BuildGcflags.String())
}
if BuildLdflags.present {
- appendSetting("ldflags", BuildLdflags.String())
+ appendSetting("-ldflags", BuildLdflags.String())
}
- tags := append(cfg.BuildContext.BuildTags, cfg.BuildContext.ToolTags...)
- appendSetting("tags", strings.Join(tags, ","))
- appendSetting("CGO_ENABLED", strconv.FormatBool(cfg.BuildContext.CgoEnabled))
+ if cfg.BuildMSan {
+ appendSetting("-msan", "true")
+ }
+ if cfg.BuildRace {
+ appendSetting("-race", "true")
+ }
+ if tags := cfg.BuildContext.BuildTags; len(tags) > 0 {
+ appendSetting("-tags", strings.Join(tags, ","))
+ }
+ cgo := "0"
if cfg.BuildContext.CgoEnabled {
- for _, name := range []string{"CGO_CPPFLAGS", "CGO_CFLAGS", "CGO_CXXFLAGS", "CGO_LDFLAGS"} {
+ cgo = "1"
+ }
+ appendSetting("CGO_ENABLED", cgo)
+ if cfg.BuildContext.CgoEnabled {
+ for _, name := range []string{"CGO_CFLAGS", "CGO_CPPFLAGS", "CGO_CXXFLAGS", "CGO_LDFLAGS"} {
appendSetting(name, cfg.Getenv(name))
}
}
+ appendSetting("GOARCH", cfg.BuildContext.GOARCH)
+ if cfg.GOEXPERIMENT != "" {
+ appendSetting("GOEXPERIMENT", cfg.GOEXPERIMENT)
+ }
+ appendSetting("GOOS", cfg.BuildContext.GOOS)
+ if key, val := cfg.GetArchEnv(); key != "" && val != "" {
+ appendSetting(key, val)
+ }
}
// Add VCS status if all conditions are true:
@@ -2366,19 +2399,29 @@ func (p *Package) setBuildInfo() {
return
}
- st, err := vcsCmd.Status(vcsCmd, repoDir)
- if err != nil {
+ type vcsStatusError struct {
+ Status vcs.Status
+ Err error
+ }
+ cached := vcsStatusCache.Do(repoDir, func() any {
+ st, err := vcsCmd.Status(vcsCmd, repoDir)
+ return vcsStatusError{st, err}
+ }).(vcsStatusError)
+ if err := cached.Err; err != nil {
setVCSError(err)
return
}
+ st := cached.Status
+
+ appendSetting("vcs", vcsCmd.Cmd)
if st.Revision != "" {
- appendSetting(vcsCmd.Cmd+"revision", st.Revision)
+ appendSetting("vcs.revision", st.Revision)
}
if !st.CommitTime.IsZero() {
stamp := st.CommitTime.UTC().Format(time.RFC3339Nano)
- appendSetting(vcsCmd.Cmd+"committime", stamp)
+ appendSetting("vcs.time", stamp)
}
- appendSetting(vcsCmd.Cmd+"uncommitted", strconv.FormatBool(st.Uncommitted))
+ appendSetting("vcs.modified", strconv.FormatBool(st.Uncommitted))
}
text, err := info.MarshalText()
diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go
index 8a18dfbe93..6122428c9c 100644
--- a/src/cmd/go/internal/load/test.go
+++ b/src/cmd/go/internal/load/test.go
@@ -22,8 +22,8 @@ import (
"unicode/utf8"
"cmd/go/internal/fsys"
- "cmd/go/internal/trace"
"cmd/go/internal/str"
+ "cmd/go/internal/trace"
)
var TestMainDeps = []string{
diff --git a/src/cmd/go/internal/modcmd/download.go b/src/cmd/go/internal/modcmd/download.go
index f252133762..6b8a010fd9 100644
--- a/src/cmd/go/internal/modcmd/download.go
+++ b/src/cmd/go/internal/modcmd/download.go
@@ -93,24 +93,27 @@ func runDownload(ctx context.Context, cmd *base.Command, args []string) {
modload.ExplicitWriteGoMod = true
haveExplicitArgs := len(args) > 0
- if modload.HasModRoot() {
+ if modload.HasModRoot() || modload.WorkFilePath() != "" {
modload.LoadModFile(ctx) // to fill MainModules
- if len(modload.MainModules.Versions()) != 1 {
- panic(modload.TODOWorkspaces("Support workspace mode in go mod download"))
- }
- mainModule := modload.MainModules.Versions()[0]
-
if haveExplicitArgs {
- targetAtUpgrade := mainModule.Path + "@upgrade"
- targetAtPatch := mainModule.Path + "@patch"
- for _, arg := range args {
- switch arg {
- case mainModule.Path, targetAtUpgrade, targetAtPatch:
- os.Stderr.WriteString("go: skipping download of " + arg + " that resolves to the main module\n")
+ for _, mainModule := range modload.MainModules.Versions() {
+ targetAtUpgrade := mainModule.Path + "@upgrade"
+ targetAtPatch := mainModule.Path + "@patch"
+ for _, arg := range args {
+ switch arg {
+ case mainModule.Path, targetAtUpgrade, targetAtPatch:
+ os.Stderr.WriteString("go: skipping download of " + arg + " that resolves to the main module\n")
+ }
}
}
+ } else if modload.WorkFilePath() != "" {
+ // TODO(#44435): Think about what the correct query is to download the
+ // right set of modules. Also see code review comment at
+ // https://go-review.googlesource.com/c/go/+/359794/comments/ce946a80_6cf53992.
+ args = []string{"all"}
} else {
+ mainModule := modload.MainModules.Versions()[0]
modFile := modload.MainModules.ModFile(mainModule)
if modFile.Go == nil || semver.Compare("v"+modFile.Go.Version, modload.ExplicitIndirectVersionV) < 0 {
if len(modFile.Require) > 0 {
diff --git a/src/cmd/go/internal/modcmd/initwork.go b/src/cmd/go/internal/modcmd/initwork.go
deleted file mode 100644
index 4182aa071d..0000000000
--- a/src/cmd/go/internal/modcmd/initwork.go
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// go mod initwork
-
-package modcmd
-
-import (
- "cmd/go/internal/base"
- "cmd/go/internal/modload"
- "context"
- "path/filepath"
-)
-
-var _ = modload.TODOWorkspaces("Add more documentation below. Though this is" +
- "enough for those trying workspaces out, there should be more through" +
- "documentation if the proposal is accepted and released.")
-
-var cmdInitwork = &base.Command{
- UsageLine: "go mod initwork [moddirs]",
- Short: "initialize workspace file",
- Long: `go mod initwork initializes and writes a new go.work file in the current
-directory, in effect creating a new workspace at the current directory.
-
-go mod initwork optionally accepts paths to the workspace modules as arguments.
-If the argument is omitted, an empty workspace with no modules will be created.
-
-See the workspaces design proposal at
-https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
-more information.
-`,
- Run: runInitwork,
-}
-
-func init() {
- base.AddModCommonFlags(&cmdInitwork.Flag)
- base.AddWorkfileFlag(&cmdInitwork.Flag)
-}
-
-func runInitwork(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
-
- modload.ForceUseModules = true
-
- // TODO(matloob): support using the -workfile path
- // To do that properly, we'll have to make the module directories
- // make dirs relative to workFile path before adding the paths to
- // the directory entries
-
- workFile := filepath.Join(base.Cwd(), "go.work")
-
- modload.CreateWorkFile(ctx, workFile, args)
-}
diff --git a/src/cmd/go/internal/modcmd/mod.go b/src/cmd/go/internal/modcmd/mod.go
index 29aad58324..d72d0cacd6 100644
--- a/src/cmd/go/internal/modcmd/mod.go
+++ b/src/cmd/go/internal/modcmd/mod.go
@@ -23,10 +23,8 @@ See 'go help modules' for an overview of module functionality.
Commands: []*base.Command{
cmdDownload,
cmdEdit,
- cmdEditwork,
cmdGraph,
cmdInit,
- cmdInitwork,
cmdTidy,
cmdVendor,
cmdVerify,
diff --git a/src/cmd/go/internal/modcmd/tidy.go b/src/cmd/go/internal/modcmd/tidy.go
index 57d303a13c..d35476eb53 100644
--- a/src/cmd/go/internal/modcmd/tidy.go
+++ b/src/cmd/go/internal/modcmd/tidy.go
@@ -75,8 +75,8 @@ type goVersionFlag struct {
v string
}
-func (f *goVersionFlag) String() string { return f.v }
-func (f *goVersionFlag) Get() interface{} { return f.v }
+func (f *goVersionFlag) String() string { return f.v }
+func (f *goVersionFlag) Get() any { return f.v }
func (f *goVersionFlag) Set(s string) error {
if s != "" {
diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go
index 484e095cc7..ef123700aa 100644
--- a/src/cmd/go/internal/modcmd/vendor.go
+++ b/src/cmd/go/internal/modcmd/vendor.go
@@ -31,7 +31,7 @@ import (
)
var cmdVendor = &base.Command{
- UsageLine: "go mod vendor [-e] [-v]",
+ UsageLine: "go mod vendor [-e] [-v] [-o outdir]",
Short: "make vendored copy of dependencies",
Long: `
Vendor resets the main module's vendor directory to include all packages
@@ -44,16 +44,23 @@ modules and packages to standard error.
The -e flag causes vendor to attempt to proceed despite errors
encountered while loading packages.
+The -o flag causes vendor to create the vendor directory at the given
+path instead of "vendor". The go command can only use a vendor directory
+named "vendor" within the module root directory, so this flag is
+primarily useful for other tools.
+
See https://golang.org/ref/mod#go-mod-vendor for more about 'go mod vendor'.
`,
Run: runVendor,
}
-var vendorE bool // if true, report errors but proceed anyway
+var vendorE bool // if true, report errors but proceed anyway
+var vendorO string // if set, overrides the default output directory
func init() {
cmdVendor.Flag.BoolVar(&cfg.BuildV, "v", false, "")
cmdVendor.Flag.BoolVar(&vendorE, "e", false, "")
+ cmdVendor.Flag.StringVar(&vendorO, "o", "", "")
base.AddModCommonFlags(&cmdVendor.Flag)
}
@@ -74,7 +81,15 @@ func runVendor(ctx context.Context, cmd *base.Command, args []string) {
}
_, pkgs := modload.LoadPackages(ctx, loadOpts, "all")
- vdir := filepath.Join(modload.VendorDir())
+ var vdir string
+ switch {
+ case filepath.IsAbs(vendorO):
+ vdir = vendorO
+ case vendorO != "":
+ vdir = filepath.Join(base.Cwd(), vendorO)
+ default:
+ vdir = filepath.Join(modload.VendorDir())
+ }
if err := os.RemoveAll(vdir); err != nil {
base.Fatalf("go: %v", err)
}
diff --git a/src/cmd/go/internal/modfetch/cache.go b/src/cmd/go/internal/modfetch/cache.go
index 8d299e931a..b0dae1cb3d 100644
--- a/src/cmd/go/internal/modfetch/cache.go
+++ b/src/cmd/go/internal/modfetch/cache.go
@@ -204,7 +204,7 @@ func (r *cachingRepo) Versions(prefix string) ([]string, error) {
list []string
err error
}
- c := r.cache.Do("versions:"+prefix, func() interface{} {
+ c := r.cache.Do("versions:"+prefix, func() any {
list, err := r.repo().Versions(prefix)
return cached{list, err}
}).(cached)
@@ -221,7 +221,7 @@ type cachedInfo struct {
}
func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
- c := r.cache.Do("stat:"+rev, func() interface{} {
+ c := r.cache.Do("stat:"+rev, func() any {
file, info, err := readDiskStat(r.path, rev)
if err == nil {
return cachedInfo{info, nil}
@@ -233,7 +233,7 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
// then save the information under the proper version, for future use.
if info.Version != rev {
file, _ = CachePath(module.Version{Path: r.path, Version: info.Version}, "info")
- r.cache.Do("stat:"+info.Version, func() interface{} {
+ r.cache.Do("stat:"+info.Version, func() any {
return cachedInfo{info, err}
})
}
@@ -253,12 +253,12 @@ func (r *cachingRepo) Stat(rev string) (*RevInfo, error) {
}
func (r *cachingRepo) Latest() (*RevInfo, error) {
- c := r.cache.Do("latest:", func() interface{} {
+ c := r.cache.Do("latest:", func() any {
info, err := r.repo().Latest()
// Save info for likely future Stat call.
if err == nil {
- r.cache.Do("stat:"+info.Version, func() interface{} {
+ r.cache.Do("stat:"+info.Version, func() any {
return cachedInfo{info, err}
})
if file, _, err := readDiskStat(r.path, info.Version); err != nil {
@@ -281,7 +281,7 @@ func (r *cachingRepo) GoMod(version string) ([]byte, error) {
text []byte
err error
}
- c := r.cache.Do("gomod:"+version, func() interface{} {
+ c := r.cache.Do("gomod:"+version, func() any {
file, text, err := readDiskGoMod(r.path, version)
if err == nil {
// Note: readDiskGoMod already called checkGoMod.
@@ -642,7 +642,7 @@ func rewriteVersionList(dir string) (err error) {
// Lock listfile when writing to it to try to avoid corruption to the file.
// Under rare circumstances, for instance, if the system loses power in the
// middle of a write it is possible for corrupt data to be written. This is
- // not a problem for the go command itself, but may be an issue if the the
+ // not a problem for the go command itself, but may be an issue if the
// cache is being served by a GOPROXY HTTP server. This will be corrected
// the next time a new version of the module is fetched and the file is rewritten.
// TODO(matloob): golang.org/issue/43313 covers adding a go mod verify
diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go
index 378fbae34f..4a0e2241e5 100644
--- a/src/cmd/go/internal/modfetch/codehost/codehost.go
+++ b/src/cmd/go/internal/modfetch/codehost/codehost.go
@@ -55,21 +55,6 @@ type Repo interface {
// os.IsNotExist(err) returns true.
ReadFile(rev, file string, maxSize int64) (data []byte, err error)
- // ReadFileRevs reads a single file at multiple versions.
- // It should refuse to read more than maxSize bytes.
- // The result is a map from each requested rev strings
- // to the associated FileRev. The map must have a non-nil
- // entry for every requested rev (unless ReadFileRevs returned an error).
- // A file simply being missing or even corrupted in revs[i]
- // should be reported only in files[revs[i]].Err, not in the error result
- // from ReadFileRevs.
- // The overall call should return an error (and no map) only
- // in the case of a problem with obtaining the data, such as
- // a network failure.
- // Implementations may assume that revs only contain tags,
- // not direct commit hashes.
- ReadFileRevs(revs []string, file string, maxSize int64) (files map[string]*FileRev, err error)
-
// ReadZip downloads a zip file for the subdir subdirectory
// of the given revision to a new file in a given temporary directory.
// It should refuse to read more than maxSize bytes.
@@ -243,7 +228,7 @@ var dirLock sync.Map
// It returns the standard output and, for a non-zero exit,
// a *RunError indicating the command, exit status, and standard error.
// Standard error is unavailable for commands that exit successfully.
-func Run(dir string, cmdline ...interface{}) ([]byte, error) {
+func Run(dir string, cmdline ...any) ([]byte, error) {
return RunWithStdin(dir, nil, cmdline...)
}
@@ -251,7 +236,7 @@ func Run(dir string, cmdline ...interface{}) ([]byte, error) {
// See https://www.gnu.org/software/bash/manual/html_node/Double-Quotes.html.
var bashQuoter = strings.NewReplacer(`"`, `\"`, `$`, `\$`, "`", "\\`", `\`, `\\`)
-func RunWithStdin(dir string, stdin io.Reader, cmdline ...interface{}) ([]byte, error) {
+func RunWithStdin(dir string, stdin io.Reader, cmdline ...any) ([]byte, error) {
if dir != "" {
muIface, ok := dirLock.Load(dir)
if !ok {
diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go
index a782de56ff..34f453c855 100644
--- a/src/cmd/go/internal/modfetch/codehost/git.go
+++ b/src/cmd/go/internal/modfetch/codehost/git.go
@@ -56,7 +56,7 @@ func newGitRepoCached(remote string, localOK bool) (Repo, error) {
err error
}
- c := gitRepoCache.Do(key{remote, localOK}, func() interface{} {
+ c := gitRepoCache.Do(key{remote, localOK}, func() any {
repo, err := newGitRepo(remote, localOK)
return cached{repo, err}
}).(cached)
@@ -503,7 +503,7 @@ func (r *gitRepo) Stat(rev string) (*RevInfo, error) {
info *RevInfo
err error
}
- c := r.statCache.Do(rev, func() interface{} {
+ c := r.statCache.Do(rev, func() any {
info, err := r.stat(rev)
return cached{info, err}
}).(cached)
@@ -523,140 +523,6 @@ func (r *gitRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) {
return out, nil
}
-func (r *gitRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[string]*FileRev, error) {
- // Create space to hold results.
- files := make(map[string]*FileRev)
- for _, rev := range revs {
- f := &FileRev{Rev: rev}
- files[rev] = f
- }
-
- // Collect locally-known revs.
- need, err := r.readFileRevs(revs, file, files)
- if err != nil {
- return nil, err
- }
- if len(need) == 0 {
- return files, nil
- }
-
- // Build list of known remote refs that might help.
- var redo []string
- refs, err := r.loadRefs()
- if err != nil {
- return nil, err
- }
- for _, tag := range need {
- if refs["refs/tags/"+tag] != "" {
- redo = append(redo, tag)
- }
- }
- if len(redo) == 0 {
- return files, nil
- }
-
- // Protect r.fetchLevel and the "fetch more and more" sequence.
- // See stat method above.
- unlock, err := r.mu.Lock()
- if err != nil {
- return nil, err
- }
- defer unlock()
-
- if err := r.fetchRefsLocked(); err != nil {
- return nil, err
- }
-
- if _, err := r.readFileRevs(redo, file, files); err != nil {
- return nil, err
- }
-
- return files, nil
-}
-
-func (r *gitRepo) readFileRevs(tags []string, file string, fileMap map[string]*FileRev) (missing []string, err error) {
- var stdin bytes.Buffer
- for _, tag := range tags {
- fmt.Fprintf(&stdin, "refs/tags/%s\n", tag)
- fmt.Fprintf(&stdin, "refs/tags/%s:%s\n", tag, file)
- }
-
- data, err := RunWithStdin(r.dir, &stdin, "git", "cat-file", "--batch")
- if err != nil {
- return nil, err
- }
-
- next := func() (typ string, body []byte, ok bool) {
- var line string
- i := bytes.IndexByte(data, '\n')
- if i < 0 {
- return "", nil, false
- }
- line, data = string(bytes.TrimSpace(data[:i])), data[i+1:]
- if strings.HasSuffix(line, " missing") {
- return "missing", nil, true
- }
- f := strings.Fields(line)
- if len(f) != 3 {
- return "", nil, false
- }
- n, err := strconv.Atoi(f[2])
- if err != nil || n > len(data) {
- return "", nil, false
- }
- body, data = data[:n], data[n:]
- if len(data) > 0 && data[0] == '\r' {
- data = data[1:]
- }
- if len(data) > 0 && data[0] == '\n' {
- data = data[1:]
- }
- return f[1], body, true
- }
-
- badGit := func() ([]string, error) {
- return nil, fmt.Errorf("malformed output from git cat-file --batch")
- }
-
- for _, tag := range tags {
- commitType, _, ok := next()
- if !ok {
- return badGit()
- }
- fileType, fileData, ok := next()
- if !ok {
- return badGit()
- }
- f := fileMap[tag]
- f.Data = nil
- f.Err = nil
- switch commitType {
- default:
- f.Err = fmt.Errorf("unexpected non-commit type %q for rev %s", commitType, tag)
-
- case "missing":
- // Note: f.Err must not satisfy os.IsNotExist. That's reserved for the file not existing in a valid commit.
- f.Err = fmt.Errorf("no such rev %s", tag)
- missing = append(missing, tag)
-
- case "tag", "commit":
- switch fileType {
- default:
- f.Err = &fs.PathError{Path: tag + ":" + file, Op: "read", Err: fmt.Errorf("unexpected non-blob type %q", fileType)}
- case "missing":
- f.Err = &fs.PathError{Path: tag + ":" + file, Op: "read", Err: fs.ErrNotExist}
- case "blob":
- f.Data = fileData
- }
- }
- }
- if len(bytes.TrimSpace(data)) != 0 {
- return badGit()
- }
-
- return missing, nil
-}
-
func (r *gitRepo) RecentTag(rev, prefix string, allowed func(string) bool) (tag string, err error) {
info, err := r.Stat(rev)
if err != nil {
diff --git a/src/cmd/go/internal/modfetch/codehost/vcs.go b/src/cmd/go/internal/modfetch/codehost/vcs.go
index c2cca084e3..de62265efc 100644
--- a/src/cmd/go/internal/modfetch/codehost/vcs.go
+++ b/src/cmd/go/internal/modfetch/codehost/vcs.go
@@ -38,7 +38,7 @@ type VCSError struct {
func (e *VCSError) Error() string { return e.Err.Error() }
-func vcsErrorf(format string, a ...interface{}) error {
+func vcsErrorf(format string, a ...any) error {
return &VCSError{Err: fmt.Errorf(format, a...)}
}
@@ -51,7 +51,7 @@ func NewRepo(vcs, remote string) (Repo, error) {
repo Repo
err error
}
- c := vcsRepoCache.Do(key{vcs, remote}, func() interface{} {
+ c := vcsRepoCache.Do(key{vcs, remote}, func() any {
repo, err := newVCSRepo(vcs, remote)
if err != nil {
err = &VCSError{err}
@@ -382,19 +382,6 @@ func (r *vcsRepo) ReadFile(rev, file string, maxSize int64) ([]byte, error) {
return out, nil
}
-func (r *vcsRepo) ReadFileRevs(revs []string, file string, maxSize int64) (map[string]*FileRev, error) {
- // We don't technically need to lock here since we're returning an error
- // uncondititonally, but doing so anyway will help to avoid baking in
- // lock-inversion bugs.
- unlock, err := r.mu.Lock()
- if err != nil {
- return nil, err
- }
- defer unlock()
-
- return nil, vcsErrorf("ReadFileRevs not implemented")
-}
-
func (r *vcsRepo) RecentTag(rev, prefix string, allowed func(string) bool) (tag string, err error) {
// We don't technically need to lock here since we're returning an error
// uncondititonally, but doing so anyway will help to avoid baking in
diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go
index df835c3d7e..79da010809 100644
--- a/src/cmd/go/internal/modfetch/coderepo.go
+++ b/src/cmd/go/internal/modfetch/coderepo.go
@@ -321,7 +321,7 @@ func (r *codeRepo) convert(info *codehost.RevInfo, statVers string) (*RevInfo, e
return ok
}
- invalidf := func(format string, args ...interface{}) error {
+ invalidf := func(format string, args ...any) error {
return &module.ModuleError{
Path: r.modPath,
Err: &module.InvalidVersionError{
@@ -1066,7 +1066,7 @@ func (fi dataFileInfo) Size() int64 { return int64(len(fi.f.data)) }
func (fi dataFileInfo) Mode() fs.FileMode { return 0644 }
func (fi dataFileInfo) ModTime() time.Time { return time.Time{} }
func (fi dataFileInfo) IsDir() bool { return false }
-func (fi dataFileInfo) Sys() interface{} { return nil }
+func (fi dataFileInfo) Sys() any { return nil }
// hasPathPrefix reports whether the path s begins with the
// elements in prefix.
diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go
index 408b2860ad..f5423b48ad 100644
--- a/src/cmd/go/internal/modfetch/fetch.go
+++ b/src/cmd/go/internal/modfetch/fetch.go
@@ -48,7 +48,7 @@ func Download(ctx context.Context, mod module.Version) (dir string, err error) {
dir string
err error
}
- c := downloadCache.Do(mod, func() interface{} {
+ c := downloadCache.Do(mod, func() any {
dir, err := download(ctx, mod)
if err != nil {
return cached{"", err}
@@ -165,7 +165,7 @@ func DownloadZip(ctx context.Context, mod module.Version) (zipfile string, err e
zipfile string
err error
}
- c := downloadZipCache.Do(mod, func() interface{} {
+ c := downloadZipCache.Do(mod, func() any {
zipfile, err := CachePath(mod, "zip")
if err != nil {
return cached{"", err}
@@ -384,7 +384,8 @@ func RemoveAll(dir string) error {
return robustio.RemoveAll(dir)
}
-var GoSumFile string // path to go.sum; set by package modload
+var GoSumFile string // path to go.sum; set by package modload
+var WorkspaceGoSumFiles []string // path to module go.sums in workspace; set by package modload
type modSum struct {
mod module.Version
@@ -393,16 +394,39 @@ type modSum struct {
var goSum struct {
mu sync.Mutex
- m map[module.Version][]string // content of go.sum file
- status map[modSum]modSumStatus // state of sums in m
- overwrite bool // if true, overwrite go.sum without incorporating its contents
- enabled bool // whether to use go.sum at all
+ m map[module.Version][]string // content of go.sum file
+ w map[string]map[module.Version][]string // sum file in workspace -> content of that sum file
+ status map[modSum]modSumStatus // state of sums in m
+ overwrite bool // if true, overwrite go.sum without incorporating its contents
+ enabled bool // whether to use go.sum at all
}
type modSumStatus struct {
used, dirty bool
}
+// Reset resets globals in the modfetch package, so previous loads don't affect
+// contents of go.sum files
+func Reset() {
+ GoSumFile = ""
+ WorkspaceGoSumFiles = nil
+
+ // Uses of lookupCache and downloadCache both can call checkModSum,
+ // which in turn sets the used bit on goSum.status for modules.
+ // Reset them so used can be computed properly.
+ lookupCache = par.Cache{}
+ downloadCache = par.Cache{}
+
+ // Clear all fields on goSum. It will be initialized later
+ goSum.mu.Lock()
+ goSum.m = nil
+ goSum.w = nil
+ goSum.status = nil
+ goSum.overwrite = false
+ goSum.enabled = false
+ goSum.mu.Unlock()
+}
+
// initGoSum initializes the go.sum data.
// The boolean it returns reports whether the
// use of go.sum is now enabled.
@@ -417,23 +441,38 @@ func initGoSum() (bool, error) {
goSum.m = make(map[module.Version][]string)
goSum.status = make(map[modSum]modSumStatus)
+ goSum.w = make(map[string]map[module.Version][]string)
+
+ for _, f := range WorkspaceGoSumFiles {
+ goSum.w[f] = make(map[module.Version][]string)
+ _, err := readGoSumFile(goSum.w[f], f)
+ if err != nil {
+ return false, err
+ }
+ }
+
+ enabled, err := readGoSumFile(goSum.m, GoSumFile)
+ goSum.enabled = enabled
+ return enabled, err
+}
+
+func readGoSumFile(dst map[module.Version][]string, file string) (bool, error) {
var (
data []byte
err error
)
- if actualSumFile, ok := fsys.OverlayPath(GoSumFile); ok {
+ if actualSumFile, ok := fsys.OverlayPath(file); ok {
// Don't lock go.sum if it's part of the overlay.
// On Plan 9, locking requires chmod, and we don't want to modify any file
// in the overlay. See #44700.
data, err = os.ReadFile(actualSumFile)
} else {
- data, err = lockedfile.Read(GoSumFile)
+ data, err = lockedfile.Read(file)
}
if err != nil && !os.IsNotExist(err) {
return false, err
}
- goSum.enabled = true
- readGoSum(goSum.m, GoSumFile, data)
+ readGoSum(dst, file, data)
return true, nil
}
@@ -485,6 +524,16 @@ func HaveSum(mod module.Version) bool {
if err != nil || !inited {
return false
}
+ for _, goSums := range goSum.w {
+ for _, h := range goSums[mod] {
+ if !strings.HasPrefix(h, "h1:") {
+ continue
+ }
+ if !goSum.status[modSum{mod, h}].dirty {
+ return true
+ }
+ }
+ }
for _, h := range goSum.m[mod] {
if !strings.HasPrefix(h, "h1:") {
continue
@@ -602,15 +651,32 @@ func checkModSum(mod module.Version, h string) error {
// If it finds a conflicting pair instead, it calls base.Fatalf.
// goSum.mu must be locked.
func haveModSumLocked(mod module.Version, h string) bool {
+ sumFileName := "go.sum"
+ if strings.HasSuffix(GoSumFile, "go.work.sum") {
+ sumFileName = "go.work.sum"
+ }
for _, vh := range goSum.m[mod] {
if h == vh {
return true
}
if strings.HasPrefix(vh, "h1:") {
- base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\tgo.sum: %v"+goSumMismatch, mod.Path, mod.Version, h, vh)
+ base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, sumFileName, vh)
}
}
- return false
+ // Also check workspace sums.
+ foundMatch := false
+ // Check sums from all files in case there are conflicts between
+ // the files.
+ for goSumFile, goSums := range goSum.w {
+ for _, vh := range goSums[mod] {
+ if h == vh {
+ foundMatch = true
+ } else if strings.HasPrefix(vh, "h1:") {
+ base.Fatalf("verifying %s@%s: checksum mismatch\n\tdownloaded: %v\n\t%s: %v"+goSumMismatch, mod.Path, mod.Version, h, goSumFile, vh)
+ }
+ }
+ }
+ return foundMatch
}
// addModSumLocked adds the pair mod,h to go.sum.
@@ -749,7 +815,7 @@ Outer:
goSum.m = make(map[module.Version][]string, len(goSum.m))
readGoSum(goSum.m, GoSumFile, data)
for ms, st := range goSum.status {
- if st.used {
+ if st.used && !sumInWorkspaceModulesLocked(ms.mod) {
addModSumLocked(ms.mod, ms.sum)
}
}
@@ -767,7 +833,7 @@ Outer:
sort.Strings(list)
for _, h := range list {
st := goSum.status[modSum{m, h}]
- if !st.dirty || (st.used && keep[m]) {
+ if (!st.dirty || (st.used && keep[m])) && !sumInWorkspaceModulesLocked(m) {
fmt.Fprintf(&buf, "%s %s %s\n", m.Path, m.Version, h)
}
}
@@ -784,6 +850,15 @@ Outer:
return nil
}
+func sumInWorkspaceModulesLocked(m module.Version) bool {
+ for _, goSums := range goSum.w {
+ if _, ok := goSums[m]; ok {
+ return true
+ }
+ }
+ return false
+}
+
// TrimGoSum trims go.sum to contain only the modules needed for reproducible
// builds.
//
diff --git a/src/cmd/go/internal/modfetch/repo.go b/src/cmd/go/internal/modfetch/repo.go
index 0bffa55af6..1b42ecb6ed 100644
--- a/src/cmd/go/internal/modfetch/repo.go
+++ b/src/cmd/go/internal/modfetch/repo.go
@@ -196,7 +196,7 @@ func Lookup(proxy, path string) Repo {
type cached struct {
r Repo
}
- c := lookupCache.Do(lookupCacheKey{proxy, path}, func() interface{} {
+ c := lookupCache.Do(lookupCacheKey{proxy, path}, func() any {
r := newCachingRepo(path, func() (Repo, error) {
r, err := lookup(proxy, path)
if err == nil && traceRepo {
@@ -308,7 +308,7 @@ func newLoggingRepo(r Repo) *loggingRepo {
// defer logCall("hello %s", arg)()
//
// Note the final ().
-func logCall(format string, args ...interface{}) func() {
+func logCall(format string, args ...any) func() {
start := time.Now()
fmt.Fprintf(os.Stderr, "+++ %s\n", fmt.Sprintf(format, args...))
return func() {
@@ -371,7 +371,7 @@ type notExistError struct {
err error
}
-func notExistErrorf(format string, args ...interface{}) error {
+func notExistErrorf(format string, args ...any) error {
return notExistError{fmt.Errorf(format, args...)}
}
diff --git a/src/cmd/go/internal/modget/get.go b/src/cmd/go/internal/modget/get.go
index 2c48c3c444..3d8463e892 100644
--- a/src/cmd/go/internal/modget/get.go
+++ b/src/cmd/go/internal/modget/get.go
@@ -601,7 +601,7 @@ func (r *resolver) matchInModule(ctx context.Context, pattern string, m module.V
err error
}
- e := r.matchInModuleCache.Do(key{pattern, m}, func() interface{} {
+ e := r.matchInModuleCache.Do(key{pattern, m}, func() any {
match := modload.MatchInModule(ctx, pattern, m, imports.AnyTags())
if len(match.Errs) > 0 {
return entry{match.Pkgs, match.Errs[0]}
@@ -893,7 +893,7 @@ func (r *resolver) checkWildcardVersions(ctx context.Context) {
// curM at its original version contains a path matching q.pattern,
// but at rev.Version it does not, so (somewhat paradoxically) if
// we changed the version of curM it would no longer match the query.
- var version interface{} = m
+ var version any = m
if rev.Version != q.version {
version = fmt.Sprintf("%s@%s (%s)", m.Path, q.version, m.Version)
}
@@ -1124,7 +1124,7 @@ func (r *resolver) loadPackages(ctx context.Context, patterns []string, findPack
}
opts.AllowPackage = func(ctx context.Context, path string, m module.Version) error {
- if m.Path == "" || m.Version == "" && modload.MainModules.Contains(m.Path) {
+ if m.Path == "" || m.Version == "" {
// Packages in the standard library and main modules are already at their
// latest (and only) available versions.
return nil
diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go
index 0e0292ec15..bfc73cc2f9 100644
--- a/src/cmd/go/internal/modload/build.go
+++ b/src/cmd/go/internal/modload/build.go
@@ -346,33 +346,22 @@ func findModule(ld *loader, path string) (module.Version, bool) {
}
func ModInfoProg(info string, isgccgo bool) []byte {
- // Inject a variable with the debug information as runtime.modinfo,
- // but compile it in package main so that it is specific to the binary.
- // The variable must be a literal so that it will have the correct value
- // before the initializer for package main runs.
- //
- // The runtime startup code refers to the variable, which keeps it live
- // in all binaries.
- //
- // Note: we use an alternate recipe below for gccgo (based on an
- // init function) due to the fact that gccgo does not support
- // applying a "//go:linkname" directive to a variable. This has
- // drawbacks in that other packages may want to look at the module
- // info in their init functions (see issue 29628), which won't
- // work for gccgo. See also issue 30344.
-
- if !isgccgo {
- return []byte(fmt.Sprintf(`package main
-import _ "unsafe"
-//go:linkname __debug_modinfo__ runtime.modinfo
-var __debug_modinfo__ = %q
-`, string(infoStart)+info+string(infoEnd)))
- } else {
+ // Inject an init function to set runtime.modinfo.
+ // This is only used for gccgo - with gc we hand the info directly to the linker.
+ // The init function has the drawback that packages may want to
+ // look at the module info in their init functions (see issue 29628),
+ // which won't work. See also issue 30344.
+ if isgccgo {
return []byte(fmt.Sprintf(`package main
import _ "unsafe"
//go:linkname __set_debug_modinfo__ runtime.setmodinfo
func __set_debug_modinfo__(string)
func init() { __set_debug_modinfo__(%q) }
-`, string(infoStart)+info+string(infoEnd)))
+`, ModInfoData(info)))
}
+ return nil
+}
+
+func ModInfoData(info string) []byte {
+ return []byte(string(infoStart) + info + string(infoEnd))
}
diff --git a/src/cmd/go/internal/modload/buildlist.go b/src/cmd/go/internal/modload/buildlist.go
index 27cab0b9c8..6f9072c8c4 100644
--- a/src/cmd/go/internal/modload/buildlist.go
+++ b/src/cmd/go/internal/modload/buildlist.go
@@ -38,11 +38,17 @@ type Requirements struct {
// If pruned, the graph includes only the root modules, the explicit
// requirements of those root modules, and the transitive requirements of only
// the root modules that do not support pruning.
+ //
+ // If workspace, the graph includes only the workspace modules, the explicit
+ // requirements of the workspace modules, and the transitive requirements of
+ // the workspace modules that do not support pruning.
pruning modPruning
- // rootModules is the set of module versions explicitly required by the main
- // modules, sorted and capped to length. It may contain duplicates, and may
- // contain multiple versions for a given module path.
+ // rootModules is the set of root modules of the graph, sorted and capped to
+ // length. It may contain duplicates, and may contain multiple versions for a
+ given module path. The root modules of the graph are the set of main
+ // modules in workspace mode, and the main module's direct requirements
+ // outside workspace mode.
rootModules []module.Version
maxRootVersion map[string]string
@@ -99,6 +105,19 @@ var requirements *Requirements
// If vendoring is in effect, the caller must invoke initVendor on the returned
// *Requirements before any other method.
func newRequirements(pruning modPruning, rootModules []module.Version, direct map[string]bool) *Requirements {
+ if pruning == workspace {
+ return &Requirements{
+ pruning: pruning,
+ rootModules: capVersionSlice(rootModules),
+ maxRootVersion: nil,
+ direct: direct,
+ }
+ }
+
+ if workFilePath != "" && pruning != workspace {
+ panic("in workspace mode, but pruning is not workspace in newRequirements")
+ }
+
for i, m := range rootModules {
if m.Version == "" && MainModules.Contains(m.Path) {
panic(fmt.Sprintf("newRequirements called with untrimmed build list: rootModules[%v] is a main module", i))
@@ -291,13 +310,11 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
g: mvs.NewGraph(cmpVersion, MainModules.Versions()),
}
)
- for _, m := range MainModules.Versions() {
- // Require all roots from all main modules.
- _ = TODOWorkspaces("This flattens a level of the module graph, adding the dependencies " +
- "of all main modules to a single requirements struct, and losing the information of which " +
- "main module required which requirement. Rework the requirements struct and change this" +
- "to reflect the structure of the main modules.")
- mg.g.Require(m, roots)
+ if pruning != workspace {
+ if inWorkspaceMode() {
+ panic("pruning is not workspace in workspace mode")
+ }
+ mg.g.Require(MainModules.mustGetSingleMainModule(), roots)
}
var (
@@ -309,7 +326,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
// It does not load the transitive requirements of m even if the go version in
// m's go.mod file indicates that it supports graph pruning.
loadOne := func(m module.Version) (*modFileSummary, error) {
- cached := mg.loadCache.Do(m, func() interface{} {
+ cached := mg.loadCache.Do(m, func() any {
summary, err := goModSummary(m)
mu.Lock()
@@ -335,7 +352,7 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
if pruning == unpruned {
if _, dup := loadingUnpruned.LoadOrStore(m, nil); dup {
// m has already been enqueued for loading. Since unpruned loading may
- // follow cycles in the the requirement graph, we need to return early
+ // follow cycles in the requirement graph, we need to return early
// to avoid making the load queue infinitely long.
return
}
@@ -352,9 +369,13 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
// are sufficient to build the packages it contains. We must load its full
// transitive dependency graph to be sure that we see all relevant
// dependencies.
- if pruning == unpruned || summary.pruning == unpruned {
+ if pruning != pruned || summary.pruning == unpruned {
+ nextPruning := summary.pruning
+ if pruning == unpruned {
+ nextPruning = unpruned
+ }
for _, r := range summary.require {
- enqueue(r, unpruned)
+ enqueue(r, nextPruning)
}
}
})
@@ -365,6 +386,52 @@ func readModGraph(ctx context.Context, pruning modPruning, roots []module.Versio
}
<-loadQueue.Idle()
+ // Reload any dependencies of the main modules which are not
+ // at their selected versions in workspace mode, because the
+ // requirements don't accurately reflect the transitive imports.
+ if pruning == workspace {
+ // hasDepsInAll contains the set of modules that need to be loaded
+ // at workspace pruning because any of their dependencies may
+ // provide packages in all.
+ hasDepsInAll := make(map[string]bool)
+ seen := map[module.Version]bool{}
+ for _, m := range roots {
+ hasDepsInAll[m.Path] = true
+ seen[m] = true
+ }
+ // This loop will terminate because it will call enqueue on each version of
+ // each dependency of the modules in hasDepsInAll at most once (and only
+ // calls enqueue on successively increasing versions of each dependency).
+ for {
+ needsEnqueueing := map[module.Version]bool{}
+ for p := range hasDepsInAll {
+ m := module.Version{Path: p, Version: mg.g.Selected(p)}
+ reqs, ok := mg.g.RequiredBy(m)
+ if !ok {
+ needsEnqueueing[m] = true
+ continue
+ }
+ for _, r := range reqs {
+ s := module.Version{Path: r.Path, Version: mg.g.Selected(r.Path)}
+ if cmpVersion(s.Version, r.Version) > 0 && !seen[s] {
+ needsEnqueueing[s] = true
+ }
+ }
+ }
+ // Stop if no new modules need to be loaded; otherwise enqueue them below.
+ if len(needsEnqueueing) == 0 {
+ break
+ }
+
+ for p := range needsEnqueueing {
+ enqueue(p, workspace)
+ seen[p] = true
+ hasDepsInAll[p.Path] = true
+ }
+ <-loadQueue.Idle()
+ }
+ }
+
if hasError {
return mg, mg.findError()
}
@@ -424,12 +491,15 @@ func (mg *ModuleGraph) findError() error {
}
func (mg *ModuleGraph) allRootsSelected() bool {
- for _, mm := range MainModules.Versions() {
- roots, _ := mg.g.RequiredBy(mm)
- for _, m := range roots {
- if mg.Selected(m.Path) != m.Version {
- return false
- }
+ var roots []module.Version
+ if inWorkspaceMode() {
+ roots = MainModules.Versions()
+ } else {
+ roots, _ = mg.g.RequiredBy(MainModules.mustGetSingleMainModule())
+ }
+ for _, m := range roots {
+ if mg.Selected(m.Path) != m.Version {
+ return false
}
}
return true
@@ -576,10 +646,30 @@ func tidyRoots(ctx context.Context, rs *Requirements, pkgs []*loadPkg) (*Require
}
func updateRoots(ctx context.Context, direct map[string]bool, rs *Requirements, pkgs []*loadPkg, add []module.Version, rootsImported bool) (*Requirements, error) {
- if rs.pruning == unpruned {
+ switch rs.pruning {
+ case unpruned:
return updateUnprunedRoots(ctx, direct, rs, add)
+ case pruned:
+ return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported)
+ case workspace:
+ return updateWorkspaceRoots(ctx, rs, add)
+ default:
+ panic(fmt.Sprintf("unsupported pruning mode: %v", rs.pruning))
}
- return updatePrunedRoots(ctx, direct, rs, pkgs, add, rootsImported)
+}
+
+func updateWorkspaceRoots(ctx context.Context, rs *Requirements, add []module.Version) (*Requirements, error) {
+ if len(add) != 0 {
+ // add should be empty in workspace mode because workspace mode implies
+ // -mod=readonly, which in turn implies no new requirements. The code path
+ // that would result in add being non-empty returns an error before it
+ // reaches this point: The set of modules to add comes from
+ // resolveMissingImports, which in turn resolves each package by calling
+ // queryImport. But queryImport explicitly checks for -mod=readonly, and
+ // returns an error.
+ panic("add is not empty")
+ }
+ return rs, nil
}
// tidyPrunedRoots returns a minimal set of root requirements that maintains the
@@ -1156,7 +1246,6 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
}
}
- // TODO(matloob): Make roots into a map.
var roots []module.Version
for _, mainModule := range MainModules.Versions() {
min, err := mvs.Req(mainModule, rootPaths, &mvsReqs{roots: keep})
@@ -1182,6 +1271,8 @@ func updateUnprunedRoots(ctx context.Context, direct map[string]bool, rs *Requir
func convertPruning(ctx context.Context, rs *Requirements, pruning modPruning) (*Requirements, error) {
if rs.pruning == pruning {
return rs, nil
+ } else if rs.pruning == workspace || pruning == workspace {
+ panic("attempthing to convert to/from workspace pruning and another pruning type")
}
if pruning == unpruned {
diff --git a/src/cmd/go/internal/modload/edit.go b/src/cmd/go/internal/modload/edit.go
index 023983caed..0f37e3b2e9 100644
--- a/src/cmd/go/internal/modload/edit.go
+++ b/src/cmd/go/internal/modload/edit.go
@@ -76,7 +76,7 @@ func editRequirements(ctx context.Context, rs *Requirements, tryUpgrade, mustSel
// requirements.
var rootPaths []string
for _, m := range mustSelect {
- if !MainModules.Contains(m.Path) && m.Version != "none" {
+ if m.Version != "none" && !MainModules.Contains(m.Path) {
rootPaths = append(rootPaths, m.Path)
}
}
@@ -370,7 +370,7 @@ func selectPotentiallyImportedModules(ctx context.Context, limiter *versionLimit
if err != nil {
return nil, false, err
}
- initial = mg.BuildList()[1:]
+ initial = mg.BuildList()[MainModules.Len():]
} else {
initial = rs.rootModules
}
diff --git a/src/cmd/go/internal/modload/import.go b/src/cmd/go/internal/modload/import.go
index bc2b0a0230..812e48a156 100644
--- a/src/cmd/go/internal/modload/import.go
+++ b/src/cmd/go/internal/modload/import.go
@@ -612,7 +612,7 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile
// (the main module, and any directory trees pointed at by replace directives).
if isLocal {
for d := dir; d != mdir && len(d) > len(mdir); {
- haveGoMod := haveGoModCache.Do(d, func() interface{} {
+ haveGoMod := haveGoModCache.Do(d, func() any {
fi, err := fsys.Stat(filepath.Join(d, "go.mod"))
return err == nil && !fi.IsDir()
}).(bool)
@@ -635,7 +635,7 @@ func dirInModule(path, mpath, mdir string, isLocal bool) (dir string, haveGoFile
// Are there Go source files in the directory?
// We don't care about build tags, not even "+build ignore".
// We're just looking for a plausible directory.
- res := haveGoFilesCache.Do(dir, func() interface{} {
+ res := haveGoFilesCache.Do(dir, func() any {
ok, err := fsys.IsDirWithGoFiles(dir)
return goFilesEntry{haveGoFiles: ok, err: err}
}).(goFilesEntry)
diff --git a/src/cmd/go/internal/modload/import_test.go b/src/cmd/go/internal/modload/import_test.go
index 11310489ad..65a889ec52 100644
--- a/src/cmd/go/internal/modload/import_test.go
+++ b/src/cmd/go/internal/modload/import_test.go
@@ -69,7 +69,7 @@ func TestQueryImport(t *testing.T) {
RootMode = NoRoot
ctx := context.Background()
- rs := newRequirements(unpruned, nil, nil)
+ rs := LoadModFile(ctx)
for _, tt := range importTests {
t.Run(strings.ReplaceAll(tt.path, "/", "_"), func(t *testing.T) {
diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go
index 9aef5a7c33..cdcfbeb8de 100644
--- a/src/cmd/go/internal/modload/init.go
+++ b/src/cmd/go/internal/modload/init.go
@@ -12,6 +12,7 @@ import (
"fmt"
"go/build"
"internal/lazyregexp"
+ "io/ioutil"
"os"
"path"
"path/filepath"
@@ -56,10 +57,6 @@ var (
ExplicitWriteGoMod bool
)
-func TODOWorkspaces(s string) error {
- return fmt.Errorf("need to support this for workspaces: %s", s)
-}
-
// Variables set in Init.
var (
initialized bool
@@ -73,6 +70,17 @@ var (
gopath string
)
+// EnterModule resets MainModules and requirements to refer to just this one module.
+func EnterModule(ctx context.Context, enterModroot string) {
+ MainModules = nil // reset MainModules
+ requirements = nil
+ workFilePath = "" // Force module mode
+ modfetch.Reset()
+
+ modRoots = []string{enterModroot}
+ LoadModFile(ctx)
+}
+
// Variable set in InitWorkfile
var (
// Set to the path to the go.work file, or "" if workspace mode is disabled.
@@ -287,7 +295,7 @@ func InitWorkfile() {
workFilePath = findWorkspaceFile(base.Cwd())
default:
if !filepath.IsAbs(cfg.WorkFile) {
- base.Errorf("the path provided to -workfile must be an absolute path")
+ base.Fatalf("the path provided to -workfile must be an absolute path")
}
workFilePath = cfg.WorkFile
}
@@ -407,9 +415,6 @@ func Init() {
// We're in module mode. Set any global variables that need to be set.
cfg.ModulesEnabled = true
setDefaultBuildMod()
- _ = TODOWorkspaces("In workspace mode, mod will not be readonly for go mod download," +
- "verify, graph, and why. Implement support for go mod download and add test cases" +
- "to ensure verify, graph, and why work properly.")
list := filepath.SplitList(cfg.BuildContext.GOPATH)
if len(list) > 0 && list[0] != "" {
gopath = list[0]
@@ -521,6 +526,9 @@ func die() {
if cfg.Getenv("GO111MODULE") == "off" {
base.Fatalf("go: modules disabled by GO111MODULE=off; see 'go help modules'")
}
+ if inWorkspaceMode() {
+ base.Fatalf("go: no modules were found in the current workspace; see 'go help work'")
+ }
if dir, name := findAltConfig(base.Cwd()); dir != "" {
rel, err := filepath.Rel(base.Cwd(), dir)
if err != nil {
@@ -552,13 +560,8 @@ func (goModDirtyError) Error() string {
var errGoModDirty error = goModDirtyError{}
func loadWorkFile(path string) (goVersion string, modRoots []string, replaces []*modfile.Replace, err error) {
- _ = TODOWorkspaces("Clean up and write back the go.work file: add module paths for workspace modules.")
workDir := filepath.Dir(path)
- workData, err := lockedfile.Read(path)
- if err != nil {
- return "", nil, nil, err
- }
- wf, err := modfile.ParseWork(path, workData, nil)
+ wf, err := ReadWorkFile(path)
if err != nil {
return "", nil, nil, err
}
@@ -566,20 +569,64 @@ func loadWorkFile(path string) (goVersion string, modRoots []string, replaces []
goVersion = wf.Go.Version
}
seen := map[string]bool{}
- for _, d := range wf.Directory {
+ for _, d := range wf.Use {
modRoot := d.Path
if !filepath.IsAbs(modRoot) {
modRoot = filepath.Join(workDir, modRoot)
}
+
if seen[modRoot] {
return "", nil, nil, fmt.Errorf("path %s appears multiple times in workspace", modRoot)
}
seen[modRoot] = true
modRoots = append(modRoots, modRoot)
}
+
return goVersion, modRoots, wf.Replace, nil
}
+// ReadWorkFile reads and parses the go.work file at the given path.
+func ReadWorkFile(path string) (*modfile.WorkFile, error) {
+ workData, err := ioutil.ReadFile(path)
+ if err != nil {
+ return nil, err
+ }
+
+ return modfile.ParseWork(path, workData, nil)
+}
+
+// WriteWorkFile cleans and writes out the go.work file to the given path.
+func WriteWorkFile(path string, wf *modfile.WorkFile) error {
+ wf.SortBlocks()
+ wf.Cleanup()
+ out := modfile.Format(wf.Syntax)
+
+ return ioutil.WriteFile(path, out, 0666)
+}
+
+// UpdateWorkFile updates comments on use directives in the go.work
+// file to include the associated module path.
+func UpdateWorkFile(wf *modfile.WorkFile) {
+ missingModulePaths := map[string]string{} // module directory listed in file -> abspath modroot
+
+ for _, d := range wf.Use {
+ modRoot := d.Path
+ if d.ModulePath == "" {
+ missingModulePaths[d.Path] = modRoot
+ }
+ }
+
+ // Clean up and annotate directories.
+ // TODO(matloob): update x/mod to actually add module paths.
+ for moddir, absmodroot := range missingModulePaths {
+ _, f, err := ReadModFile(filepath.Join(absmodroot, "go.mod"), nil)
+ if err != nil {
+ continue // Error will be reported if modules are loaded.
+ }
+ wf.AddUse(moddir, f.Module.Mod.Path)
+ }
+}
+
// LoadModFile sets Target and, if there is a main module, parses the initial
// build list from its go.mod file.
//
@@ -614,8 +661,10 @@ func LoadModFile(ctx context.Context) *Requirements {
if err != nil {
base.Fatalf("reading go.work: %v", err)
}
- _ = TODOWorkspaces("Support falling back to individual module go.sum " +
- "files for sums not in the workspace sum file.")
+ for _, modRoot := range modRoots {
+ sumFile := strings.TrimSuffix(modFilePath(modRoot), ".mod") + ".sum"
+ modfetch.WorkspaceGoSumFiles = append(modfetch.WorkspaceGoSumFiles, sumFile)
+ }
modfetch.GoSumFile = workFilePath + ".sum"
} else if modRoots == nil {
// We're in module mode, but not inside a module.
@@ -639,12 +688,18 @@ func LoadModFile(ctx context.Context) *Requirements {
modfetch.GoSumFile = strings.TrimSuffix(modFilePath(modRoots[0]), ".mod") + ".sum"
}
if len(modRoots) == 0 {
- _ = TODOWorkspaces("Instead of creating a fake module with an empty modroot, make MainModules.Len() == 0 mean that we're in module mode but not inside any module.")
+ // TODO(#49228): Instead of creating a fake module with an empty modroot,
+ // make MainModules.Len() == 0 mean that we're in module mode but not inside
+ // any module.
mainModule := module.Version{Path: "command-line-arguments"}
MainModules = makeMainModules([]module.Version{mainModule}, []string{""}, []*modfile.File{nil}, []*modFileIndex{nil}, "", nil)
goVersion := LatestGoVersion()
rawGoVersion.Store(mainModule, goVersion)
- requirements = newRequirements(pruningForGoVersion(goVersion), nil, nil)
+ pruning := pruningForGoVersion(goVersion)
+ if inWorkspaceMode() {
+ pruning = workspace
+ }
+ requirements = newRequirements(pruning, nil, nil)
return requirements
}
@@ -704,7 +759,7 @@ func LoadModFile(ctx context.Context) *Requirements {
}
}
- if MainModules.Index(mainModule).goVersionV == "" {
+ if MainModules.Index(mainModule).goVersionV == "" && rs.pruning != workspace {
// TODO(#45551): Do something more principled instead of checking
// cfg.CmdName directly here.
if cfg.BuildMod == "mod" && cfg.CmdName != "mod graph" && cfg.CmdName != "mod why" {
@@ -839,11 +894,11 @@ func CreateWorkFile(ctx context.Context, workFile string, modDirs []string) {
}
base.Fatalf("go: error parsing go.mod in directory %s: %v", dir, err)
}
- workF.AddDirectory(ToDirectoryPath(dir), f.Module.Mod.Path)
+ workF.AddUse(ToDirectoryPath(dir), f.Module.Mod.Path)
}
- data := modfile.Format(workF.Syntax)
- lockedfile.Write(workFile, bytes.NewReader(data), 0666)
+ UpdateWorkFile(workF)
+ WriteWorkFile(workFile, workF)
}
// fixVersion returns a modfile.VersionFixer implemented using the Query function.
@@ -928,9 +983,16 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
workFileReplaceMap: toReplaceMap(workFileReplaces),
highestReplaced: map[string]string{},
}
+ mainModulePaths := make(map[string]bool)
+ for _, m := range ms {
+ mainModulePaths[m.Path] = true
+ }
replacedByWorkFile := make(map[string]bool)
replacements := make(map[module.Version]module.Version)
for _, r := range workFileReplaces {
+ if mainModulePaths[r.Old.Path] && r.Old.Version == "" {
+ base.Errorf("go: workspace module %v is replaced at all versions in the go.work file. To fix, remove the replacement from the go.work file or specify the version at which to replace the module.", r.Old.Path)
+ }
replacedByWorkFile[r.Old.Path] = true
v, ok := mainModules.highestReplaced[r.Old.Path]
if !ok || semver.Compare(r.Old.Version, v) > 0 {
@@ -969,7 +1031,7 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
if replacedByWorkFile[r.Old.Path] {
continue
} else if prev, ok := replacements[r.Old]; ok && !curModuleReplaces[r.Old] && prev != r.New {
- base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go mod editwork -replace %v=[override]\" to resolve", r.Old, prev, r.New, r.Old)
+ base.Fatalf("go: conflicting replacements for %v:\n\t%v\n\t%v\nuse \"go work edit -replace %v=[override]\" to resolve", r.Old, prev, r.New, r.Old)
}
curModuleReplaces[r.Old] = true
replacements[r.Old] = r.New
@@ -987,29 +1049,29 @@ func makeMainModules(ms []module.Version, rootDirs []string, modFiles []*modfile
// requirementsFromModFiles returns the set of non-excluded requirements from
// the global modFile.
func requirementsFromModFiles(ctx context.Context, modFiles []*modfile.File) *Requirements {
- rootCap := 0
- for i := range modFiles {
- rootCap += len(modFiles[i].Require)
- }
- roots := make([]module.Version, 0, rootCap)
- mPathCount := make(map[string]int)
- for _, m := range MainModules.Versions() {
- mPathCount[m.Path] = 1
- }
+ var roots []module.Version
direct := map[string]bool{}
- for _, modFile := range modFiles {
- requirement:
+ var pruning modPruning
+ if inWorkspaceMode() {
+ pruning = workspace
+ roots = make([]module.Version, len(MainModules.Versions()))
+ copy(roots, MainModules.Versions())
+ } else {
+ pruning = pruningForGoVersion(MainModules.GoVersion())
+ if len(modFiles) != 1 {
+ panic(fmt.Errorf("requirementsFromModFiles called with %v modfiles outside workspace mode", len(modFiles)))
+ }
+ modFile := modFiles[0]
+ roots = make([]module.Version, 0, len(modFile.Require))
+ mm := MainModules.mustGetSingleMainModule()
for _, r := range modFile.Require {
- // TODO(#45713): Maybe join
- for _, mainModule := range MainModules.Versions() {
- if index := MainModules.Index(mainModule); index != nil && index.exclude[r.Mod] {
- if cfg.BuildMod == "mod" {
- fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
- } else {
- fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
- }
- continue requirement
+ if index := MainModules.Index(mm); index != nil && index.exclude[r.Mod] {
+ if cfg.BuildMod == "mod" {
+ fmt.Fprintf(os.Stderr, "go: dropping requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
+ } else {
+ fmt.Fprintf(os.Stderr, "go: ignoring requirement on excluded version %s %s\n", r.Mod.Path, r.Mod.Version)
}
+ continue
}
roots = append(roots, r.Mod)
@@ -1019,7 +1081,7 @@ func requirementsFromModFiles(ctx context.Context, modFiles []*modfile.File) *Re
}
}
module.Sort(roots)
- rs := newRequirements(pruningForGoVersion(MainModules.GoVersion()), roots, direct)
+ rs := newRequirements(pruning, roots, direct)
return rs
}
@@ -1040,7 +1102,7 @@ func setDefaultBuildMod() {
// to modload functions instead of relying on an implicit setting
// based on command name.
switch cfg.CmdName {
- case "get", "mod download", "mod init", "mod tidy":
+ case "get", "mod download", "mod init", "mod tidy", "work sync":
// These commands are intended to update go.mod and go.sum.
cfg.BuildMod = "mod"
return
@@ -1221,9 +1283,10 @@ func findWorkspaceFile(dir string) (root string) {
break
}
if d == cfg.GOROOT {
- _ = TODOWorkspaces("If we end up checking in a go.work file to GOROOT/src," +
- "remove this case.")
- return "" // As a special case, don't cross GOROOT to find a go.work file.
+ // As a special case, don't cross GOROOT to find a go.work file.
+ // The standard library and commands built in go always use the vendored
+ // dependencies, so avoid using a most likely irrelevant go.work file.
+ return ""
}
dir = d
}
diff --git a/src/cmd/go/internal/modload/load.go b/src/cmd/go/internal/modload/load.go
index 845bf2f8a2..617b634d26 100644
--- a/src/cmd/go/internal/modload/load.go
+++ b/src/cmd/go/internal/modload/load.go
@@ -231,6 +231,9 @@ type PackageOpts struct {
// SilenceUnmatchedWarnings suppresses the warnings normally emitted for
// patterns that did not match any packages.
SilenceUnmatchedWarnings bool
+
+ // Resolve the query against this module.
+ MainModule module.Version
}
// LoadPackages identifies the set of packages matching the given patterns and
@@ -256,7 +259,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
case m.IsLocal():
// Evaluate list of file system directories on first iteration.
if m.Dirs == nil {
- matchLocalDirs(ctx, m, rs)
+ matchModRoots := modRoots
+ if opts.MainModule != (module.Version{}) {
+ matchModRoots = []string{MainModules.ModRoot(opts.MainModule)}
+ }
+ matchLocalDirs(ctx, matchModRoots, m, rs)
}
// Make a copy of the directory list and translate to import paths.
@@ -309,7 +316,11 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// The initial roots are the packages in the main module.
// loadFromRoots will expand that to "all".
m.Errs = m.Errs[:0]
- matchPackages(ctx, m, opts.Tags, omitStd, MainModules.Versions())
+ matchModules := MainModules.Versions()
+ if opts.MainModule != (module.Version{}) {
+ matchModules = []module.Version{opts.MainModule}
+ }
+ matchPackages(ctx, m, opts.Tags, omitStd, matchModules)
} else {
// Starting with the packages in the main module,
// enumerate the full list of "all".
@@ -441,7 +452,7 @@ func LoadPackages(ctx context.Context, opts PackageOpts, patterns ...string) (ma
// matchLocalDirs is like m.MatchDirs, but tries to avoid scanning directories
// outside of the standard library and active modules.
-func matchLocalDirs(ctx context.Context, m *search.Match, rs *Requirements) {
+func matchLocalDirs(ctx context.Context, modRoots []string, m *search.Match, rs *Requirements) {
if !m.IsLocal() {
panic(fmt.Sprintf("internal error: resolveLocalDirs on non-local pattern %s", m.Pattern()))
}
@@ -460,8 +471,8 @@ func matchLocalDirs(ctx context.Context, m *search.Match, rs *Requirements) {
modRoot := findModuleRoot(absDir)
found := false
- for _, mod := range MainModules.Versions() {
- if MainModules.ModRoot(mod) == modRoot {
+ for _, mainModuleRoot := range modRoots {
+ if mainModuleRoot == modRoot {
found = true
break
}
@@ -848,7 +859,7 @@ func (ld *loader) reset() {
// errorf reports an error via either os.Stderr or base.Errorf,
// according to whether ld.AllowErrors is set.
-func (ld *loader) errorf(format string, args ...interface{}) {
+func (ld *loader) errorf(format string, args ...any) {
if ld.AllowErrors {
fmt.Fprintf(os.Stderr, format, args...)
} else {
@@ -1004,7 +1015,11 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
}
var err error
- ld.requirements, err = convertPruning(ctx, ld.requirements, pruningForGoVersion(ld.GoVersion))
+ desiredPruning := pruningForGoVersion(ld.GoVersion)
+ if ld.requirements.pruning == workspace {
+ desiredPruning = workspace
+ }
+ ld.requirements, err = convertPruning(ctx, ld.requirements, desiredPruning)
if err != nil {
ld.errorf("go: %v\n", err)
}
@@ -1076,7 +1091,7 @@ func loadFromRoots(ctx context.Context, params loaderParams) *loader {
break
}
if changed {
- // Don't resolve missing imports until the module graph have stabilized.
+ // Don't resolve missing imports until the module graph has stabilized.
// If the roots are still changing, they may turn out to specify a
// requirement on the missing package(s), and we would rather use a
// version specified by a new root than add a new dependency on an
@@ -1246,6 +1261,24 @@ func (ld *loader) updateRequirements(ctx context.Context) (changed bool, err err
continue
}
+ if inWorkspaceMode() {
+ // In workspace mode / workspace pruning mode, the roots are the main modules
+ // rather than the main module's direct dependencies. The check below on the selected
+ // roots does not apply.
+ if mg, err := rs.Graph(ctx); err != nil {
+ return false, err
+ } else if _, ok := mg.RequiredBy(dep.mod); !ok {
+ // dep.mod is not an explicit dependency, but needs to be.
+ // See comment on error returned below.
+ pkg.err = &DirectImportFromImplicitDependencyError{
+ ImporterPath: pkg.path,
+ ImportedPath: dep.path,
+ Module: dep.mod,
+ }
+ }
+ continue
+ }
+
if pkg.err == nil && cfg.BuildMod != "mod" {
if v, ok := rs.rootSelected(dep.mod.Path); !ok || v != dep.mod.Version {
// dep.mod is not an explicit dependency, but needs to be.
@@ -1459,7 +1492,7 @@ func (ld *loader) pkg(ctx context.Context, path string, flags loadPkgFlags) *loa
panic("internal error: (*loader).pkg called with pkgImportsLoaded flag set")
}
- pkg := ld.pkgCache.Do(path, func() interface{} {
+ pkg := ld.pkgCache.Do(path, func() any {
pkg := &loadPkg{
path: path,
}
diff --git a/src/cmd/go/internal/modload/modfile.go b/src/cmd/go/internal/modload/modfile.go
index a7e92222a1..627cf1dbc0 100644
--- a/src/cmd/go/internal/modload/modfile.go
+++ b/src/cmd/go/internal/modload/modfile.go
@@ -118,8 +118,9 @@ type requireMeta struct {
type modPruning uint8
const (
- pruned modPruning = iota // transitive dependencies of modules at go 1.17 and higher are pruned out
- unpruned // no transitive dependencies are pruned out
+ pruned modPruning = iota // transitive dependencies of modules at go 1.17 and higher are pruned out
+ unpruned // no transitive dependencies are pruned out
+ workspace // pruned to the union of modules in the workspace
)
func pruningForGoVersion(goVersion string) modPruning {
@@ -339,6 +340,9 @@ func Replacement(mod module.Version) module.Version {
foundFrom, found, foundModRoot := "", module.Version{}, ""
if MainModules == nil {
return module.Version{}
+ } else if MainModules.Contains(mod.Path) && mod.Version == "" {
+ // Don't replace the workspace version of the main module.
+ return module.Version{}
}
if _, r, ok := replacement(mod, MainModules.WorkFileReplaceMap()); ok {
return r
@@ -554,7 +558,7 @@ type retraction struct {
//
// The caller must not modify the returned summary.
func goModSummary(m module.Version) (*modFileSummary, error) {
- if m.Version == "" && MainModules.Contains(m.Path) {
+ if m.Version == "" && !inWorkspaceMode() && MainModules.Contains(m.Path) {
panic("internal error: goModSummary called on a main module")
}
@@ -663,7 +667,7 @@ func rawGoModSummary(m module.Version) (*modFileSummary, error) {
summary *modFileSummary
err error
}
- c := rawGoModSummaryCache.Do(key{m}, func() interface{} {
+ c := rawGoModSummaryCache.Do(key{m}, func() any {
summary := new(modFileSummary)
name, data, err := rawGoModData(m)
if err != nil {
@@ -718,9 +722,14 @@ var rawGoModSummaryCache par.Cache // module.Version → rawGoModSummary result
func rawGoModData(m module.Version) (name string, data []byte, err error) {
if m.Version == "" {
// m is a replacement module with only a file path.
+
dir := m.Path
if !filepath.IsAbs(dir) {
- dir = filepath.Join(replaceRelativeTo(), dir)
+ if inWorkspaceMode() && MainModules.Contains(m.Path) {
+ dir = MainModules.ModRoot(m)
+ } else {
+ dir = filepath.Join(replaceRelativeTo(), dir)
+ }
}
name = filepath.Join(dir, "go.mod")
if gomodActual, ok := fsys.OverlayPath(name); ok {
@@ -760,7 +769,7 @@ func queryLatestVersionIgnoringRetractions(ctx context.Context, path string) (la
latest module.Version
err error
}
- e := latestVersionIgnoringRetractionsCache.Do(path, func() interface{} {
+ e := latestVersionIgnoringRetractionsCache.Do(path, func() any {
ctx, span := trace.StartSpan(ctx, "queryLatestVersionIgnoringRetractions "+path)
defer span.Done()
diff --git a/src/cmd/go/internal/modload/mvs.go b/src/cmd/go/internal/modload/mvs.go
index 40224d534b..588bcf4bdc 100644
--- a/src/cmd/go/internal/modload/mvs.go
+++ b/src/cmd/go/internal/modload/mvs.go
@@ -42,7 +42,7 @@ type mvsReqs struct {
}
func (r *mvsReqs) Required(mod module.Version) ([]module.Version, error) {
- if MainModules.Contains(mod.Path) {
+ if mod.Version == "" && MainModules.Contains(mod.Path) {
// Use the build list as it existed when r was constructed, not the current
// global build list.
return r.roots, nil
@@ -108,12 +108,12 @@ func versions(ctx context.Context, path string, allowed AllowedFunc) ([]string,
// previousVersion returns the tagged version of m.Path immediately prior to
// m.Version, or version "none" if no prior version is tagged.
//
-// Since the version of Target is not found in the version list,
+// Since the version of a main module is not found in the version list,
// it has no previous version.
func previousVersion(m module.Version) (module.Version, error) {
// TODO(golang.org/issue/38714): thread tracing context through MVS.
- if MainModules.Contains(m.Path) {
+ if m.Version == "" && MainModules.Contains(m.Path) {
return module.Version{Path: m.Path, Version: "none"}, nil
}
diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go
index 1eb484de9d..33808ea109 100644
--- a/src/cmd/go/internal/modload/query.go
+++ b/src/cmd/go/internal/modload/query.go
@@ -21,8 +21,8 @@ import (
"cmd/go/internal/imports"
"cmd/go/internal/modfetch"
"cmd/go/internal/search"
- "cmd/go/internal/trace"
"cmd/go/internal/str"
+ "cmd/go/internal/trace"
"golang.org/x/mod/module"
"golang.org/x/mod/semver"
diff --git a/src/cmd/go/internal/modload/vendor.go b/src/cmd/go/internal/modload/vendor.go
index a735cad905..5ea82a8620 100644
--- a/src/cmd/go/internal/modload/vendor.go
+++ b/src/cmd/go/internal/modload/vendor.go
@@ -147,7 +147,7 @@ func checkVendorConsistency(index *modFileIndex, modFile *modfile.File) {
}
vendErrors := new(strings.Builder)
- vendErrorf := func(mod module.Version, format string, args ...interface{}) {
+ vendErrorf := func(mod module.Version, format string, args ...any) {
detail := fmt.Sprintf(format, args...)
if mod.Version == "" {
fmt.Fprintf(vendErrors, "\n\t%s: %s", mod.Path, detail)
diff --git a/src/cmd/go/internal/mvs/mvs.go b/src/cmd/go/internal/mvs/mvs.go
index 566fa4b6b3..d25d447b0e 100644
--- a/src/cmd/go/internal/mvs/mvs.go
+++ b/src/cmd/go/internal/mvs/mvs.go
@@ -114,7 +114,7 @@ func buildList(targets []module.Version, reqs Reqs, upgrade func(module.Version)
for _, target := range targets {
work.Add(target)
}
- work.Do(10, func(item interface{}) {
+ work.Do(10, func(item any) {
m := item.(module.Version)
var required []module.Version
diff --git a/src/cmd/go/internal/par/work.go b/src/cmd/go/internal/par/work.go
index 960cec6fb1..496c41b150 100644
--- a/src/cmd/go/internal/par/work.go
+++ b/src/cmd/go/internal/par/work.go
@@ -14,24 +14,24 @@ import (
// Work manages a set of work items to be executed in parallel, at most once each.
// The items in the set must all be valid map keys.
type Work struct {
- f func(interface{}) // function to run for each item
- running int // total number of runners
+ f func(any) // function to run for each item
+ running int // total number of runners
mu sync.Mutex
- added map[interface{}]bool // items added to set
- todo []interface{} // items yet to be run
- wait sync.Cond // wait when todo is empty
- waiting int // number of runners waiting for todo
+ added map[any]bool // items added to set
+ todo []any // items yet to be run
+ wait sync.Cond // wait when todo is empty
+ waiting int // number of runners waiting for todo
}
func (w *Work) init() {
if w.added == nil {
- w.added = make(map[interface{}]bool)
+ w.added = make(map[any]bool)
}
}
// Add adds item to the work set, if it hasn't already been added.
-func (w *Work) Add(item interface{}) {
+func (w *Work) Add(item any) {
w.mu.Lock()
w.init()
if !w.added[item] {
@@ -51,7 +51,7 @@ func (w *Work) Add(item interface{}) {
// before calling Do (or else Do returns immediately),
// but it is allowed for f(item) to add new items to the set.
// Do should only be used once on a given Work.
-func (w *Work) Do(n int, f func(item interface{})) {
+func (w *Work) Do(n int, f func(item any)) {
if n < 1 {
panic("par.Work.Do: n < 1")
}
@@ -110,13 +110,13 @@ type Cache struct {
type cacheEntry struct {
done uint32
mu sync.Mutex
- result interface{}
+ result any
}
// Do calls the function f if and only if Do is being called for the first time with this key.
// No call to Do with a given key returns until the one call to f returns.
// Do returns the value returned by the one call to f.
-func (c *Cache) Do(key interface{}, f func() interface{}) interface{} {
+func (c *Cache) Do(key any, f func() any) any {
entryIface, ok := c.m.Load(key)
if !ok {
entryIface, _ = c.m.LoadOrStore(key, new(cacheEntry))
@@ -136,7 +136,7 @@ func (c *Cache) Do(key interface{}, f func() interface{}) interface{} {
// Get returns the cached result associated with key.
// It returns nil if there is no such result.
// If the result for key is being computed, Get does not wait for the computation to finish.
-func (c *Cache) Get(key interface{}) interface{} {
+func (c *Cache) Get(key any) any {
entryIface, ok := c.m.Load(key)
if !ok {
return nil
@@ -156,7 +156,7 @@ func (c *Cache) Get(key interface{}) interface{} {
// TODO(jayconrod): Delete this after the package cache clearing functions
// in internal/load have been removed.
func (c *Cache) Clear() {
- c.m.Range(func(key, value interface{}) bool {
+ c.m.Range(func(key, value any) bool {
c.m.Delete(key)
return true
})
@@ -169,7 +169,7 @@ func (c *Cache) Clear() {
//
// TODO(jayconrod): Delete this after the package cache clearing functions
// in internal/load have been removed.
-func (c *Cache) Delete(key interface{}) {
+func (c *Cache) Delete(key any) {
c.m.Delete(key)
}
@@ -180,8 +180,8 @@ func (c *Cache) Delete(key interface{}) {
//
// TODO(jayconrod): Delete this after the package cache clearing functions
// in internal/load have been removed.
-func (c *Cache) DeleteIf(pred func(key interface{}) bool) {
- c.m.Range(func(key, _ interface{}) bool {
+func (c *Cache) DeleteIf(pred func(key any) bool) {
+ c.m.Range(func(key, _ any) bool {
if pred(key) {
c.Delete(key)
}
diff --git a/src/cmd/go/internal/par/work_test.go b/src/cmd/go/internal/par/work_test.go
index f104bc4106..add0e640d8 100644
--- a/src/cmd/go/internal/par/work_test.go
+++ b/src/cmd/go/internal/par/work_test.go
@@ -16,7 +16,7 @@ func TestWork(t *testing.T) {
const N = 10000
n := int32(0)
w.Add(N)
- w.Do(100, func(x interface{}) {
+ w.Do(100, func(x any) {
atomic.AddInt32(&n, 1)
i := x.(int)
if i >= 2 {
@@ -40,7 +40,7 @@ func TestWorkParallel(t *testing.T) {
}
start := time.Now()
var n int32
- w.Do(N, func(x interface{}) {
+ w.Do(N, func(x any) {
time.Sleep(1 * time.Millisecond)
atomic.AddInt32(&n, +1)
})
@@ -58,19 +58,19 @@ func TestCache(t *testing.T) {
var cache Cache
n := 1
- v := cache.Do(1, func() interface{} { n++; return n })
+ v := cache.Do(1, func() any { n++; return n })
if v != 2 {
t.Fatalf("cache.Do(1) did not run f")
}
- v = cache.Do(1, func() interface{} { n++; return n })
+ v = cache.Do(1, func() any { n++; return n })
if v != 2 {
t.Fatalf("cache.Do(1) ran f again!")
}
- v = cache.Do(2, func() interface{} { n++; return n })
+ v = cache.Do(2, func() any { n++; return n })
if v != 3 {
t.Fatalf("cache.Do(2) did not run f")
}
- v = cache.Do(1, func() interface{} { n++; return n })
+ v = cache.Do(1, func() any { n++; return n })
if v != 2 {
t.Fatalf("cache.Do(1) did not returned saved value from original cache.Do(1)")
}
diff --git a/src/cmd/go/internal/run/run.go b/src/cmd/go/internal/run/run.go
index 03895d27eb..c4b70b64fe 100644
--- a/src/cmd/go/internal/run/run.go
+++ b/src/cmd/go/internal/run/run.go
@@ -18,8 +18,8 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/load"
"cmd/go/internal/modload"
- "cmd/go/internal/work"
"cmd/go/internal/str"
+ "cmd/go/internal/work"
)
var CmdRun = &base.Command{
@@ -69,7 +69,7 @@ func init() {
CmdRun.Flag.Var((*base.StringsFlag)(&work.ExecCmd), "exec", "")
}
-func printStderr(args ...interface{}) (int, error) {
+func printStderr(args ...any) (int, error) {
return fmt.Fprint(os.Stderr, args...)
}
diff --git a/src/cmd/go/internal/str/str.go b/src/cmd/go/internal/str/str.go
index 5bc521b9df..021bfbff77 100644
--- a/src/cmd/go/internal/str/str.go
+++ b/src/cmd/go/internal/str/str.go
@@ -14,7 +14,7 @@ import (
// StringList flattens its arguments into a single []string.
// Each argument in args must have type string or []string.
-func StringList(args ...interface{}) []string {
+func StringList(args ...any) []string {
var x []string
for _, arg := range args {
switch arg := arg.(type) {
diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go
index 7361c11786..50e6d5201b 100644
--- a/src/cmd/go/internal/test/test.go
+++ b/src/cmd/go/internal/test/test.go
@@ -65,7 +65,7 @@ followed by detailed output for each failed package.
'Go test' recompiles each package along with any files with names matching
the file pattern "*_test.go".
These additional files can contain test functions, benchmark functions, fuzz
-targets and example functions. See 'go help testfunc' for more.
+tests and example functions. See 'go help testfunc' for more.
Each listed package causes the execution of a separate test binary.
Files whose names begin with "_" (including "_test.go") or "." are ignored.
@@ -214,7 +214,7 @@ control the execution of any test:
Run each test, benchmark, and fuzz seed n times (default 1).
If -cpu is set, run n times for each GOMAXPROCS value.
Examples are always run once. -count does not apply to
- fuzz targets matched by -fuzz.
+ fuzz tests matched by -fuzz.
-cover
Enable coverage analysis.
@@ -242,41 +242,48 @@ control the execution of any test:
-cpu 1,2,4
Specify a list of GOMAXPROCS values for which the tests, benchmarks or
- fuzz targets should be executed. The default is the current value
- of GOMAXPROCS. -cpu does not apply to fuzz targets matched by -fuzz.
+ fuzz tests should be executed. The default is the current value
+ of GOMAXPROCS. -cpu does not apply to fuzz tests matched by -fuzz.
-failfast
Do not start new tests after the first test failure.
-fuzz regexp
- Run the fuzz target matching the regular expression. When specified,
+ Run the fuzz test matching the regular expression. When specified,
the command line argument must match exactly one package within the
- main module, and regexp must match exactly one fuzz target within
- that package. After tests, benchmarks, seed corpora of other fuzz
- targets, and examples have completed, the matching target will be
- fuzzed. See the Fuzzing section of the testing package documentation
- for details.
+ main module, and regexp must match exactly one fuzz test within
+ that package. Fuzzing will occur after tests, benchmarks, seed corpora
+ of other fuzz tests, and examples have completed. See the Fuzzing
+ section of the testing package documentation for details.
-fuzztime t
- Run enough iterations of the fuzz test to take t, specified as a
- time.Duration (for example, -fuzztime 1h30s). The default is to run
- forever.
- The special syntax Nx means to run the fuzz test N times
- (for example, -fuzztime 100x).
+ Run enough iterations of the fuzz target during fuzzing to take t,
+ specified as a time.Duration (for example, -fuzztime 1h30s).
+ The default is to run forever.
+ The special syntax Nx means to run the fuzz target N times
+ (for example, -fuzztime 1000x).
+
+ -fuzzminimizetime t
+ Run enough iterations of the fuzz target during each minimization
+ attempt to take t, specified as a time.Duration (for example,
+ -fuzzminimizetime 30s).
+ The default is 60s.
+ The special syntax Nx means to run the fuzz target N times
+ (for example, -fuzzminimizetime 100x).
-json
Log verbose output and test results in JSON. This presents the
same information as the -v flag in a machine-readable format.
-list regexp
- List tests, benchmarks, fuzz targets, or examples matching the regular
- expression. No tests, benchmarks, fuzz targets, or examples will be run.
+ List tests, benchmarks, fuzz tests, or examples matching the regular
+ expression. No tests, benchmarks, fuzz tests, or examples will be run.
This will only list top-level tests. No subtest or subbenchmarks will be
shown.
-parallel n
Allow parallel execution of test functions that call t.Parallel, and
- f.Fuzz functions that call t.Parallel when running the seed corpus.
+ fuzz targets that call t.Parallel when running the seed corpus.
The value of this flag is the maximum number of tests to run
simultaneously.
While fuzzing, the value of this flag is the maximum number of
@@ -291,7 +298,7 @@ control the execution of any test:
(see 'go help build').
-run regexp
- Run only those tests, examples, and fuzz targets matching the regular
+ Run only those tests, examples, and fuzz tests matching the regular
expression. For tests, the regular expression is split by unbracketed
slash (/) characters into a sequence of regular expressions, and each
part of a test's identifier must match the corresponding element in
@@ -468,7 +475,7 @@ A benchmark function is one named BenchmarkXxx and should have the signature,
func BenchmarkXxx(b *testing.B) { ... }
-A fuzz target is one named FuzzXxx and should have the signature,
+A fuzz test is one named FuzzXxx and should have the signature,
func FuzzXxx(f *testing.F) { ... }
@@ -511,7 +518,7 @@ Here is another example where the ordering of the output is ignored:
The entire test file is presented as the example when it contains a single
example function, at least one other function, type, variable, or constant
-declaration, and no fuzz targets or test or benchmark functions.
+declaration, and no tests, benchmarks, or fuzz tests.
See the documentation of the testing package for more information.
`,
@@ -620,8 +627,8 @@ var defaultVetFlags = []string{
}
func runTest(ctx context.Context, cmd *base.Command, args []string) {
- modload.InitWorkfile()
pkgArgs, testArgs = testFlags(args)
+ modload.InitWorkfile() // The test command does custom flag processing; initialize workspaces after that.
if cfg.DebugTrace != "" {
var close func() error
@@ -891,6 +898,17 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
}
}
+ // Collect all the packages imported by the packages being tested.
+ allImports := make(map[*load.Package]bool)
+ for _, p := range pkgs {
+ if p.Error != nil && p.Error.IsImportCycle {
+ continue
+ }
+ for _, p1 := range p.Internal.Imports {
+ allImports[p1] = true
+ }
+ }
+
// Prepare build + run + print actions for all packages being tested.
for _, p := range pkgs {
// sync/atomic import is inserted by the cover tool. See #18486
@@ -898,7 +916,7 @@ func runTest(ctx context.Context, cmd *base.Command, args []string) {
ensureImport(p, "sync/atomic")
}
- buildTest, runTest, printTest, err := builderTest(&b, ctx, pkgOpts, p)
+ buildTest, runTest, printTest, err := builderTest(&b, ctx, pkgOpts, p, allImports[p])
if err != nil {
str := err.Error()
str = strings.TrimPrefix(str, "\n")
@@ -965,7 +983,7 @@ var windowsBadWords = []string{
"update",
}
-func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package) (buildAction, runAction, printAction *work.Action, err error) {
+func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts, p *load.Package, imported bool) (buildAction, runAction, printAction *work.Action, err error) {
if len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 {
build := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
run := &work.Action{Mode: "test run", Package: p, Deps: []*work.Action{build}}
@@ -993,6 +1011,16 @@ func builderTest(b *work.Builder, ctx context.Context, pkgOpts load.PackageOpts,
return nil, nil, nil, err
}
+ // If imported is true then this package is imported by some
+ // package being tested. Make building the test version of the
+ // package depend on building the non-test version, so that we
+ // only report build errors once. Issue #44624.
+ if imported && ptest != p {
+ buildTest := b.CompileAction(work.ModeBuild, work.ModeBuild, ptest)
+ buildP := b.CompileAction(work.ModeBuild, work.ModeBuild, p)
+ buildTest.Deps = append(buildTest.Deps, buildP)
+ }
+
// Use last element of import path, not package name.
// They differ when package name is "main".
// But if the import path is "command-line-arguments",
@@ -1196,8 +1224,8 @@ func declareCoverVars(p *load.Package, files ...string) map[string]*load.CoverVa
}
var noTestsToRun = []byte("\ntesting: warning: no tests to run\n")
-var noTargetsToFuzz = []byte("\ntesting: warning: no targets to fuzz\n")
-var tooManyTargetsToFuzz = []byte("\ntesting: warning: -fuzz matches more than one target, won't fuzz\n")
+var noFuzzTestsToFuzz = []byte("\ntesting: warning: no fuzz tests to fuzz\n")
+var tooManyFuzzTestsToFuzz = []byte("\ntesting: warning: -fuzz matches more than one fuzz test, won't fuzz\n")
type runCache struct {
disableCache bool // cache should be disabled for this run
@@ -1399,11 +1427,11 @@ func (c *runCache) builderRunTest(b *work.Builder, ctx context.Context, a *work.
if bytes.HasPrefix(out, noTestsToRun[1:]) || bytes.Contains(out, noTestsToRun) {
norun = " [no tests to run]"
}
- if bytes.HasPrefix(out, noTargetsToFuzz[1:]) || bytes.Contains(out, noTargetsToFuzz) {
- norun = " [no targets to fuzz]"
+ if bytes.HasPrefix(out, noFuzzTestsToFuzz[1:]) || bytes.Contains(out, noFuzzTestsToFuzz) {
+ norun = " [no fuzz tests to fuzz]"
}
- if bytes.HasPrefix(out, tooManyTargetsToFuzz[1:]) || bytes.Contains(out, tooManyTargetsToFuzz) {
- norun = " [will not fuzz, -fuzz matches more than one target]"
+ if bytes.HasPrefix(out, tooManyFuzzTestsToFuzz[1:]) || bytes.Contains(out, tooManyFuzzTestsToFuzz) {
+ norun = " [-fuzz matches more than one fuzz test, won't fuzz]"
}
if len(out) > 0 && !bytes.HasSuffix(out, []byte("\n")) {
// Ensure that the output ends with a newline before the "ok"
diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go
index b2ce80325a..fd521b2eb1 100644
--- a/src/cmd/go/internal/vcs/vcs.go
+++ b/src/cmd/go/internal/vcs/vcs.go
@@ -6,7 +6,6 @@ package vcs
import (
"bytes"
- "encoding/json"
"errors"
"fmt"
exec "internal/execabs"
@@ -26,8 +25,8 @@ import (
"cmd/go/internal/base"
"cmd/go/internal/cfg"
"cmd/go/internal/search"
- "cmd/go/internal/web"
"cmd/go/internal/str"
+ "cmd/go/internal/web"
"golang.org/x/mod/module"
)
@@ -165,7 +164,7 @@ func hgRemoteRepo(vcsHg *Cmd, rootDir string) (remoteRepo string, err error) {
func hgStatus(vcsHg *Cmd, rootDir string) (Status, error) {
// Output changeset ID and seconds since epoch.
- out, err := vcsHg.runOutputVerboseOnly(rootDir, `log -l1 -T {node}:{date(date,"%s")}`)
+ out, err := vcsHg.runOutputVerboseOnly(rootDir, `log -l1 -T {node}:{date|hgdate}`)
if err != nil {
return Status{}, err
}
@@ -174,6 +173,10 @@ func hgStatus(vcsHg *Cmd, rootDir string) (Status, error) {
var rev string
var commitTime time.Time
if len(out) > 0 {
+ // Strip trailing timezone offset.
+ if i := bytes.IndexByte(out, ' '); i > 0 {
+ out = out[:i]
+ }
rev, commitTime, err = parseRevTime(out)
if err != nil {
return Status{}, err
@@ -309,7 +312,7 @@ func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) {
// uncommitted files and skip tagging revision / committime.
var rev string
var commitTime time.Time
- out, err = vcsGit.runOutputVerboseOnly(rootDir, "show -s --format=%H:%ct")
+ out, err = vcsGit.runOutputVerboseOnly(rootDir, "show -s --no-show-signature --format=%H:%ct")
if err != nil && !uncommitted {
return Status{}, err
} else if err == nil {
@@ -1311,7 +1314,7 @@ func metaImportsForPrefix(importPrefix string, mod ModuleMode, security web.Secu
return res, nil
}
- resi, _, _ := fetchGroup.Do(importPrefix, func() (resi interface{}, err error) {
+ resi, _, _ := fetchGroup.Do(importPrefix, func() (resi any, err error) {
fetchCacheMu.Lock()
if res, ok := fetchCache[importPrefix]; ok {
fetchCacheMu.Unlock()
@@ -1437,8 +1440,9 @@ var vcsPaths = []*vcsPath{
{
pathPrefix: "bitbucket.org",
regexp: lazyregexp.New(`^(?Pbitbucket\.org/(?P[A-Za-z0-9_.\-]+/[A-Za-z0-9_.\-]+))(/[A-Za-z0-9_.\-]+)*$`),
+ vcs: "git",
repo: "https://{root}",
- check: bitbucketVCS,
+ check: noVCSSuffix,
},
// IBM DevOps Services (JazzHub)
@@ -1510,56 +1514,6 @@ func noVCSSuffix(match map[string]string) error {
return nil
}
-// bitbucketVCS determines the version control system for a
-// Bitbucket repository, by using the Bitbucket API.
-func bitbucketVCS(match map[string]string) error {
- if err := noVCSSuffix(match); err != nil {
- return err
- }
-
- var resp struct {
- SCM string `json:"scm"`
- }
- url := &urlpkg.URL{
- Scheme: "https",
- Host: "api.bitbucket.org",
- Path: expand(match, "/2.0/repositories/{bitname}"),
- RawQuery: "fields=scm",
- }
- data, err := web.GetBytes(url)
- if err != nil {
- if httpErr, ok := err.(*web.HTTPError); ok && httpErr.StatusCode == 403 {
- // this may be a private repository. If so, attempt to determine which
- // VCS it uses. See issue 5375.
- root := match["root"]
- for _, vcs := range []string{"git", "hg"} {
- if vcsByCmd(vcs).Ping("https", root) == nil {
- resp.SCM = vcs
- break
- }
- }
- }
-
- if resp.SCM == "" {
- return err
- }
- } else {
- if err := json.Unmarshal(data, &resp); err != nil {
- return fmt.Errorf("decoding %s: %v", url, err)
- }
- }
-
- if vcsByCmd(resp.SCM) != nil {
- match["vcs"] = resp.SCM
- if resp.SCM == "git" {
- match["repo"] += ".git"
- }
- return nil
- }
-
- return fmt.Errorf("unable to detect version control system for bitbucket.org/ path")
-}
-
// launchpadVCS solves the ambiguity for "lp.net/project/foo". In this case,
// "foo" could be a series name registered in Launchpad with its own branch,
// and it could also be the name of a directory within the main project
@@ -1588,7 +1542,7 @@ type importError struct {
err error
}
-func importErrorf(path, format string, args ...interface{}) error {
+func importErrorf(path, format string, args ...any) error {
err := &importError{importPath: path, err: fmt.Errorf(format, args...)}
if errStr := err.Error(); !strings.Contains(errStr, path) {
panic(fmt.Sprintf("path %q not in error %q", path, errStr))
diff --git a/src/cmd/go/internal/vcs/vcs_test.go b/src/cmd/go/internal/vcs/vcs_test.go
index c4e4f4d3c6..943d520d54 100644
--- a/src/cmd/go/internal/vcs/vcs_test.go
+++ b/src/cmd/go/internal/vcs/vcs_test.go
@@ -183,6 +183,13 @@ func TestRepoRootForImportPath(t *testing.T) {
"chiselapp.com/user/kyle/fossilgg",
nil,
},
+ {
+ "bitbucket.org/workspace/pkgname",
+ &RepoRoot{
+ VCS: vcsGit,
+ Repo: "https://bitbucket.org/workspace/pkgname",
+ },
+ },
}
for _, test := range tests {
diff --git a/src/cmd/go/internal/version/version.go b/src/cmd/go/internal/version/version.go
index febc7c638a..52502e95c6 100644
--- a/src/cmd/go/internal/version/version.go
+++ b/src/cmd/go/internal/version/version.go
@@ -151,6 +151,7 @@ func scanFile(file string, info fs.FileInfo, mustPrint bool) {
fmt.Fprintf(os.Stderr, "%s: %v\n", file, err)
}
}
+ return
}
fmt.Printf("%s: %s\n", file, bi.GoVersion)
diff --git a/src/cmd/go/internal/work/action.go b/src/cmd/go/internal/work/action.go
index 6f5ac1364c..c0862c5efe 100644
--- a/src/cmd/go/internal/work/action.go
+++ b/src/cmd/go/internal/work/action.go
@@ -37,7 +37,7 @@ type Builder struct {
actionCache map[cacheKey]*Action // a cache of already-constructed actions
mkdirCache map[string]bool // a cache of created directories
flagCache map[[2]string]bool // a cache of supported compiler flags
- Print func(args ...interface{}) (int, error)
+ Print func(args ...any) (int, error)
IsCmdList bool // running as part of go list; set p.Stale and additional fields below
NeedError bool // list needs p.Error
@@ -120,8 +120,8 @@ type actionQueue []*Action
func (q *actionQueue) Len() int { return len(*q) }
func (q *actionQueue) Swap(i, j int) { (*q)[i], (*q)[j] = (*q)[j], (*q)[i] }
func (q *actionQueue) Less(i, j int) bool { return (*q)[i].priority < (*q)[j].priority }
-func (q *actionQueue) Push(x interface{}) { *q = append(*q, x.(*Action)) }
-func (q *actionQueue) Pop() interface{} {
+func (q *actionQueue) Push(x any) { *q = append(*q, x.(*Action)) }
+func (q *actionQueue) Pop() any {
n := len(*q) - 1
x := (*q)[n]
*q = (*q)[:n]
@@ -241,7 +241,7 @@ const (
)
func (b *Builder) Init() {
- b.Print = func(a ...interface{}) (int, error) {
+ b.Print = func(a ...any) (int, error) {
return fmt.Fprint(os.Stderr, a...)
}
b.actionCache = make(map[cacheKey]*Action)
diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go
index 9d0ad27f0d..1c278d3d99 100644
--- a/src/cmd/go/internal/work/build.go
+++ b/src/cmd/go/internal/work/build.go
@@ -88,11 +88,6 @@ and test commands:
-asmflags '[pattern=]arg list'
arguments to pass on each go tool asm invocation.
- -buildinfo
- Whether to stamp binaries with build flags. By default, the compiler name
- (gc or gccgo), toolchain flags (like -gcflags), and environment variables
- containing flags (like CGO_CFLAGS) are stamped into binaries. Use
- -buildinfo=false to omit build information. See also -buildvcs.
-buildmode mode
build mode to use. See 'go help buildmode' for more.
-buildvcs
@@ -100,7 +95,7 @@ and test commands:
version control information is stamped into a binary if the main package
and the main module containing it are in the repository containing the
current directory (if there is a repository). Use -buildvcs=false to
- omit version control information. See also -buildinfo.
+ omit version control information.
-compiler name
name of compiler to use, as in runtime.Compiler (gccgo or gc).
-gccgoflags '[pattern=]arg list'
@@ -167,9 +162,8 @@ and test commands:
-trimpath
remove all file system paths from the resulting executable.
Instead of absolute file system paths, the recorded file names
- will begin with either "go" (for the standard library),
- or a module path@version (when using modules),
- or a plain import path (when using GOPATH).
+ will begin with either a module path@version (when using modules),
+ or a plain import path (when using the standard library, or GOPATH).
-toolexec 'cmd args'
a program to use to invoke toolchain programs like vet and asm.
For example, instead of running asm, the go command will run
@@ -317,7 +311,6 @@ func AddBuildFlags(cmd *base.Command, mask BuildFlagMask) {
cmd.Flag.Var((*base.StringsFlag)(&cfg.BuildToolexec), "toolexec", "")
cmd.Flag.BoolVar(&cfg.BuildTrimpath, "trimpath", false, "")
cmd.Flag.BoolVar(&cfg.BuildWork, "work", false, "")
- cmd.Flag.BoolVar(&cfg.BuildBuildinfo, "buildinfo", true, "")
cmd.Flag.BoolVar(&cfg.BuildBuildvcs, "buildvcs", true, "")
// Undocumented, unstable debugging flags.
@@ -617,6 +610,7 @@ func runInstall(ctx context.Context, cmd *base.Command, args []string) {
}
}
+ modload.InitWorkfile()
BuildInit()
pkgs := load.PackagesAndErrors(ctx, load.PackageOpts{}, args)
if cfg.ModulesEnabled && !modload.HasModRoot() {
diff --git a/src/cmd/go/internal/work/build_test.go b/src/cmd/go/internal/work/build_test.go
index 600fc3083f..0b6b83a706 100644
--- a/src/cmd/go/internal/work/build_test.go
+++ b/src/cmd/go/internal/work/build_test.go
@@ -234,7 +234,7 @@ func TestRespectSetgidDir(t *testing.T) {
// of `(*Builder).ShowCmd` afterwards as a sanity check.
cfg.BuildX = true
var cmdBuf bytes.Buffer
- b.Print = func(a ...interface{}) (int, error) {
+ b.Print = func(a ...any) (int, error) {
return cmdBuf.WriteString(fmt.Sprint(a...))
}
diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go
index d4f2a716d7..76335e9bb1 100644
--- a/src/cmd/go/internal/work/buildid.go
+++ b/src/cmd/go/internal/work/buildid.go
@@ -15,8 +15,8 @@ import (
"cmd/go/internal/cache"
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
- "cmd/internal/buildid"
"cmd/go/internal/str"
+ "cmd/internal/buildid"
)
// Build IDs
@@ -570,6 +570,8 @@ func showStdout(b *Builder, c *cache.Cache, actionID cache.ActionID, key string)
b.Showcmd("", "%s # internal", joinUnambiguously(str.StringList("cat", c.OutputFile(stdoutEntry.OutputID))))
}
if !cfg.BuildN {
+ b.output.Lock()
+ defer b.output.Unlock()
b.Print(string(stdout))
}
}
@@ -578,6 +580,8 @@ func showStdout(b *Builder, c *cache.Cache, actionID cache.ActionID, key string)
// flushOutput flushes the output being queued in a.
func (b *Builder) flushOutput(a *Action) {
+ b.output.Lock()
+ defer b.output.Unlock()
b.Print(string(a.output))
a.output = nil
}
diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go
index 03f8866cf2..48a74458bd 100644
--- a/src/cmd/go/internal/work/exec.go
+++ b/src/cmd/go/internal/work/exec.go
@@ -236,11 +236,13 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID {
}
} else if p.Goroot {
// The Go compiler always hides the exact value of $GOROOT
- // when building things in GOROOT, but the C compiler
- // merely rewrites GOROOT to GOROOT_FINAL.
- if len(p.CFiles) > 0 {
- fmt.Fprintf(h, "goroot %s\n", cfg.GOROOT_FINAL)
- }
+ // when building things in GOROOT.
+ //
+ // The C compiler does not, but for packages in GOROOT we rewrite the path
+ // as though -trimpath were set, so that we don't invalidate the build cache
+ // (and especially any precompiled C archive files) when changing
+ // GOROOT_FINAL. (See https://go.dev/issue/50183.)
+ //
// b.WorkDir is always either trimmed or rewritten to
// the literal string "/tmp/go-build".
} else if !strings.HasPrefix(p.Dir, b.WorkDir) {
@@ -794,10 +796,13 @@ OverlayLoop:
}
if p.Internal.BuildInfo != "" && cfg.ModulesEnabled {
- if err := b.writeFile(objdir+"_gomod_.go", modload.ModInfoProg(p.Internal.BuildInfo, cfg.BuildToolchainName == "gccgo")); err != nil {
- return err
+ prog := modload.ModInfoProg(p.Internal.BuildInfo, cfg.BuildToolchainName == "gccgo")
+ if len(prog) > 0 {
+ if err := b.writeFile(objdir+"_gomod_.go", prog); err != nil {
+ return err
+ }
+ gofiles = append(gofiles, objdir+"_gomod_.go")
}
- gofiles = append(gofiles, objdir+"_gomod_.go")
}
// Compile Go.
@@ -1394,6 +1399,7 @@ func (b *Builder) writeLinkImportcfg(a *Action, file string) error {
fmt.Fprintf(&icfg, "packageshlib %s=%s\n", p1.ImportPath, p1.Shlib)
}
}
+ fmt.Fprintf(&icfg, "modinfo %q\n", modload.ModInfoData(a.Package.Internal.BuildInfo))
return b.writeFile(file, icfg.Bytes())
}
@@ -1944,7 +1950,7 @@ func mayberemovefile(s string) {
// fmtcmd replaces the name of the current directory with dot (.)
// but only when it is at the beginning of a space-separated token.
//
-func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string {
+func (b *Builder) fmtcmd(dir string, format string, args ...any) string {
cmd := fmt.Sprintf(format, args...)
if dir != "" && dir != "/" {
dot := " ."
@@ -1970,7 +1976,7 @@ func (b *Builder) fmtcmd(dir string, format string, args ...interface{}) string
// showcmd prints the given command to standard output
// for the implementation of -n or -x.
-func (b *Builder) Showcmd(dir string, format string, args ...interface{}) {
+func (b *Builder) Showcmd(dir string, format string, args ...any) {
b.output.Lock()
defer b.output.Unlock()
b.Print(b.fmtcmd(dir, format, args...) + "\n")
@@ -2034,7 +2040,7 @@ var cgoTypeSigRe = lazyregexp.New(`\b_C2?(type|func|var|macro)_\B`)
// run runs the command given by cmdline in the directory dir.
// If the command fails, run prints information about the failure
// and returns a non-nil error.
-func (b *Builder) run(a *Action, dir string, desc string, env []string, cmdargs ...interface{}) error {
+func (b *Builder) run(a *Action, dir string, desc string, env []string, cmdargs ...any) error {
out, err := b.runOut(a, dir, env, cmdargs...)
if len(out) > 0 {
if desc == "" {
@@ -2068,7 +2074,7 @@ func (b *Builder) processOutput(out []byte) string {
// runOut runs the command given by cmdline in the directory dir.
// It returns the command output and any errors that occurred.
// It accumulates execution time in a.
-func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...interface{}) ([]byte, error) {
+func (b *Builder) runOut(a *Action, dir string, env []string, cmdargs ...any) ([]byte, error) {
cmdline := str.StringList(cmdargs...)
for _, arg := range cmdline {
@@ -2333,7 +2339,7 @@ func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []s
// directives pointing to the source directory. It should not generate those
// when -trimpath is enabled.
if b.gccSupportsFlag(compiler, "-fdebug-prefix-map=a=b") {
- if cfg.BuildTrimpath {
+ if cfg.BuildTrimpath || p.Goroot {
// Keep in sync with Action.trimpath.
// The trimmed paths are a little different, but we need to trim in the
// same situations.
@@ -2355,8 +2361,6 @@ func (b *Builder) ccompile(a *Action, p *load.Package, outfile string, flags []s
to = filepath.Join("/_", toPath)
}
flags = append(flags[:len(flags):len(flags)], "-fdebug-prefix-map="+from+"="+to)
- } else if p.Goroot && cfg.GOROOT_FINAL != cfg.GOROOT {
- flags = append(flags[:len(flags):len(flags)], "-fdebug-prefix-map="+cfg.GOROOT+"="+cfg.GOROOT_FINAL)
}
}
@@ -2405,7 +2409,7 @@ func (b *Builder) gccld(a *Action, p *load.Package, objdir, outfile string, flag
cmd = b.GccCmd(p.Dir, objdir)
}
- cmdargs := []interface{}{cmd, "-o", outfile, objs, flags}
+ cmdargs := []any{cmd, "-o", outfile, objs, flags}
dir := p.Dir
out, err := b.runOut(a, base.Cwd(), b.cCompilerEnv(), cmdargs...)
diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go
index e3b4a817e7..40175324d2 100644
--- a/src/cmd/go/internal/work/gc.go
+++ b/src/cmd/go/internal/work/gc.go
@@ -165,7 +165,7 @@ func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg
gcflags = append(gcflags, fmt.Sprintf("-c=%d", c))
}
- args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), defaultGcFlags, gcflags}
+ args := []any{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), defaultGcFlags, gcflags}
if p.Internal.LocalPrefix == "" {
args = append(args, "-nolocalimports")
} else {
@@ -362,11 +362,11 @@ func (a *Action) trimpath() string {
return rewrite
}
-func asmArgs(a *Action, p *load.Package) []interface{} {
+func asmArgs(a *Action, p *load.Package) []any {
// Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files.
inc := filepath.Join(cfg.GOROOT, "pkg", "include")
pkgpath := pkgPath(a)
- args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-p", pkgpath, "-trimpath", a.trimpath(), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags}
+ args := []any{cfg.BuildToolexec, base.Tool("asm"), "-p", pkgpath, "-trimpath", a.trimpath(), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags}
if p.ImportPath == "runtime" && cfg.Goarch == "386" {
for _, arg := range forcedAsmflags {
if arg == "-dynlink" {
@@ -455,8 +455,8 @@ func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, erro
// toolVerify checks that the command line args writes the same output file
// if run using newTool instead.
// Unused now but kept around for future use.
-func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []interface{}) error {
- newArgs := make([]interface{}, len(args))
+func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []any) error {
+ newArgs := make([]any, len(args))
copy(newArgs, args)
newArgs[1] = base.Tool(newTool)
newArgs[3] = ofile + ".new" // x.6 becomes x.6.new
diff --git a/src/cmd/go/internal/work/gccgo.go b/src/cmd/go/internal/work/gccgo.go
index 60181b99e4..1499536932 100644
--- a/src/cmd/go/internal/work/gccgo.go
+++ b/src/cmd/go/internal/work/gccgo.go
@@ -16,8 +16,8 @@ import (
"cmd/go/internal/cfg"
"cmd/go/internal/fsys"
"cmd/go/internal/load"
- "cmd/internal/pkgpath"
"cmd/go/internal/str"
+ "cmd/internal/pkgpath"
)
// The Gccgo toolchain.
diff --git a/src/cmd/go/internal/work/init.go b/src/cmd/go/internal/work/init.go
index dc368de1c1..26192ecaed 100644
--- a/src/cmd/go/internal/work/init.go
+++ b/src/cmd/go/internal/work/init.go
@@ -138,7 +138,7 @@ func instrumentInit() {
cfg.BuildContext.InstallSuffix += "_"
}
cfg.BuildContext.InstallSuffix += mode
- cfg.BuildContext.BuildTags = append(cfg.BuildContext.BuildTags, mode)
+ cfg.BuildContext.ToolTags = append(cfg.BuildContext.ToolTags, mode)
}
func buildModeInit() {
diff --git a/src/cmd/go/internal/modcmd/editwork.go b/src/cmd/go/internal/workcmd/edit.go
similarity index 58%
rename from src/cmd/go/internal/modcmd/editwork.go
rename to src/cmd/go/internal/workcmd/edit.go
index 50f86366a0..879ddc3b1d 100644
--- a/src/cmd/go/internal/modcmd/editwork.go
+++ b/src/cmd/go/internal/workcmd/edit.go
@@ -2,32 +2,32 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// go mod editwork
+// go work edit
-package modcmd
+package workcmd
import (
- "bytes"
"cmd/go/internal/base"
- "cmd/go/internal/lockedfile"
"cmd/go/internal/modload"
"context"
"encoding/json"
- "errors"
+ "fmt"
"os"
"path/filepath"
"strings"
+ "golang.org/x/mod/module"
+
"golang.org/x/mod/modfile"
)
-var cmdEditwork = &base.Command{
- UsageLine: "go mod editwork [editing flags] [go.work]",
+var cmdEdit = &base.Command{
+ UsageLine: "go work edit [editing flags] [go.work]",
Short: "edit go.work from tools or scripts",
- Long: `Editwork provides a command-line interface for editing go.work,
+ Long: `Edit provides a command-line interface for editing go.work,
for use primarily by tools or scripts. It only reads go.work;
it does not look up information about the modules involved.
-If no file is specified, editwork looks for a go.work file in the current
+If no file is specified, Edit looks for a go.work file in the current
directory and its parent directories
The editing flags specify a sequence of editing operations.
@@ -35,10 +35,10 @@ The editing flags specify a sequence of editing operations.
The -fmt flag reformats the go.work file without making other changes.
This reformatting is also implied by any other modifications that use or
rewrite the go.mod file. The only time this flag is needed is if no other
-flags are specified, as in 'go mod editwork -fmt'.
+flags are specified, as in 'go work edit -fmt'.
-The -directory=path and -dropdirectory=path flags
-add and drop a directory from the go.work files set of module directories.
+The -use=path and -dropuse=path flags
+add and drop a use directive from the go.work file's set of module directories.
The -replace=old[@v]=new[@v] flag adds a replacement of the given
module path and version pair. If the @v in old@v is omitted, a
@@ -52,7 +52,7 @@ The -dropreplace=old[@v] flag drops a replacement of the given
module path and version pair. If the @v is omitted, a replacement without
a version on the left side is dropped.
-The -directory, -dropdirectory, -replace, and -dropreplace,
+The -use, -dropuse, -replace, and -dropreplace,
editing flags may be repeated, and the changes are applied in the order given.
The -go=version flag sets the expected Go language version.
@@ -63,19 +63,14 @@ writing it back to go.mod.
The -json flag prints the final go.work file in JSON format instead of
writing it back to go.mod. The JSON output corresponds to these Go types:
- type Module struct {
- Path string
- Version string
- }
-
type GoWork struct {
- Go string
- Directory []Directory
- Replace []Replace
+ Go string
+ Use []Use
+ Replace []Replace
}
- type Directory struct {
- Path string
+ type Use struct {
+ DiskPath string
ModulePath string
}
@@ -84,6 +79,11 @@ writing it back to go.mod. The JSON output corresponds to these Go types:
New Module
}
+ type Module struct {
+ Path string
+ Version string
+ }
+
See the workspaces design proposal at
https://go.googlesource.com/proposal/+/master/design/45713-workspace.md for
more information.
@@ -91,42 +91,47 @@ more information.
}
var (
- editworkFmt = cmdEditwork.Flag.Bool("fmt", false, "")
- editworkGo = cmdEditwork.Flag.String("go", "", "")
- editworkJSON = cmdEditwork.Flag.Bool("json", false, "")
- editworkPrint = cmdEditwork.Flag.Bool("print", false, "")
- workedits []func(file *modfile.WorkFile) // edits specified in flags
+ editFmt = cmdEdit.Flag.Bool("fmt", false, "")
+ editGo = cmdEdit.Flag.String("go", "", "")
+ editJSON = cmdEdit.Flag.Bool("json", false, "")
+ editPrint = cmdEdit.Flag.Bool("print", false, "")
+ workedits []func(file *modfile.WorkFile) // edits specified in flags
)
+type flagFunc func(string)
+
+func (f flagFunc) String() string { return "" }
+func (f flagFunc) Set(s string) error { f(s); return nil }
+
func init() {
- cmdEditwork.Run = runEditwork // break init cycle
+ cmdEdit.Run = runEditwork // break init cycle
- cmdEditwork.Flag.Var(flagFunc(flagEditworkDirectory), "directory", "")
- cmdEditwork.Flag.Var(flagFunc(flagEditworkDropDirectory), "dropdirectory", "")
- cmdEditwork.Flag.Var(flagFunc(flagEditworkReplace), "replace", "")
- cmdEditwork.Flag.Var(flagFunc(flagEditworkDropReplace), "dropreplace", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkUse), "use", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkDropUse), "dropuse", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkReplace), "replace", "")
+ cmdEdit.Flag.Var(flagFunc(flagEditworkDropReplace), "dropreplace", "")
- base.AddWorkfileFlag(&cmdEditwork.Flag)
+ base.AddWorkfileFlag(&cmdEdit.Flag)
}
func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
anyFlags :=
- *editworkGo != "" ||
- *editworkJSON ||
- *editworkPrint ||
- *editworkFmt ||
+ *editGo != "" ||
+ *editJSON ||
+ *editPrint ||
+ *editFmt ||
len(workedits) > 0
if !anyFlags {
- base.Fatalf("go: no flags specified (see 'go help mod editwork').")
+ base.Fatalf("go: no flags specified (see 'go help work edit').")
}
- if *editworkJSON && *editworkPrint {
+ if *editJSON && *editPrint {
base.Fatalf("go: cannot use both -json and -print")
}
if len(args) > 1 {
- base.Fatalf("go: 'go mod editwork' accepts at most one argument")
+ base.Fatalf("go: 'go work edit' accepts at most one argument")
}
var gowork string
if len(args) == 1 {
@@ -136,24 +141,19 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
gowork = modload.WorkFilePath()
}
- if *editworkGo != "" {
- if !modfile.GoVersionRE.MatchString(*editworkGo) {
+ if *editGo != "" {
+ if !modfile.GoVersionRE.MatchString(*editGo) {
base.Fatalf(`go mod: invalid -go option; expecting something like "-go %s"`, modload.LatestGoVersion())
}
}
- data, err := lockedfile.Read(gowork)
- if err != nil {
- base.Fatalf("go: %v", err)
- }
-
- workFile, err := modfile.ParseWork(gowork, data, nil)
+ workFile, err := modload.ReadWorkFile(gowork)
if err != nil {
base.Fatalf("go: errors parsing %s:\n%s", base.ShortPath(gowork), err)
}
- if *editworkGo != "" {
- if err := workFile.AddGoStmt(*editworkGo); err != nil {
+ if *editGo != "" {
+ if err := workFile.AddGoStmt(*editGo); err != nil {
base.Fatalf("go: internal error: %v", err)
}
}
@@ -163,56 +163,77 @@ func runEditwork(ctx context.Context, cmd *base.Command, args []string) {
edit(workFile)
}
}
+
+ modload.UpdateWorkFile(workFile)
+
workFile.SortBlocks()
workFile.Cleanup() // clean file after edits
- if *editworkJSON {
- editworkPrintJSON(workFile)
+ if *editJSON {
+ editPrintJSON(workFile)
return
}
- out := modfile.Format(workFile.Syntax)
-
- if *editworkPrint {
- os.Stdout.Write(out)
+ if *editPrint {
+ os.Stdout.Write(modfile.Format(workFile.Syntax))
return
}
- err = lockedfile.Transform(gowork, func(lockedData []byte) ([]byte, error) {
- if !bytes.Equal(lockedData, data) {
- return nil, errors.New("go.work changed during editing; not overwriting")
- }
- return out, nil
- })
- if err != nil {
- base.Fatalf("go: %v", err)
- }
+ modload.WriteWorkFile(gowork, workFile)
}
-// flagEditworkDirectory implements the -directory flag.
-func flagEditworkDirectory(arg string) {
+// flagEditworkUse implements the -use flag.
+func flagEditworkUse(arg string) {
workedits = append(workedits, func(f *modfile.WorkFile) {
_, mf, err := modload.ReadModFile(filepath.Join(arg, "go.mod"), nil)
modulePath := ""
if err == nil {
modulePath = mf.Module.Mod.Path
}
- f.AddDirectory(modload.ToDirectoryPath(arg), modulePath)
- if err := f.AddDirectory(modload.ToDirectoryPath(arg), ""); err != nil {
- base.Fatalf("go: -directory=%s: %v", arg, err)
+ f.AddUse(modload.ToDirectoryPath(arg), modulePath)
+ if err := f.AddUse(modload.ToDirectoryPath(arg), ""); err != nil {
+ base.Fatalf("go: -use=%s: %v", arg, err)
}
})
}
-// flagEditworkDropDirectory implements the -dropdirectory flag.
-func flagEditworkDropDirectory(arg string) {
+// flagEditworkDropUse implements the -dropuse flag.
+func flagEditworkDropUse(arg string) {
workedits = append(workedits, func(f *modfile.WorkFile) {
- if err := f.DropDirectory(modload.ToDirectoryPath(arg)); err != nil {
+ if err := f.DropUse(modload.ToDirectoryPath(arg)); err != nil {
base.Fatalf("go: -dropdirectory=%s: %v", arg, err)
}
})
}
+// allowedVersionArg returns whether a token may be used as a version in go.mod.
+// We don't call modfile.CheckPathVersion, because that insists on versions
+// being in semver form, but here we want to allow versions like "master" or
+// "1234abcdef", which the go command will resolve the next time it runs (or
+// during -fix). Even so, we need to make sure the version is a valid token.
+func allowedVersionArg(arg string) bool {
+ return !modfile.MustQuote(arg)
+}
+
+// parsePathVersionOptional parses path[@version], using adj to
+// describe any errors.
+func parsePathVersionOptional(adj, arg string, allowDirPath bool) (path, version string, err error) {
+ if i := strings.Index(arg, "@"); i < 0 {
+ path = arg
+ } else {
+ path, version = strings.TrimSpace(arg[:i]), strings.TrimSpace(arg[i+1:])
+ }
+ if err := module.CheckImportPath(path); err != nil {
+ if !allowDirPath || !modfile.IsDirectoryPath(path) {
+ return path, version, fmt.Errorf("invalid %s path: %v", adj, err)
+ }
+ }
+ if path != arg && !allowedVersionArg(version) {
+ return path, version, fmt.Errorf("invalid %s version: %q", adj, version)
+ }
+ return path, version, nil
+}
+
// flagReplace implements the -replace flag.
func flagEditworkReplace(arg string) {
var i int
@@ -255,14 +276,19 @@ func flagEditworkDropReplace(arg string) {
})
}
+type replaceJSON struct {
+ Old module.Version
+ New module.Version
+}
+
// editPrintJSON prints the -json output.
-func editworkPrintJSON(workFile *modfile.WorkFile) {
+func editPrintJSON(workFile *modfile.WorkFile) {
var f workfileJSON
if workFile.Go != nil {
f.Go = workFile.Go.Version
}
- for _, d := range workFile.Directory {
- f.Directory = append(f.Directory, directoryJSON{DiskPath: d.Path, ModPath: d.ModulePath})
+ for _, d := range workFile.Use {
+ f.Use = append(f.Use, useJSON{DiskPath: d.Path, ModPath: d.ModulePath})
}
for _, r := range workFile.Replace {
@@ -278,12 +304,12 @@ func editworkPrintJSON(workFile *modfile.WorkFile) {
// workfileJSON is the -json output data structure.
type workfileJSON struct {
- Go string `json:",omitempty"`
- Directory []directoryJSON
- Replace []replaceJSON
+ Go string `json:",omitempty"`
+ Use []useJSON
+ Replace []replaceJSON
}
-type directoryJSON struct {
+type useJSON struct {
DiskPath string
ModPath string `json:",omitempty"`
}
diff --git a/src/cmd/go/internal/workcmd/init.go b/src/cmd/go/internal/workcmd/init.go
new file mode 100644
index 0000000000..cefecee832
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/init.go
@@ -0,0 +1,52 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work init
+
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/modload"
+ "context"
+ "path/filepath"
+)
+
+var cmdInit = &base.Command{
+ UsageLine: "go work init [moddirs]",
+ Short: "initialize workspace file",
+ Long: `Init initializes and writes a new go.work file in the
+current directory, in effect creating a new workspace at the current
+directory.
+
+go work init optionally accepts paths to the workspace modules as
+arguments. If the argument is omitted, an empty workspace with no
+modules will be created.
+
+Each argument path is added to a use directive in the go.work file. The
+current go version will also be listed in the go.work file.
+
+`,
+ Run: runInit,
+}
+
+func init() {
+ base.AddModCommonFlags(&cmdInit.Flag)
+ base.AddWorkfileFlag(&cmdInit.Flag)
+}
+
+func runInit(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ modload.ForceUseModules = true
+
+ // TODO(matloob): support using the -workfile path
+ // To do that properly, we'll have to make the module directories
+ // make dirs relative to workFile path before adding the paths to
+ // the directory entries
+
+ workFile := filepath.Join(base.Cwd(), "go.work")
+
+ modload.CreateWorkFile(ctx, workFile, args)
+}
diff --git a/src/cmd/go/internal/workcmd/sync.go b/src/cmd/go/internal/workcmd/sync.go
new file mode 100644
index 0000000000..1cca817517
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/sync.go
@@ -0,0 +1,130 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work sync
+
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/imports"
+ "cmd/go/internal/modload"
+ "context"
+
+ "golang.org/x/mod/module"
+)
+
+var cmdSync = &base.Command{
+ UsageLine: "go work sync",
+ Short: "sync workspace build list to modules",
+ Long: `Sync syncs the workspace's build list back to the
+workspace's modules.
+
+The workspace's build list is the set of versions of all the
+(transitive) dependency modules used to do builds in the workspace. go
+work sync generates that build list using the Minimal Version Selection
+algorithm, and then syncs those versions back to each of the modules
+specified in the workspace (with use directives).
+
+The syncing is done by sequentially upgrading each of the dependency
+modules specified in a workspace module to the version in the build list
+if the dependency module's version is not already the same as the build
+list's version. Note that Minimal Version Selection guarantees that the
+build list's version of each module is always the same or higher than
+that in each workspace module.
+`,
+ Run: runSync,
+}
+
+func init() {
+ base.AddModCommonFlags(&cmdSync.Flag)
+ base.AddWorkfileFlag(&cmdSync.Flag)
+}
+
+func runSync(ctx context.Context, cmd *base.Command, args []string) {
+ modload.InitWorkfile()
+
+ modload.ForceUseModules = true
+
+ workGraph := modload.LoadModGraph(ctx, "")
+ _ = workGraph
+ mustSelectFor := map[module.Version][]module.Version{}
+
+ mms := modload.MainModules
+
+ opts := modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: false,
+ LoadTests: true,
+ AllowErrors: true,
+ SilencePackageErrors: true,
+ SilenceUnmatchedWarnings: true,
+ }
+ for _, m := range mms.Versions() {
+ opts.MainModule = m
+ _, pkgs := modload.LoadPackages(ctx, opts, "all")
+ opts.MainModule = module.Version{} // reset
+
+ var (
+ mustSelect []module.Version
+ inMustSelect = map[module.Version]bool{}
+ )
+ for _, pkg := range pkgs {
+ if r := modload.PackageModule(pkg); r.Version != "" && !inMustSelect[r] {
+ // r has a known version, so force that version.
+ mustSelect = append(mustSelect, r)
+ inMustSelect[r] = true
+ }
+ }
+ module.Sort(mustSelect) // ensure determinism
+ mustSelectFor[m] = mustSelect
+ }
+
+ workFilePath := modload.WorkFilePath() // save go.work path because EnterModule clobbers it.
+
+ for _, m := range mms.Versions() {
+ if mms.ModRoot(m) == "" && m.Path == "command-line-arguments" {
+ // This is not a real module.
+ // TODO(#49228): Remove this special case once the special
+ // command-line-arguments module is gone.
+ continue
+ }
+
+ // Use EnterModule to reset the global state in modload to be in
+ // single-module mode using the modroot of m.
+ modload.EnterModule(ctx, mms.ModRoot(m))
+
+ // Edit the build list in the same way that 'go get' would if we
+ // requested the relevant module versions explicitly.
+ changed, err := modload.EditBuildList(ctx, nil, mustSelectFor[m])
+ if err != nil {
+ base.Errorf("go: %v", err)
+ }
+ if !changed {
+ continue
+ }
+
+ modload.LoadPackages(ctx, modload.PackageOpts{
+ Tags: imports.AnyTags(),
+ Tidy: true,
+ VendorModulesInGOROOTSrc: true,
+ ResolveMissingImports: false,
+ LoadTests: true,
+ AllowErrors: true,
+ SilenceMissingStdImports: true,
+ SilencePackageErrors: true,
+ }, "all")
+ modload.WriteGoMod(ctx)
+ }
+
+ wf, err := modload.ReadWorkFile(workFilePath)
+ if err != nil {
+ base.Fatalf("go: %v", err)
+ }
+ modload.UpdateWorkFile(wf)
+ if err := modload.WriteWorkFile(workFilePath, wf); err != nil {
+ base.Fatalf("go: %v", err)
+ }
+}
diff --git a/src/cmd/go/internal/workcmd/use.go b/src/cmd/go/internal/workcmd/use.go
new file mode 100644
index 0000000000..a5ba6c7133
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/use.go
@@ -0,0 +1,130 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// go work use
+
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+ "cmd/go/internal/fsys"
+ "cmd/go/internal/modload"
+ "context"
+ "io/fs"
+ "os"
+ "path/filepath"
+)
+
+var cmdUse = &base.Command{
+ UsageLine: "go work use [-r] [moddirs]",
+ Short: "add modules to workspace file",
+ Long: `Use provides a command-line interface for adding
+directories, optionally recursively, to a go.work file.
+
+A use directive will be added to the go.work file for each argument
+directory listed on the command line, if it exists on disk,
+or removed from the go.work file if it does not exist on disk.
+
+The -r flag searches recursively for modules in the argument
+directories, and the use command operates as if each of the directories
+were specified as arguments: namely, use directives will be added for
+directories that exist, and removed for directories that do not exist.
+`,
+}
+
+var useR = cmdUse.Flag.Bool("r", false, "")
+
+func init() {
+ cmdUse.Run = runUse // break init cycle
+
+ base.AddModCommonFlags(&cmdUse.Flag)
+ base.AddWorkfileFlag(&cmdUse.Flag)
+}
+
+func runUse(ctx context.Context, cmd *base.Command, args []string) {
+ modload.ForceUseModules = true
+
+ var gowork string
+ modload.InitWorkfile()
+ gowork = modload.WorkFilePath()
+
+ workFile, err := modload.ReadWorkFile(gowork)
+ if err != nil {
+ base.Fatalf("go: %v", err)
+ }
+
+ haveDirs := make(map[string][]string) // absolute → original(s)
+ for _, use := range workFile.Use {
+ var absDir string
+ if filepath.IsAbs(use.Path) {
+ absDir = filepath.Clean(use.Path)
+ } else {
+ absDir = filepath.Join(filepath.Dir(gowork), use.Path)
+ }
+ haveDirs[absDir] = append(haveDirs[absDir], use.Path)
+ }
+
+ addDirs := make(map[string]bool)
+ removeDirs := make(map[string]bool)
+ lookDir := func(dir string) {
+ // If the path is absolute, try to keep it absolute. If it's relative,
+ // make it relative to the go.work file rather than the working directory.
+ absDir := dir
+ if !filepath.IsAbs(dir) {
+ absDir = filepath.Join(base.Cwd(), dir)
+ rel, err := filepath.Rel(filepath.Dir(gowork), absDir)
+ if err == nil {
+ // Normalize relative paths to use slashes, so that checked-in go.work
+ // files with relative paths within the repo are platform-independent.
+ dir = filepath.ToSlash(rel)
+ } else {
+ // The path can't be made relative to the go.work file,
+ // so it must be kept absolute instead.
+ dir = absDir
+ }
+ }
+
+ fi, err := os.Stat(filepath.Join(absDir, "go.mod"))
+ if err != nil {
+ if os.IsNotExist(err) {
+ for _, origDir := range haveDirs[absDir] {
+ removeDirs[origDir] = true
+ }
+ return
+ }
+ base.Errorf("go: %v", err)
+ }
+
+ if !fi.Mode().IsRegular() {
+ base.Errorf("go: %v is not regular", filepath.Join(dir, "go.mod"))
+ }
+
+ if len(haveDirs[absDir]) == 0 {
+ addDirs[dir] = true
+ }
+ }
+
+ for _, useDir := range args {
+ if *useR {
+ fsys.Walk(useDir, func(path string, info fs.FileInfo, err error) error {
+ if !info.IsDir() {
+ return nil
+ }
+ lookDir(path)
+ return nil
+ })
+ continue
+ }
+ lookDir(useDir)
+ }
+
+ for dir := range removeDirs {
+ workFile.DropUse(dir)
+ }
+ for dir := range addDirs {
+ workFile.AddUse(dir, "")
+ }
+ modload.UpdateWorkFile(workFile)
+ modload.WriteWorkFile(gowork, workFile)
+}
diff --git a/src/cmd/go/internal/workcmd/work.go b/src/cmd/go/internal/workcmd/work.go
new file mode 100644
index 0000000000..d3cc250231
--- /dev/null
+++ b/src/cmd/go/internal/workcmd/work.go
@@ -0,0 +1,72 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package workcmd implements the ``go work'' command.
+package workcmd
+
+import (
+ "cmd/go/internal/base"
+)
+
+var CmdWork = &base.Command{
+ UsageLine: "go work",
+ Short: "workspace maintenance",
+ Long: `Go workspace provides access to operations on workspaces.
+
+Note that support for workspaces is built into many other commands, not
+just 'go work'.
+
+See 'go help modules' for information about Go's module system of which
+workspaces are a part.
+
+A workspace is specified by a go.work file that specifies a set of
+module directories with the "use" directive. These modules are used as
+root modules by the go command for builds and related operations. A
+workspace that does not specify modules to be used cannot be used to do
+builds from local modules.
+
+go.work files are line-oriented. Each line holds a single directive,
+made up of a keyword followed by arguments. For example:
+
+ go 1.18
+
+ use ../foo/bar
+ use ./baz
+
+ replace example.com/foo v1.2.3 => example.com/bar v1.4.5
+
+The leading keyword can be factored out of adjacent lines to create a block,
+like in Go imports.
+
+ use (
+ ../foo/bar
+ ./baz
+ )
+
+The use directive specifies a module to be included in the workspace's
+set of main modules. The argument to the use directive is the directory
+containing the module's go.mod file.
+
+The go directive specifies the version of Go the file was written at. It
+is possible there may be future changes in the semantics of workspaces
+that could be controlled by this version, but for now the version
+specified has no effect.
+
+The replace directive has the same syntax as the replace directive in a
+go.mod file and takes precedence over replaces in go.mod files. It is
+primarily intended to override conflicting replaces in different workspace
+modules.
+
+To determine whether the go command is operating in workspace mode, use
+the "go env GOWORK" command. This will specify the workspace file being
+used.
+`,
+
+ Commands: []*base.Command{
+ cmdEdit,
+ cmdInit,
+ cmdSync,
+ cmdUse,
+ },
+}
diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go
index 16361e02ca..c0a1d3ccfc 100644
--- a/src/cmd/go/main.go
+++ b/src/cmd/go/main.go
@@ -7,6 +7,7 @@
package main
import (
+ "cmd/go/internal/workcmd"
"context"
"flag"
"fmt"
@@ -56,6 +57,7 @@ func init() {
work.CmdInstall,
list.CmdList,
modcmd.CmdMod,
+ workcmd.CmdWork,
run.CmdRun,
test.CmdTest,
tool.CmdTool,
diff --git a/src/cmd/go/proxy_test.go b/src/cmd/go/proxy_test.go
index a387fe67db..517a885542 100644
--- a/src/cmd/go/proxy_test.go
+++ b/src/cmd/go/proxy_test.go
@@ -357,7 +357,7 @@ func proxyHandler(w http.ResponseWriter, r *http.Request) {
zip []byte
err error
}
- c := zipCache.Do(a, func() interface{} {
+ c := zipCache.Do(a, func() any {
var buf bytes.Buffer
z := zip.NewWriter(&buf)
for _, f := range a.Files {
@@ -431,7 +431,7 @@ func readArchive(path, vers string) (*txtar.Archive, error) {
prefix := strings.ReplaceAll(enc, "/", "_")
name := filepath.Join(cmdGoDir, "testdata/mod", prefix+"_"+encVers+".txt")
- a := archiveCache.Do(name, func() interface{} {
+ a := archiveCache.Do(name, func() any {
a, err := txtar.ParseFile(name)
if err != nil {
if testing.Verbose() || !os.IsNotExist(err) {
diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go
index 98c1b68ed9..722921f74c 100644
--- a/src/cmd/go/script_test.go
+++ b/src/cmd/go/script_test.go
@@ -374,7 +374,7 @@ Script:
default:
if strings.HasPrefix(cond.tag, "exec:") {
prog := cond.tag[len("exec:"):]
- ok = execCache.Do(prog, func() interface{} {
+ ok = execCache.Do(prog, func() any {
if runtime.GOOS == "plan9" && prog == "git" {
// The Git command is usually not the real Git on Plan 9.
// See https://golang.org/issues/29640.
@@ -491,6 +491,7 @@ var scriptCmds = map[string]func(*testScript, simpleStatus, []string){
"go": (*testScript).cmdGo,
"grep": (*testScript).cmdGrep,
"mkdir": (*testScript).cmdMkdir,
+ "mv": (*testScript).cmdMv,
"rm": (*testScript).cmdRm,
"skip": (*testScript).cmdSkip,
"stale": (*testScript).cmdStale,
@@ -585,10 +586,6 @@ func (ts *testScript) cmdChmod(want simpleStatus, args []string) {
// cmp compares two files.
func (ts *testScript) cmdCmp(want simpleStatus, args []string) {
- if want != success {
- // It would be strange to say "this file can have any content except this precise byte sequence".
- ts.fatalf("unsupported: %v cmp", want)
- }
quiet := false
if len(args) > 0 && args[0] == "-q" {
quiet = true
@@ -597,14 +594,11 @@ func (ts *testScript) cmdCmp(want simpleStatus, args []string) {
if len(args) != 2 {
ts.fatalf("usage: cmp file1 file2")
}
- ts.doCmdCmp(args, false, quiet)
+ ts.doCmdCmp(want, args, false, quiet)
}
// cmpenv compares two files with environment variable substitution.
func (ts *testScript) cmdCmpenv(want simpleStatus, args []string) {
- if want != success {
- ts.fatalf("unsupported: %v cmpenv", want)
- }
quiet := false
if len(args) > 0 && args[0] == "-q" {
quiet = true
@@ -613,17 +607,18 @@ func (ts *testScript) cmdCmpenv(want simpleStatus, args []string) {
if len(args) != 2 {
ts.fatalf("usage: cmpenv file1 file2")
}
- ts.doCmdCmp(args, true, quiet)
+ ts.doCmdCmp(want, args, true, quiet)
}
-func (ts *testScript) doCmdCmp(args []string, env, quiet bool) {
+func (ts *testScript) doCmdCmp(want simpleStatus, args []string, env, quiet bool) {
name1, name2 := args[0], args[1]
var text1, text2 string
- if name1 == "stdout" {
+ switch name1 {
+ case "stdout":
text1 = ts.stdout
- } else if name1 == "stderr" {
+ case "stderr":
text1 = ts.stderr
- } else {
+ default:
data, err := os.ReadFile(ts.mkabs(name1))
ts.check(err)
text1 = string(data)
@@ -638,14 +633,28 @@ func (ts *testScript) doCmdCmp(args []string, env, quiet bool) {
text2 = ts.expand(text2, false)
}
- if text1 == text2 {
- return
- }
-
- if !quiet {
+ eq := text1 == text2
+ if !eq && !quiet && want != failure {
fmt.Fprintf(&ts.log, "[diff -%s +%s]\n%s\n", name1, name2, diff(text1, text2))
}
- ts.fatalf("%s and %s differ", name1, name2)
+ switch want {
+ case failure:
+ if eq {
+ ts.fatalf("%s and %s do not differ", name1, name2)
+ }
+ case success:
+ if !eq {
+ ts.fatalf("%s and %s differ", name1, name2)
+ }
+ case successOrFailure:
+ if eq {
+ fmt.Fprintf(&ts.log, "%s and %s do not differ\n", name1, name2)
+ } else {
+ fmt.Fprintf(&ts.log, "%s and %s differ\n", name1, name2)
+ }
+ default:
+ ts.fatalf("unsupported: %v cmp", want)
+ }
}
// cp copies files, maybe eventually directories.
@@ -840,6 +849,16 @@ func (ts *testScript) cmdMkdir(want simpleStatus, args []string) {
}
}
+func (ts *testScript) cmdMv(want simpleStatus, args []string) {
+ if want != success {
+ ts.fatalf("unsupported: %v mv", want)
+ }
+ if len(args) != 2 {
+ ts.fatalf("usage: mv old new")
+ }
+ ts.check(os.Rename(ts.mkabs(args[0]), ts.mkabs(args[1])))
+}
+
// rm removes files or directories.
func (ts *testScript) cmdRm(want simpleStatus, args []string) {
if want != success {
@@ -883,7 +902,7 @@ func (ts *testScript) cmdStale(want simpleStatus, args []string) {
tmpl := "{{if .Error}}{{.ImportPath}}: {{.Error.Err}}{{else}}"
switch want {
case failure:
- tmpl += "{{if .Stale}}{{.ImportPath}} is unexpectedly stale{{end}}"
+ tmpl += "{{if .Stale}}{{.ImportPath}} is unexpectedly stale: {{.StaleReason}}{{end}}"
case success:
tmpl += "{{if not .Stale}}{{.ImportPath}} is unexpectedly NOT stale{{end}}"
default:
@@ -1309,7 +1328,7 @@ func (ts *testScript) expand(s string, inRegexp bool) string {
}
// fatalf aborts the test with the given failure message.
-func (ts *testScript) fatalf(format string, args ...interface{}) {
+func (ts *testScript) fatalf(format string, args ...any) {
fmt.Fprintf(&ts.log, "FAIL: %s:%d: %s\n", ts.file, ts.lineno, fmt.Sprintf(format, args...))
ts.t.FailNow()
}
diff --git a/src/cmd/go/testdata/addmod.go b/src/cmd/go/testdata/addmod.go
index a1ace4ce59..eac2a7ad44 100644
--- a/src/cmd/go/testdata/addmod.go
+++ b/src/cmd/go/testdata/addmod.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build ignore
// +build ignore
// Addmod adds a module as a txtar archive to the testdata/mod directory.
@@ -39,7 +40,7 @@ func usage() {
var tmpdir string
-func fatalf(format string, args ...interface{}) {
+func fatalf(format string, args ...any) {
os.RemoveAll(tmpdir)
log.Fatalf(format, args...)
}
diff --git a/src/cmd/go/testdata/savedir.go b/src/cmd/go/testdata/savedir.go
index 6a8a232702..53c78cfb00 100644
--- a/src/cmd/go/testdata/savedir.go
+++ b/src/cmd/go/testdata/savedir.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build ignore
// +build ignore
// Savedir archives a directory tree as a txtar archive printed to standard output.
diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README
index 2b55fa8977..b2a7fd1915 100644
--- a/src/cmd/go/testdata/script/README
+++ b/src/cmd/go/testdata/script/README
@@ -110,14 +110,15 @@ The commands are:
Change the permissions of the files or directories named by the path arguments
to be equal to perm. Only numerical permissions are supported.
-- cmp file1 file2
- Check that the named files have the same content.
+- [! | ?] cmp file1 file2
+ Check that the named files have (or do not have) the same content.
By convention, file1 is the actual data and file2 the expected data.
File1 can be "stdout" or "stderr" to use the standard output or standard error
from the most recent exec or go command.
- (If the files have differing content, the failure prints a diff.)
+ (If the file contents differ and the command is not negated,
+ the failure prints a diff.)
-- cmpenv file1 file2
+- [! | ?] cmpenv file1 file2
Like cmp, but environment variables are substituted in the file contents
before the comparison. For example, $GOOS is replaced by the target GOOS.
@@ -163,6 +164,10 @@ The commands are:
- mkdir path...
Create the listed directories, if they do not already exists.
+- mv path1 path2
+ Rename path1 to path2. OS-specific restrictions may apply when path1 and path2
+ are in different directories.
+
- rm file...
Remove the listed files or directories.
diff --git a/src/cmd/go/testdata/script/build_issue48319.txt b/src/cmd/go/testdata/script/build_issue48319.txt
index f58a5faa3f..3979247f2f 100644
--- a/src/cmd/go/testdata/script/build_issue48319.txt
+++ b/src/cmd/go/testdata/script/build_issue48319.txt
@@ -1,50 +1,33 @@
+# Regression test for https://go.dev/issue/48319:
+# cgo builds should not include debug information from a stale GOROOT_FINAL.
+
[short] skip
[!cgo] skip
+[windows] skip # The Go Windows builders have an extremely out-of-date gcc that does not support reproducible builds; see https://go.dev/issue/50824.
-# Set up fresh GOCACHE
+# This test is sensitive to cache invalidation,
+# so use a separate build cache that we can control.
env GOCACHE=$WORK/gocache
mkdir $GOCACHE
-# 1. unset GOROOT_FINAL, Build a simple binary with cgo by origin go.
-# The DW_AT_comp_dir of runtime/cgo should have a prefix with origin goroot.
-env GOROOT_FINAL=
-# If using "go run", it is no debuginfo in binary. So use "go build".
-# And we can check the stderr to judge if the cache of "runtime/cgo"
-# was used or not.
-go build -o binary.exe
-exec ./binary.exe $TESTGO_GOROOT
-stdout 'cgo DW_AT_comp_dir is right in binary'
+# Build a binary using a specific value of GOROOT_FINAL.
+env GOROOT_FINAL=$WORK${/}goroot1
+go build -o main.exe
+mv main.exe main1.exe
+# Now clean the cache and build using a different GOROOT_FINAL.
+# The resulting binaries should differ in their debug metadata.
+go clean -cache
+env GOROOT_FINAL=$WORK${/}goroot2
+go build -o main.exe
+mv main.exe main2.exe
+! cmp main2.exe main1.exe
-# 2. GOROOT_FINAL will be changed, the runtime/cgo will be rebuild.
-env GOROOT_FINAL=$WORK/gorootfinal
-go build -x -o binary.exe
-stderr '(clang|gcc)( |\.exe).*gcc_.*\.c'
-exec ./binary.exe $GOROOT_FINAL
-stdout 'cgo DW_AT_comp_dir is right in binary'
-
-
-[!symlink] skip
-
-# Symlink the compiler to another path
-env GOROOT=$WORK/goroot
-symlink $GOROOT -> $TESTGO_GOROOT
-
-# 3. GOROOT_FINAL is same with 2, build with the other go
-# the runtime/cgo will not be rebuild.
-go build -x -o binary.exe
-! stderr '(clang|gcc)( |\.exe).*gcc_.*\.c'
-exec ./binary.exe $GOROOT_FINAL
-stdout 'cgo DW_AT_comp_dir is right in binary'
-
-
-# 4. unset GOROOT_FINAL, build with the other go
-# the runtime/cgo will be rebuild.
-env GOROOT_FINAL=
-go build -x -o binary.exe
-stderr '(clang|gcc)( |\.exe).*gcc_.*\.c'
-exec ./binary.exe $GOROOT
-stdout 'cgo DW_AT_comp_dir is right in binary'
+# Set GOROOT_FINAL back to the first value.
+# If the build is properly reproducible, the two binaries should match.
+env GOROOT_FINAL=$WORK${/}goroot1
+go build -o main.exe
+cmp -q main.exe main1.exe
-- go.mod --
module main
@@ -54,100 +37,11 @@ go 1.18
package main
import "C"
-import (
- "debug/dwarf"
- "fmt"
- "log"
- "os"
- "path/filepath"
- "strings"
-)
+
+import "runtime"
var _ C.int
func main() {
- dwarfData, err := readDWARF(os.Args[0])
- if err != nil {
- log.Fatal(err)
- }
- goroot := filepath.Join(os.Args[1], "src")
- dwarfReader := dwarfData.Reader()
- cgopackage := filepath.Join("runtime", "cgo")
- var hascgo bool
- for {
- e, err := dwarfReader.Next()
- if err != nil {
- log.Fatal(err)
- }
- if e == nil {
- break
- }
- field := e.AttrField(dwarf.AttrCompDir)
- if field == nil {
- continue
- }
- compdir := field.Val.(string)
- if strings.HasSuffix(compdir, cgopackage) {
- hascgo = true
- if !strings.HasPrefix(compdir, goroot) {
- fmt.Printf("cgo DW_AT_comp_dir %s contains incorrect path in binary.\n", compdir)
- return
- }
- }
- }
- if hascgo {
- fmt.Println("cgo DW_AT_comp_dir is right in binary")
- } else {
- fmt.Println("binary does not contain cgo")
- }
-}
--- read_darwin.go --
-package main
-
-import (
- "debug/dwarf"
- "debug/macho"
-)
-
-func readDWARF(exePath string) (*dwarf.Data, error) {
- machoFile, err := macho.Open(exePath)
- if err != nil {
- return nil, err
- }
- defer machoFile.Close()
- return machoFile.DWARF()
-}
--- read_elf.go --
-// +build android dragonfly freebsd illumos linux netbsd openbsd solaris
-
-package main
-
-import (
- "debug/dwarf"
- "debug/elf"
-)
-
-func readDWARF(exePath string) (*dwarf.Data, error) {
- elfFile, err := elf.Open(exePath)
- if err != nil {
- return nil, err
- }
- defer elfFile.Close()
- return elfFile.DWARF()
-}
--- read_windows.go --
-package main
-
-import (
- "debug/dwarf"
- "debug/pe"
-)
-
-func readDWARF(exePath string) (*dwarf.Data, error) {
- peFile, err := pe.Open(exePath)
- if err != nil {
- return nil, err
- }
- defer peFile.Close()
- return peFile.DWARF()
+ println(runtime.GOROOT())
}
diff --git a/src/cmd/go/testdata/script/build_overlay.txt b/src/cmd/go/testdata/script/build_overlay.txt
index 2932b94e6c..56e812f44b 100644
--- a/src/cmd/go/testdata/script/build_overlay.txt
+++ b/src/cmd/go/testdata/script/build_overlay.txt
@@ -31,17 +31,17 @@ exec ./print_trimpath_two_files$GOEXE
stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]main.go
stdout $WORK[/\\]gopath[/\\]src[/\\]m[/\\]printpath[/\\]other.go
-go build -overlay overlay.json -o main_cgo_replace$GOEXE ./cgo_hello_replace
-exec ./main_cgo_replace$GOEXE
-stdout '^hello cgo\r?\n'
+[cgo] go build -overlay overlay.json -o main_cgo_replace$GOEXE ./cgo_hello_replace
+[cgo] exec ./main_cgo_replace$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
-go build -overlay overlay.json -o main_cgo_quote$GOEXE ./cgo_hello_quote
-exec ./main_cgo_quote$GOEXE
-stdout '^hello cgo\r?\n'
+[cgo] go build -overlay overlay.json -o main_cgo_quote$GOEXE ./cgo_hello_quote
+[cgo] exec ./main_cgo_quote$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
-go build -overlay overlay.json -o main_cgo_angle$GOEXE ./cgo_hello_angle
-exec ./main_cgo_angle$GOEXE
-stdout '^hello cgo\r?\n'
+[cgo] go build -overlay overlay.json -o main_cgo_angle$GOEXE ./cgo_hello_angle
+[cgo] exec ./main_cgo_angle$GOEXE
+[cgo] stdout '^hello cgo\r?\n'
go build -overlay overlay.json -o main_call_asm$GOEXE ./call_asm
exec ./main_call_asm$GOEXE
@@ -55,11 +55,11 @@ cp overlay/test_cache_different.go overlay/test_cache.go
go list -overlay overlay.json -f '{{.Stale}}' ./test_cache
stdout '^true$'
-go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace
-cp stdout compiled_cgo_sources.txt
-go run ../print_line_comments.go compiled_cgo_sources.txt
-stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go
-! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c
+[cgo] go list -compiled -overlay overlay.json -f '{{range .CompiledGoFiles}}{{. | printf "%s\n"}}{{end}}' ./cgo_hello_replace
+[cgo] cp stdout compiled_cgo_sources.txt
+[cgo] go run ../print_line_comments.go compiled_cgo_sources.txt
+[cgo] stdout $GOPATH[/\\]src[/\\]m[/\\]cgo_hello_replace[/\\]cgo_hello_replace.go
+[cgo] ! stdout $GOPATH[/\\]src[/\\]m[/\\]overlay[/\\]hello.c
# Run same tests but with gccgo.
env GO111MODULE=off
diff --git a/src/cmd/go/testdata/script/build_single_error.txt b/src/cmd/go/testdata/script/build_single_error.txt
new file mode 100644
index 0000000000..241cdb954b
--- /dev/null
+++ b/src/cmd/go/testdata/script/build_single_error.txt
@@ -0,0 +1,18 @@
+# go test ./... with a bad package should report the error once (#44624).
+! go test ./...
+stderr -count=1 undefined
+
+-- go.mod --
+module example.com
+
+go 1.18
+-- a/a.go --
+package a
+
+import "example.com/b"
+-- b/b.go --
+package b
+
+var X = Y
+-- b/b_test.go --
+package b
diff --git a/src/cmd/go/testdata/script/cgo_stale_precompiled.txt b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
new file mode 100644
index 0000000000..80ed751afc
--- /dev/null
+++ b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt
@@ -0,0 +1,28 @@
+# Regression test for https://go.dev/issue/47215 and https://go.dev/issue/50183:
+# A mismatched $GOROOT_FINAL or missing $CC caused the C dependencies of the net
+# package to appear stale, and it could not be rebuilt due to a missing $CC.
+
+[!cgo] skip
+
+# This test may start with the runtime/cgo package already stale.
+# Explicitly rebuild it to ensure that it is cached.
+# (See https://go.dev/issue/50892.)
+#
+# If running in non-short mode, explicitly vary CGO_CFLAGS
+# as a control case (to ensure that our regexps do catch rebuilds).
+
+[!short] env GOCACHE=$WORK/cache
+[!short] env CGO_CFLAGS=-DTestScript_cgo_stale_precompiled=true
+go build -x runtime/cgo
+[!short] stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+
+# https://go.dev/issue/47215: a missing $(go env CC) caused the precompiled net to be stale.
+[!plan9] env PATH='' # Guaranteed not to include $(go env CC)!
+[plan9] env path=''
+go build -x runtime/cgo
+! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
+
+# https://go.dev/issue/50183: a mismatched GOROOT_FINAL caused net to be stale.
+env GOROOT_FINAL=$WORK${/}goroot
+go build -x runtime/cgo
+! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo'
diff --git a/src/cmd/go/testdata/script/embed.txt b/src/cmd/go/testdata/script/embed.txt
index 04b17cd62b..5f7f6edd77 100644
--- a/src/cmd/go/testdata/script/embed.txt
+++ b/src/cmd/go/testdata/script/embed.txt
@@ -60,6 +60,18 @@ rm t/x.txt
! go build m/use
stderr '^x.go:5:12: pattern [*]t: cannot embed directory t: contains no embeddable files$'
+# all still ignores .git and symlinks
+cp x.go3 x.go
+! go build -x
+stderr '^x.go:5:12: pattern all:t: cannot embed directory t: contains no embeddable files$'
+
+# all finds dot files and underscore files
+cp x.txt t/.x.txt
+go build -x
+rm t/.x.txt
+cp x.txt t/_x.txt
+go build -x
+
-- x.go --
package p
@@ -92,6 +104,14 @@ import "embed"
//go:embed *t
var X embed.FS
+-- x.go3 --
+package p
+
+import "embed"
+
+//go:embed all:t
+var X embed.FS
+
-- x.txt --
hello
diff --git a/src/cmd/go/testdata/script/go_version.txt b/src/cmd/go/testdata/script/go_version.txt
new file mode 100644
index 0000000000..1a787e1b18
--- /dev/null
+++ b/src/cmd/go/testdata/script/go_version.txt
@@ -0,0 +1,9 @@
+# test that go version doesn't panic on non-go binaries
+# See Issue #49181
+
+[exec:/bin/true] cp /bin/true true
+[exec:C:\windows\system32\help.exe] cp C:\windows\system32\help.exe help.exe
+
+go version -m .
+! stdout .
+! stderr .
diff --git a/src/cmd/go/testdata/script/list_cgo_compiled_importmap.txt b/src/cmd/go/testdata/script/list_cgo_compiled_importmap.txt
index 3d68ef3055..30effb104b 100644
--- a/src/cmd/go/testdata/script/list_cgo_compiled_importmap.txt
+++ b/src/cmd/go/testdata/script/list_cgo_compiled_importmap.txt
@@ -12,7 +12,7 @@ env CGO_ENABLED=1
env GOFLAGS=-tags=netcgo # Force net to use cgo even on Windows.
-# "runtime/cgo [runtime.test]" appears in the the test dependencies of "runtime",
+# "runtime/cgo [runtime.test]" appears in the test dependencies of "runtime",
# because "runtime/cgo" itself depends on "runtime"
go list -deps -test -compiled -f '{{if eq .ImportPath "net [runtime.test]"}}{{printf "%q" .Imports}}{{end}}' runtime
diff --git a/src/cmd/go/testdata/script/list_swigcxx.txt b/src/cmd/go/testdata/script/list_swigcxx.txt
index c6acd9ecdb..d4227a80e8 100644
--- a/src/cmd/go/testdata/script/list_swigcxx.txt
+++ b/src/cmd/go/testdata/script/list_swigcxx.txt
@@ -2,17 +2,19 @@
[!exec:swig] skip
[!exec:g++] skip
+[!cgo] skip
# CompiledGoFiles should contain 4 files:
# a.go
# a.swigcxx.go
# _cgo_gotypes.go
# a.cgo1.go
+#
+# These names we see here, other than a.go, will be from the build cache,
+# so we just count them.
go list -f '{{.CompiledGoFiles}}' -compiled=true example/swig
-# These names we see here, other than a.go, will be from the build cache,
-# so we just count them.
stdout a\.go
stdout -count=3 $GOCACHE
diff --git a/src/cmd/go/testdata/script/mod_edit_go.txt b/src/cmd/go/testdata/script/mod_edit_go.txt
index 38321d071f..7e9740fec4 100644
--- a/src/cmd/go/testdata/script/mod_edit_go.txt
+++ b/src/cmd/go/testdata/script/mod_edit_go.txt
@@ -2,7 +2,7 @@
env GO111MODULE=on
! go build
-stderr 'type aliases only supported as of'
+stderr ' type aliases requires'
go mod edit -go=1.9
grep 'go 1.9' go.mod
go build
@@ -11,7 +11,7 @@ go build
# the cached 1.9 build. (https://golang.org/issue/37804)
go mod edit -go=1.8
! go build
-stderr 'type aliases only supported as of'
+stderr 'type aliases requires'
-- go.mod --
diff --git a/src/cmd/go/testdata/script/mod_get_direct.txt b/src/cmd/go/testdata/script/mod_get_direct.txt
index 42ccbcd38a..856e05bc32 100644
--- a/src/cmd/go/testdata/script/mod_get_direct.txt
+++ b/src/cmd/go/testdata/script/mod_get_direct.txt
@@ -10,7 +10,7 @@ env GO111MODULE=on
env GOPROXY=direct
env GOSUMDB=off
-go list -m cloud.google.com/go@master
+go list -m cloud.google.com/go@main
! stdout 'v0.0.0-'
-- go.mod --
diff --git a/src/cmd/go/testdata/script/mod_get_issue48511.txt b/src/cmd/go/testdata/script/mod_get_issue48511.txt
new file mode 100644
index 0000000000..0ba486d35b
--- /dev/null
+++ b/src/cmd/go/testdata/script/mod_get_issue48511.txt
@@ -0,0 +1,68 @@
+# Regression test for https://golang.org/issue/48511:
+# requirement minimization was accidentally replacing previous
+# versions of the main module, causing dependencies to be
+# spuriously dropped during requirement minimization and
+# leading to an infinite loop.
+
+cp go.mod go.mod.orig
+go mod tidy
+cmp go.mod go.mod.orig
+
+go get -u=patch ./...
+cmp go.mod go.mod.want
+
+-- go.mod --
+module example.net/m
+
+go 1.16
+
+replace (
+ example.net/a v0.1.0 => ./a
+ example.net/b v0.1.0 => ./b
+ example.net/b v0.1.1 => ./b
+ example.net/m v0.1.0 => ./m1
+)
+
+require example.net/a v0.1.0
+-- go.mod.want --
+module example.net/m
+
+go 1.16
+
+replace (
+ example.net/a v0.1.0 => ./a
+ example.net/b v0.1.0 => ./b
+ example.net/b v0.1.1 => ./b
+ example.net/m v0.1.0 => ./m1
+)
+
+require (
+ example.net/a v0.1.0
+ example.net/b v0.1.1 // indirect
+)
+-- m.go --
+package m
+
+import "example.net/a"
+-- m1/go.mod --
+module example.net/m
+
+go 1.16
+
+require example.net/b v0.1.0
+-- a/go.mod --
+module example.net/a
+
+go 1.16
+
+require example.net/m v0.1.0
+-- a/a.go --
+package a
+
+import "example.net/b"
+-- b/go.mod --
+module example.net/b
+
+go 1.16
+-- b/b.go --
+package b
diff --git a/src/cmd/go/testdata/script/mod_go_version_missing.txt b/src/cmd/go/testdata/script/mod_go_version_missing.txt
index d704816729..2159a1e4c0 100644
--- a/src/cmd/go/testdata/script/mod_go_version_missing.txt
+++ b/src/cmd/go/testdata/script/mod_go_version_missing.txt
@@ -27,7 +27,7 @@ cmp go.mod go.mod.orig
! go list -mod=vendor all
! stderr '^go: inconsistent vendoring'
-stderr 'cannot find package "\." in:\n\t.*[/\\]vendor[/\\]example.com[/\\]badedit$'
+stderr 'cannot find package "vendor/example.com/badedit" in:\n\t.*[/\\]vendor[/\\]example.com[/\\]badedit$'
# When we set -mod=mod, the go version should be updated immediately,
# to the current version, converting the requirements from eager to lazy.
diff --git a/src/cmd/go/testdata/script/mod_lazy_new_import.txt b/src/cmd/go/testdata/script/mod_lazy_new_import.txt
index 4272a52de1..520d8459cc 100644
--- a/src/cmd/go/testdata/script/mod_lazy_new_import.txt
+++ b/src/cmd/go/testdata/script/mod_lazy_new_import.txt
@@ -7,7 +7,7 @@
# \
# ---- a/y (new) ---- c
#
-# Where a/x and x/y are disjoint packages, but both contained in module a.
+# Where a/x and a/y are disjoint packages, but both contained in module a.
#
# The module dependency graph initially looks like:
#
diff --git a/src/cmd/go/testdata/script/mod_list_compiled_concurrent.txt b/src/cmd/go/testdata/script/mod_list_compiled_concurrent.txt
index b08713dcfd..896bbab9fc 100644
--- a/src/cmd/go/testdata/script/mod_list_compiled_concurrent.txt
+++ b/src/cmd/go/testdata/script/mod_list_compiled_concurrent.txt
@@ -1,6 +1,7 @@
env GO111MODULE=on
[short] skip
+[!cgo] skip
# Regression test for golang.org/issue/29667:
# spurious 'failed to cache compiled Go files' errors.
diff --git a/src/cmd/go/testdata/script/mod_vendor.txt b/src/cmd/go/testdata/script/mod_vendor.txt
index 4eb80c2332..a2727ddf7f 100644
--- a/src/cmd/go/testdata/script/mod_vendor.txt
+++ b/src/cmd/go/testdata/script/mod_vendor.txt
@@ -82,6 +82,48 @@ exists vendor/mysite/myname/mypkg/LICENSE.txt
! exists vendor/x/x2
! exists vendor/x/x2/LICENSE
+# 'go mod vendor' should work with an alternative vendor directory if the -o flag is provided.
+go mod vendor -v -o alternative-vendor-dir
+exists alternative-vendor-dir/modules.txt
+exists alternative-vendor-dir/a/foo/LICENSE
+
+# 'go mod vendor' should interpret paths relative to the current working directory when the -o flag is provided.
+mkdir dir1
+mkdir dir2
+
+cd dir1
+go mod vendor -v -o relative-vendor-dir
+
+go mod vendor -v -o ../dir2/relative-vendor-dir
+
+cd ..
+exists dir1/relative-vendor-dir/modules.txt
+exists dir1/relative-vendor-dir/a/foo/LICENSE
+exists dir2/relative-vendor-dir/modules.txt
+exists dir2/relative-vendor-dir/a/foo/LICENSE
+
+# 'go mod vendor' should fall back to the default 'vendor' directory when an empty argument is passed to the -o flag
+# the same behavior should be exhibited both on the module root directory, as well as nested subdirectories
+
+go mod vendor -v -o ''
+exists vendor/modules.txt
+
+env GOFLAGS=-o=foo
+go mod vendor -v -o ''
+exists vendor/modules.txt
+env GOFLAGS=''
+
+mkdir -p nested/dir
+cd nested/dir
+go mod vendor -v -o ''
+! exists vendor/
+exists ../../vendor/modules.txt
+cd ../..
+
+# 'go mod vendor' should work with absolute paths as well
+go mod vendor -v -o $WORK/tmp/absolute-vendor-dir
+exists $WORK/tmp/absolute-vendor-dir/modules.txt
+
[short] stop
# 'go build' and 'go test' using vendored packages should succeed.
diff --git a/src/cmd/go/testdata/script/mod_vendor_goversion.txt b/src/cmd/go/testdata/script/mod_vendor_goversion.txt
index a92eb73d27..9e3618a218 100644
--- a/src/cmd/go/testdata/script/mod_vendor_goversion.txt
+++ b/src/cmd/go/testdata/script/mod_vendor_goversion.txt
@@ -26,7 +26,7 @@ go mod vendor
! grep 1.17 vendor/modules.txt
! go build example.net/need117
stderr '^vendor[/\\]example\.net[/\\]need117[/\\]need117.go:5:1[89]:'
-stderr 'conversion of slices to array pointers only supported as of -lang=go1\.17'
+stderr 'conversion of slices to array pointers requires go1\.17 or later'
! grep 1.13 vendor/modules.txt
go build example.net/bad114
diff --git a/src/cmd/go/testdata/script/test_build_failure.txt b/src/cmd/go/testdata/script/test_build_failure.txt
index 8d13634c8c..e8c984f272 100644
--- a/src/cmd/go/testdata/script/test_build_failure.txt
+++ b/src/cmd/go/testdata/script/test_build_failure.txt
@@ -3,7 +3,7 @@
! go test -x coverbad
! stderr '[\\/]coverbad\.test( |$)' # 'go test' should not claim to have run the test.
stderr 'undefined: g'
-stderr 'undefined: j'
+[cgo] stderr 'undefined: j'
-- go.mod --
module coverbad
diff --git a/src/cmd/go/testdata/script/test_fuzz.txt b/src/cmd/go/testdata/script/test_fuzz.txt
index 150491be04..3e048e00c5 100644
--- a/src/cmd/go/testdata/script/test_fuzz.txt
+++ b/src/cmd/go/testdata/script/test_fuzz.txt
@@ -60,34 +60,34 @@ stdout ok
! stdout ^ok
! stdout 'fatal here'
stdout FAIL
-stdout 'f.Fuzz function'
+stdout 'fuzz target'
# Test that f.Error within f.Fuzz panics
! go test error_fuzz_fn_fuzz_test.go
! stdout ^ok
! stdout 'error here'
stdout FAIL
-stdout 'f.Fuzz function'
+stdout 'fuzz target'
# Test that f.Fail within f.Fuzz panics
! go test fail_fuzz_fn_fuzz_test.go
! stdout ^ok
stdout FAIL
-stdout 'f.Fuzz function'
+stdout 'fuzz target'
# Test that f.Skip within f.Fuzz panics
! go test skip_fuzz_fn_fuzz_test.go
! stdout ^ok
! stdout 'skip here'
stdout FAIL
-stdout 'f.Fuzz function'
+stdout 'fuzz target'
# Test that f.Skipped within f.Fuzz panics
! go test skipped_fuzz_fn_fuzz_test.go
! stdout ^ok
! stdout 'f.Skipped is'
stdout FAIL
-stdout 'f.Fuzz function'
+stdout 'fuzz target'
stdout 't.Skipped is false'
# Test that runtime.Goexit within the fuzz function is an error.
diff --git a/src/cmd/go/testdata/script/test_fuzz_chatty.txt b/src/cmd/go/testdata/script/test_fuzz_chatty.txt
index 1abcbbd389..d07fe50f95 100644
--- a/src/cmd/go/testdata/script/test_fuzz_chatty.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_chatty.txt
@@ -37,11 +37,9 @@ go test -v chatty_with_test_fuzz_test.go -fuzz=Fuzz -fuzztime=1x
stdout ok
stdout PASS
! stdout FAIL
-# TODO: It's currently the case that it's logged twice. Fix that, and change
-# this check to verify it.
-stdout 'all good here'
+stdout -count=1 'all good here'
# Verify that the unit test is only run once.
-! stdout '(?s)logged foo.*logged foo'
+stdout -count=1 'logged foo'
-- chatty_error_fuzz_test.go --
package chatty_error_fuzz
diff --git a/src/cmd/go/testdata/script/test_fuzz_cov.txt b/src/cmd/go/testdata/script/test_fuzz_cov.txt
new file mode 100644
index 0000000000..05b634889f
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_fuzz_cov.txt
@@ -0,0 +1,33 @@
+# Test that coverage instrumentation is working. Without the instrumentation
+# it is _extremely_ unlikely that the fuzzer would produce this particular
+# input in any reasonable amount of time.
+
+[short] skip
+[!fuzz-instrumented] skip
+
+! go test -fuzz=FuzzCov
+! stderr 'cov instrumentation working'
+
+-- go.mod --
+module test
+
+-- cov_test.go --
+package cov
+
+import "testing"
+
+func FuzzCov(f *testing.F) {
+ f.Fuzz(func(t *testing.T, b []byte) {
+ if len(b) == 8 &&
+ b[0] == 'h' &&
+ b[1] == 'e' &&
+ b[2] == 'l' &&
+ b[3] == 'l' &&
+ b[4] == 'o' &&
+ b[5] == ' ' &&
+ b[6] == ':' &&
+ b[7] == ')' {
+ panic("cov instrumentation working")
+ }
+ })
+}
diff --git a/src/cmd/go/testdata/script/test_fuzz_dup_cache.txt b/src/cmd/go/testdata/script/test_fuzz_dup_cache.txt
new file mode 100644
index 0000000000..52d44a26ff
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_fuzz_dup_cache.txt
@@ -0,0 +1,52 @@
+[!fuzz] skip
+[short] skip
+
+# This test checks that cached corpus loading properly handles duplicate entries (this can
+# happen when an f.Add value has a duplicate entry in the cached corpus). Duplicate entries
+# should be discarded, and the rest of the cache should be loaded as normal.
+
+env GOCACHE=$WORK/cache
+env GODEBUG=fuzzdebug=1
+
+mkdir -p $GOCACHE/fuzz/fuzztest/FuzzTarget
+go run ./populate $GOCACHE/fuzz/fuzztest/FuzzTarget
+
+go test -fuzz=FuzzTarget -fuzztime=10x .
+stdout 'entries: 5'
+
+-- go.mod --
+module fuzztest
+
+go 1.17
+
+-- fuzz_test.go --
+package fuzz
+
+import "testing"
+
+func FuzzTarget(f *testing.F) {
+ f.Add(int(0))
+ f.Fuzz(func(t *testing.T, _ int) {})
+}
+
+-- populate/main.go --
+package main
+
+import (
+ "path/filepath"
+ "fmt"
+ "os"
+)
+
+func main() {
+ for i := 0; i < 10; i++ {
+ b := byte(0)
+ if i > 5 {
+ b = byte(i)
+ }
+ tmpl := "go test fuzz v1\nint(%d)\n"
+ if err := os.WriteFile(filepath.Join(os.Args[1], fmt.Sprint(i)), []byte(fmt.Sprintf(tmpl, b)), 0777); err != nil {
+ panic(err)
+ }
+ }
+}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/test_fuzz_match.txt b/src/cmd/go/testdata/script/test_fuzz_match.txt
index 0c0085f2c2..dbf987605f 100644
--- a/src/cmd/go/testdata/script/test_fuzz_match.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_match.txt
@@ -14,7 +14,7 @@ stdout '^ok'
go test -fuzz ThisWillNotMatch -fuzztime 1x standalone_fuzz_test.go
! stdout '^ok.*no tests to run'
stdout '^ok'
-stdout 'no targets to fuzz'
+stdout 'no fuzz tests to fuzz'
[short] stop
@@ -26,7 +26,7 @@ stdout '^ok'
# Matches no fuzz targets.
go test -run ThisWillNotMatch standalone_fuzz_test.go
stdout '^ok.*no tests to run'
-! stdout 'no targets to fuzz'
+! stdout 'no fuzz tests to fuzz'
-- standalone_fuzz_test.go --
package standalone_fuzz
diff --git a/src/cmd/go/testdata/script/test_fuzz_minimize.txt b/src/cmd/go/testdata/script/test_fuzz_minimize.txt
index 462fb9a963..a6dc3f1953 100644
--- a/src/cmd/go/testdata/script/test_fuzz_minimize.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_minimize.txt
@@ -67,7 +67,7 @@ rm testdata
! go test -fuzz=FuzzMinimizerNonrecoverable -run=FuzzMinimizerNonrecoverable -fuzztime=10000x .
! stdout '^ok'
! stdout 'minimizing'
-stdout -count=1 'fuzzing process terminated unexpectedly: exit status 99'
+stdout -count=1 '^\s+fuzzing process hung or terminated unexpectedly: exit status 99'
stdout FAIL
# Check that re-running the value causes a crash.
diff --git a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
index e017a4cad3..5d0de17f6b 100644
--- a/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_minimize_interesting.txt
@@ -1,3 +1,4 @@
+[short] skip
[!fuzz-instrumented] skip
# Test that when an interesting value is discovered (one that expands coverage),
@@ -15,21 +16,32 @@
go test -c -fuzz=. # Build using shared build cache for speed.
env GOCACHE=$WORK/gocache
exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinCache -test.fuzztime=1000x
-go run check_cache.go $GOCACHE/fuzz/FuzzMinCache
+go run check_cache/check_cache.go $GOCACHE/fuzz/FuzzMinCache
+
+go test -c -fuzz=. # Build using shared build cache for speed.
+env GOCACHE=$WORK/gocache
# Test that minimization occurs for a crash that appears while minimizing a
# newly found interesting input. There must be only one worker for this test to
# be flaky like we want.
-go test -c -fuzz=. # Build using shared build cache for speed.
-env GOCACHE=$WORK/gocache
-! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.fuzztime=10000x -test.parallel=1
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerCrashInMinimization -test.run=FuzzMinimizerCrashInMinimization -test.fuzztime=10000x -test.parallel=1
! stdout '^ok'
-stdout 'got the minimum size!'
+stdout -count=1 'got the minimum size!'
stdout -count=1 'flaky failure'
stdout FAIL
+# Check that the input written to testdata will reproduce the error, and is the
+# smallest possible.
+go run check_testdata/check_testdata.go FuzzMinimizerCrashInMinimization 50
-# Make sure the crash that was written will fail when run with go test
-! go test -run=FuzzMinimizerCrashInMinimization .
+# Test that a nonrecoverable error that occurs while minimizing an interesting
+# input is reported correctly.
+! exec ./fuzz.test$GOEXE -test.fuzzcachedir=$GOCACHE/fuzz -test.fuzz=FuzzMinimizerNonrecoverableCrashInMinimization -test.run=FuzzMinimizerNonrecoverableCrashInMinimization -test.fuzztime=10000x -test.parallel=1
+! stdout '^ok'
+stdout -count=1 'fuzzing process hung or terminated unexpectedly while minimizing'
+stdout -count=1 'EOF'
+stdout FAIL
+# Check that the input written to testdata will reproduce the error.
+go run check_testdata/check_testdata.go FuzzMinimizerNonrecoverableCrashInMinimization 100
-- go.mod --
module fuzz
@@ -43,8 +55,8 @@ import (
"io"
)
-func Y(w io.Writer, b []byte) {
- if !bytes.Equal(b, []byte("y")) {
+func Y(w io.Writer, s string) {
+ if !bytes.Equal([]byte(s), []byte("y")) {
w.Write([]byte("not equal"))
}
}
@@ -54,27 +66,56 @@ package fuzz
import (
"bytes"
"io"
+ "os"
+ "strings"
"testing"
+ "unicode/utf8"
)
func FuzzMinimizerCrashInMinimization(f *testing.F) {
- seed := make([]byte, 1000)
+ seed := strings.Repeat("A", 1000)
f.Add(seed)
- f.Fuzz(func(t *testing.T, b []byte) {
- if len(b) < 50 || len(b) > 1100 {
+ i := 3
+ f.Fuzz(func(t *testing.T, s string) {
+ if len(s) < 50 || len(s) > 1100 {
// Make sure that b is large enough that it can be minimized
return
}
- if !bytes.Equal(b, seed) {
- // This should have hit a new edge, and the interesting input
- // should be attempting minimization
- Y(io.Discard, b)
+ if s != seed {
+ // This should hit a new edge, and the interesting input
+ // should attempt minimization
+ Y(io.Discard, s)
}
- if len(b) < 350 {
+ if i > 0 {
+ // Don't let it fail right away.
+ i--
+ } else if utf8.RuneCountInString(s) == len(s) && len(s) <= 100 {
+ // Make sure this only fails if the number of bytes in the
+ // marshaled string is the same as the unmarshaled string,
+ // so that we can check the length of the testdata file.
t.Error("flaky failure")
+ if len(s) == 50 {
+ t.Error("got the minimum size!")
+ }
}
- if len(b) == 50 {
- t.Log("got the minimum size!")
+ })
+}
+
+func FuzzMinimizerNonrecoverableCrashInMinimization(f *testing.F) {
+ seed := strings.Repeat("A", 1000)
+ f.Add(seed)
+ i := 3
+ f.Fuzz(func(t *testing.T, s string) {
+ if len(s) < 50 || len(s) > 1100 {
+ return
+ }
+ if s != seed {
+ Y(io.Discard, s)
+ }
+ if i > 0 {
+ i--
+ } else if utf8.RuneCountInString(s) == len(s) && len(s) <= 100 {
+ os.Exit(19)
}
})
}
@@ -99,7 +140,59 @@ func sum(buf []byte) int {
}
return n
}
--- check_cache.go --
+-- check_testdata/check_testdata.go --
+//go:build ignore
+// +build ignore
+
+// check_testdata.go checks that the string written to testdata
+// is not longer than the provided length.
+package main
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strconv"
+)
+
+func main() {
+ wantLen, err := strconv.Atoi(os.Args[2])
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ testName := os.Args[1]
+ dir := filepath.Join("testdata/fuzz", testName)
+
+ files, err := ioutil.ReadDir(dir)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if len(files) == 0 {
+ fmt.Fprintf(os.Stderr, "expect at least one failure to be written to testdata\n")
+ os.Exit(1)
+ }
+
+ fname := files[0].Name()
+ contents, err := ioutil.ReadFile(filepath.Join(dir, fname))
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ contentsLen := len(contents) - len(`go test fuzz v1
+string("")
+`)
+ if got, want := contentsLen, wantLen; got > want {
+ fmt.Fprintf(os.Stderr, "expect length <= %d, got %d\n", want, got)
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", contents)
+}
+
+-- check_cache/check_cache.go --
//go:build ignore
// +build ignore
diff --git a/src/cmd/go/testdata/script/test_fuzz_multiple.txt b/src/cmd/go/testdata/script/test_fuzz_multiple.txt
index d96b2b6206..1ec4985613 100644
--- a/src/cmd/go/testdata/script/test_fuzz_multiple.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_multiple.txt
@@ -18,7 +18,7 @@ go test -fuzz=. -fuzztime=1x ./one
# With fuzzing enabled, at most one target in the same package may match.
! go test -fuzz=. ./two
-stdout '^testing: will not fuzz, -fuzz matches more than one target: \[FuzzOne FuzzTwo\]$'
+stdout '^testing: will not fuzz, -fuzz matches more than one fuzz test: \[FuzzOne FuzzTwo\]$'
go test -fuzz=FuzzTwo -fuzztime=1x ./two
-- go.mod --
diff --git a/src/cmd/go/testdata/script/test_fuzz_mutate_crash.txt b/src/cmd/go/testdata/script/test_fuzz_mutate_crash.txt
index 4c4fa8e651..99bae1daf0 100644
--- a/src/cmd/go/testdata/script/test_fuzz_mutate_crash.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_mutate_crash.txt
@@ -54,9 +54,14 @@ go run check_testdata.go FuzzWithFatalf
! go test -run=FuzzWithBadExit -fuzz=FuzzWithBadExit -fuzztime=100x -fuzzminimizetime=1000x
stdout 'testdata[/\\]fuzz[/\\]FuzzWithBadExit[/\\]'
-stdout 'unexpectedly'
+stdout '^\s+fuzzing process hung or terminated unexpectedly: exit status'
go run check_testdata.go FuzzWithBadExit
+! go test -run=FuzzDeadlock -fuzz=FuzzDeadlock -fuzztime=100x -fuzzminimizetime=0x
+stdout 'testdata[/\\]fuzz[/\\]FuzzDeadlock[/\\]'
+stdout '^\s+fuzzing process hung or terminated unexpectedly: exit status'
+go run check_testdata.go FuzzDeadlock
+
# Running the fuzzer should find a crashing input quickly for fuzzing two types.
! go test -run=FuzzWithTwoTypes -fuzz=FuzzWithTwoTypes -fuzztime=100x -fuzzminimizetime=1000x
stdout 'testdata[/\\]fuzz[/\\]FuzzWithTwoTypes[/\\]'
@@ -190,6 +195,15 @@ func FuzzWithBadExit(f *testing.F) {
})
}
+func FuzzDeadlock(f *testing.F) {
+ f.Add(int(0))
+ f.Fuzz(func(t *testing.T, n int) {
+ if n != 0 {
+ select {}
+ }
+ })
+}
+
func FuzzWithTwoTypes(f *testing.F) {
f.Fuzz(func(t *testing.T, a, b []byte) {
if len(a) > 0 && len(b) > 0 {
diff --git a/src/cmd/go/testdata/script/test_fuzz_mutator_repeat.txt b/src/cmd/go/testdata/script/test_fuzz_mutator_repeat.txt
index 15d7cb6b32..3764dcb915 100644
--- a/src/cmd/go/testdata/script/test_fuzz_mutator_repeat.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_mutator_repeat.txt
@@ -12,8 +12,8 @@
# The fuzzing engine reconstructs the crashing input and saves it to testdata.
! exists want
! go test -fuzz=. -parallel=1 -fuzztime=110x -fuzzminimizetime=10x -v
-stdout 'fuzzing process terminated unexpectedly'
-stdout 'Crash written to testdata'
+stdout '^\s+fuzzing process hung or terminated unexpectedly: exit status'
+stdout 'Failing input written to testdata'
# Run the fuzz target without fuzzing. The fuzz function is called with the
# crashing input in testdata. The test passes if that input is identical to
diff --git a/src/cmd/go/testdata/script/test_fuzz_non_crash_signal.txt b/src/cmd/go/testdata/script/test_fuzz_non_crash_signal.txt
index 31d54bcb70..1051292fcb 100644
--- a/src/cmd/go/testdata/script/test_fuzz_non_crash_signal.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_non_crash_signal.txt
@@ -25,7 +25,7 @@ stdout 'fuzzing process terminated by unexpected signal; no crash will be record
# We should save a crasher.
! go test -fuzz=FuzzCrash
exists testdata/fuzz/FuzzCrash
-stdout 'fuzzing process terminated unexpectedly'
+stdout '^\s+fuzzing process hung or terminated unexpectedly: exit status'
-- go.mod --
module test
diff --git a/src/cmd/go/testdata/script/test_fuzz_parallel.txt b/src/cmd/go/testdata/script/test_fuzz_parallel.txt
index 1795e0b2a5..e6325208d0 100644
--- a/src/cmd/go/testdata/script/test_fuzz_parallel.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_parallel.txt
@@ -13,6 +13,13 @@ go test -run=FuzzSeed
! go test -run=FuzzMutate -fuzz=FuzzMutate
exists testdata/fuzz/FuzzMutate
+# Testdata should now contain a corpus entry which will fail FuzzMutate.
+# Run the test without fuzzing, setting -parallel to different values to make
+# sure it fails, and doesn't hang.
+! go test -run=FuzzMutate -parallel=1
+! go test -run=FuzzMutate -parallel=2
+! go test -run=FuzzMutate -parallel=4
+
-- go.mod --
module fuzz_parallel
diff --git a/src/cmd/go/testdata/script/test_fuzz_seed_corpus.txt b/src/cmd/go/testdata/script/test_fuzz_seed_corpus.txt
index 4be9a6e385..57c8a8ba65 100644
--- a/src/cmd/go/testdata/script/test_fuzz_seed_corpus.txt
+++ b/src/cmd/go/testdata/script/test_fuzz_seed_corpus.txt
@@ -6,7 +6,7 @@ env GOCACHE=$WORK/cache
# and doesn't write anything to testdata/fuzz
! go test -fuzz=FuzzWithAdd -run=FuzzWithAdd -fuzztime=1x
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
stdout FAIL
# Test that fuzzing a target with a sucess in f.Add and a fuzztime of only
@@ -19,15 +19,15 @@ stdout ok
# and doesn't write anything to testdata/fuzz
! go test -fuzz=FuzzWithTestdata -run=FuzzWithTestdata -fuzztime=1x
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
-stdout 'found a crash while testing seed corpus entry: FuzzWithTestdata/1'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
+stdout 'failure while testing seed corpus entry: FuzzWithTestdata/1'
stdout FAIL
# Test that fuzzing a target with no seed corpus or cache finds a crash, prints
# it, and write it to testdata
! go test -fuzz=FuzzWithNoCache -run=FuzzWithNoCache -fuzztime=1x
! stdout ^ok
-stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithNoCache[/\\]'
+stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithNoCache[/\\]'
stdout FAIL
# Write a crashing input to the cache
@@ -38,7 +38,7 @@ cp cache-file $GOCACHE/fuzz/example.com/x/FuzzWithCache/1
# and writes this as a "new" crash to testdata/fuzz
! go test -fuzz=FuzzWithCache -run=FuzzWithCache -fuzztime=1x
! stdout ^ok
-stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithCache[/\\]'
+stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithCache[/\\]'
stdout FAIL
# Write a crashing input to the cache
@@ -52,7 +52,7 @@ cp cache-file-bytes $GOCACHE/fuzz/example.com/x/FuzzWithMinimizableCache/1
stdout 'gathering baseline coverage'
stdout 'got the minimum size!'
stdout 'contains a non-zero byte of length 10'
-stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithMinimizableCache[/\\]'
+stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithMinimizableCache[/\\]'
stdout FAIL
# Make sure this crash didn't come from fuzzing
# (the log line that states fuzzing began shouldn't have printed)
@@ -70,7 +70,7 @@ go clean -fuzzcache
# the crash and doesn't write anything to testdata/fuzz -fuzztime=1x
! go test -fuzz=FuzzWithAdd -run=None
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
stdout FAIL
# Test that fuzzing a target (with -run=None set) with a sucess in f.Add and a
@@ -83,7 +83,7 @@ stdout ok
# testdata/fuzz prints the crash and doesn't write anything to testdata/fuzz
! go test -fuzz=FuzzWithTestdata -run=None -fuzztime=1x
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
stdout FAIL
# Write a crashing input to the cache
@@ -94,7 +94,7 @@ cp cache-file $GOCACHE/fuzz/example.com/x/FuzzRunNoneWithCache/1
# prints the crash and writes this as a "new" crash to testdata/fuzz
! go test -fuzz=FuzzRunNoneWithCache -run=None -fuzztime=1x
! stdout ^ok
-stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzRunNoneWithCache[/\\]'
+stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzRunNoneWithCache[/\\]'
stdout FAIL
# Clear the fuzz cache and make sure it's gone
@@ -109,14 +109,14 @@ go clean -fuzzcache
go test -c
! exec ./x.test$GOEXE -test.fuzz=FuzzWithAdd -test.run=FuzzWithAdd -test.fuzztime=1x -test.fuzzcachedir=$WORK/cache
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithAdd[/\\]'
stdout FAIL
stderr warning
go test -c
! exec ./x.test$GOEXE -test.fuzz=FuzzWithTestdata -test.run=FuzzWithTestdata -test.fuzztime=1x -test.fuzzcachedir=$WORK/cache
! stdout ^ok
-! stdout 'Crash written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
+! stdout 'Failing input written to testdata[/\\]fuzz[/\\]FuzzWithTestdata[/\\]'
stdout FAIL
stderr warning
diff --git a/src/cmd/go/testdata/script/test_fuzz_test_race.txt b/src/cmd/go/testdata/script/test_fuzz_test_race.txt
new file mode 100644
index 0000000000..9d39cd684e
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_fuzz_test_race.txt
@@ -0,0 +1,39 @@
+# Test that when both race detection and coverage instrumentation are enabled,
+# and seed values are being executed, the race detector isn't mistakenly
+# triggered.
+
+[short] skip
+[!fuzz] skip
+[!race] skip
+
+# Test with coverage instrumentation enabled (-fuzz) and race instrumentation
+# but without actually fuzzing the target (by using a non-matching pattern)
+go test -fuzz=xxx -race -v
+! stderr 'race detected during execution of test'
+
+# Test with just race instrumentation enabled
+go test -race -v
+! stderr 'race detected during execution of test'
+
+# Test with coverage and race instrumentation enabled, and a matching fuzz
+# pattern
+go test -fuzz=FuzzRace -race -v -fuzztime=200x
+! stderr 'race detected during execution of test'
+
+-- go.mod --
+module test
+
+-- race_test.go --
+package race
+
+import "testing"
+
+func FuzzRace(f *testing.F) {
+ for i := 0; i < 100; i++ {
+ f.Add(i)
+ }
+
+ f.Fuzz(func(t *testing.T, i int) {
+ t.Parallel()
+ })
+}
diff --git a/src/cmd/go/testdata/script/test_issue45477.txt b/src/cmd/go/testdata/script/test_issue45477.txt
new file mode 100644
index 0000000000..f435b6a6f4
--- /dev/null
+++ b/src/cmd/go/testdata/script/test_issue45477.txt
@@ -0,0 +1,12 @@
+[short] skip # links and runs a test binary
+
+go test -v .
+
+-- go.mod --
+module example.com/pkg_test
+
+-- pkg.go --
+package pkg_test
+
+-- pkg_test.go --
+package pkg_test
diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt
index 8c08bae725..adca7af7a9 100644
--- a/src/cmd/go/testdata/script/version.txt
+++ b/src/cmd/go/testdata/script/version.txt
@@ -16,7 +16,14 @@ stdout '^go version'
env GOFLAGS=
env GO111MODULE=on
-# Skip the builds below if we are running in short mode.
+
+# Check that very basic version lookup succeeds.
+go build empty.go
+go version empty$GOEXE
+[cgo] go build -ldflags=-linkmode=external empty.go
+[cgo] go version empty$GOEXE
+
+# Skip the remaining builds if we are running in short mode.
[short] skip
# Check that 'go version' and 'go version -m' work on a binary built in module mode.
@@ -57,3 +64,7 @@ stdout '^\tmod\trsc.io/fortune\tv1.0.0'
-- go.mod --
module m
+
+-- empty.go --
+package main
+func main(){}
diff --git a/src/cmd/go/testdata/script/version_build_settings.txt b/src/cmd/go/testdata/script/version_build_settings.txt
index 1ced285ac3..dc9e67681e 100644
--- a/src/cmd/go/testdata/script/version_build_settings.txt
+++ b/src/cmd/go/testdata/script/version_build_settings.txt
@@ -3,22 +3,25 @@
# Compiler name is always added.
go build
go version -m m$GOEXE
-stdout '^\tbuild\tcompiler\tgc$'
+stdout '^\tbuild\t-compiler=gc$'
+stdout '^\tbuild\tGOOS='
+stdout '^\tbuild\tGOARCH='
+[amd64] stdout '^\tbuild\tGOAMD64='
! stdout asmflags|gcflags|ldflags|gccgoflags
# Toolchain flags are added if present.
# The raw flags are included, with package patterns if specified.
go build -asmflags=example.com/m=-D=FOO=bar
go version -m m$GOEXE
-stdout '^\tbuild\tasmflags\texample\.com/m=-D=FOO=bar$'
+stdout '^\tbuild\t-asmflags=example\.com/m=-D=FOO=bar$'
go build -gcflags=example.com/m=-N
go version -m m$GOEXE
-stdout '^\tbuild\tgcflags\texample\.com/m=-N$'
+stdout '^\tbuild\t-gcflags=example\.com/m=-N$'
go build -ldflags=example.com/m=-w
go version -m m$GOEXE
-stdout '^\tbuild\tldflags\texample\.com/m=-w$'
+stdout '^\tbuild\t-ldflags=example\.com/m=-w$'
# gccgoflags are not added when gc is used, and vice versa.
# TODO: test gccgo.
@@ -30,10 +33,11 @@ go version -m m$GOEXE
# "race" is included with build tags but not "cgo".
go build -tags=a,b
go version -m m$GOEXE
-stdout '^\tbuild\ttags\ta,b(,goexperiment\.[a-z0-9]+)*$'
+stdout '^\tbuild\t-tags=a,b$'
[race] go build -race
[race] go version -m m$GOEXE
-[race] stdout '^\tbuild\ttags\t.*race.*$'
+[race] ! stdout '^\tbuild\t-tags='
+[race] stdout '^\tbuild\t-race=true$'
# CGO flags are separate settings.
# CGO_ENABLED is always present.
@@ -41,7 +45,7 @@ stdout '^\tbuild\ttags\ta,b(,goexperiment\.[a-z0-9]+)*$'
env CGO_ENABLED=0
go build
go version -m m$GOEXE
-stdout '^\tbuild\tCGO_ENABLED\tfalse$'
+stdout '^\tbuild\tCGO_ENABLED=0$'
! stdout CGO_CPPFLAGS|CGO_CFLAGS|CGO_CXXFLAGS|CGO_LDFLAGS
[cgo] env CGO_ENABLED=1
[cgo] env CGO_CPPFLAGS=-DFROM_CPPFLAGS=1
@@ -50,11 +54,11 @@ stdout '^\tbuild\tCGO_ENABLED\tfalse$'
[cgo] env CGO_LDFLAGS=-L/extra/dir/does/not/exist
[cgo] go build
[cgo] go version -m m$GOEXE
-[cgo] stdout '^\tbuild\tCGO_ENABLED\ttrue$'
-[cgo] stdout '^\tbuild\tCGO_CPPFLAGS\t-DFROM_CPPFLAGS=1$'
-[cgo] stdout '^\tbuild\tCGO_CFLAGS\t-DFROM_CFLAGS=1$'
-[cgo] stdout '^\tbuild\tCGO_CXXFLAGS\t-DFROM_CXXFLAGS=1$'
-[cgo] stdout '^\tbuild\tCGO_LDFLAGS\t-L/extra/dir/does/not/exist$'
+[cgo] stdout '^\tbuild\tCGO_ENABLED=1$'
+[cgo] stdout '^\tbuild\tCGO_CPPFLAGS=-DFROM_CPPFLAGS=1$'
+[cgo] stdout '^\tbuild\tCGO_CFLAGS=-DFROM_CFLAGS=1$'
+[cgo] stdout '^\tbuild\tCGO_CXXFLAGS=-DFROM_CXXFLAGS=1$'
+[cgo] stdout '^\tbuild\tCGO_LDFLAGS=-L/extra/dir/does/not/exist$'
-- go.mod --
module example.com/m
diff --git a/src/cmd/go/testdata/script/version_buildvcs_bzr.txt b/src/cmd/go/testdata/script/version_buildvcs_bzr.txt
index 83069713d7..85db9bab6d 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_bzr.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_bzr.txt
@@ -31,14 +31,15 @@ cd ..
env PATH=$oldpath
rm .bzr
-# If there is an empty repository in a parent directory, only "uncommitted" is tagged.
+# If there is an empty repository in a parent directory, only "modified" is tagged.
exec bzr init
cd a
go install
go version -m $GOBIN/a$GOEXE
-! stdout bzrrevision
-! stdout bzrcommittime
-stdout '^\tbuild\tbzruncommitted\ttrue$'
+stdout '^\tbuild\tvcs=bzr$'
+! stdout vcs.revision
+! stdout vcs.time
+stdout '^\tbuild\tvcs.modified=true$'
cd ..
# Revision and commit time are tagged for repositories with commits.
@@ -47,9 +48,10 @@ exec bzr commit -m 'initial commit'
cd a
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tbzrrevision\t'
-stdout '^\tbuild\tbzrcommittime\t'
-stdout '^\tbuild\tbzruncommitted\tfalse$'
+stdout '^\tbuild\tvcs=bzr$'
+stdout '^\tbuild\tvcs.revision='
+stdout '^\tbuild\tvcs.time='
+stdout '^\tbuild\tvcs.modified=false$'
rm $GOBIN/a$GOEXE
# Building an earlier commit should still build clean.
@@ -59,29 +61,30 @@ exec bzr commit -m 'add NEWS'
exec bzr update -r1
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tbzrrevision\t'
-stdout '^\tbuild\tbzrcommittime\t'
-stdout '^\tbuild\tbzruncommitted\tfalse$'
+stdout '^\tbuild\tvcs=bzr$'
+stdout '^\tbuild\tvcs.revision='
+stdout '^\tbuild\tvcs.time='
+stdout '^\tbuild\tvcs.modified=false$'
# Building with -buildvcs=false suppresses the info.
go install -buildvcs=false
go version -m $GOBIN/a$GOEXE
-! stdout bzrrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
-# An untracked file is shown as uncommitted, even if it isn't part of the build.
+# An untracked file is shown as modified, even if it isn't part of the build.
cp ../../outside/empty.txt .
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tbzruncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
rm empty.txt
rm $GOBIN/a$GOEXE
-# An edited file is shown as uncommitted, even if it isn't part of the build.
+# An edited file is shown as modified, even if it isn't part of the build.
cp ../../outside/empty.txt ../README
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tbzruncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
exec bzr revert ../README
rm $GOBIN/a$GOEXE
diff --git a/src/cmd/go/testdata/script/version_buildvcs_fossil.txt b/src/cmd/go/testdata/script/version_buildvcs_fossil.txt
index 3a4bde883f..720306868b 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_fossil.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_fossil.txt
@@ -19,7 +19,7 @@ cd repo/a
# If there's no local repository, there's no VCS info.
go install
go version -m $GOBIN/a$GOEXE
-! stdout fossilrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
# If there is a repository, but it can't be used for some reason,
@@ -44,30 +44,33 @@ exec fossil commit -m 'initial commit'
cd a
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tfossilrevision\t'
-stdout '^\tbuild\tfossilcommittime\t'
-stdout '^\tbuild\tfossiluncommitted\tfalse$'
+stdout '^\tbuild\tvcs=fossil\n'
+stdout '^\tbuild\tvcs.revision='
+stdout '^\tbuild\tvcs.time='
+stdout '^\tbuild\tvcs.modified=false$'
rm $GOBIN/a$GOEXE
# Building with -buildvcs=false suppresses the info.
go install -buildvcs=false
go version -m $GOBIN/a$GOEXE
-! stdout fossilrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
-# An untracked file is shown as uncommitted, even if it isn't part of the build.
+# An untracked file is shown as modified, even if it isn't part of the build.
cp ../../outside/empty.txt .
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tfossiluncommitted\ttrue$'
+stdout '^\tbuild\tvcs=fossil\n'
+stdout '^\tbuild\tvcs.modified=true$'
rm empty.txt
rm $GOBIN/a$GOEXE
-# An edited file is shown as uncommitted, even if it isn't part of the build.
+# An edited file is shown as modified, even if it isn't part of the build.
cp ../../outside/empty.txt ../README
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tfossiluncommitted\ttrue$'
+stdout '^\tbuild\tvcs=fossil\n'
+stdout '^\tbuild\tvcs.modified=true$'
exec fossil revert ../README
rm $GOBIN/a$GOEXE
diff --git a/src/cmd/go/testdata/script/version_buildvcs_git.txt b/src/cmd/go/testdata/script/version_buildvcs_git.txt
index 3d56c6d8b4..86d1de06df 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_git.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_git.txt
@@ -11,11 +11,12 @@ cd repo/a
# If there's no local repository, there's no VCS info.
go install
go version -m $GOBIN/a$GOEXE
-! stdout gitrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
# If there is a repository, but it can't be used for some reason,
# there should be an error. It should hint about -buildvcs=false.
+# Also ensure that multiple errors are collected by "go list -e".
cd ..
mkdir .git
env PATH=$WORK${/}fakebin${:}$oldpath
@@ -24,6 +25,10 @@ chmod 0755 $WORK/fakebin/git
cd a
! go install
stderr '^error obtaining VCS status: exit status 1\n\tUse -buildvcs=false to disable VCS stamping.$'
+go list -e -f '{{.ImportPath}}: {{.Error}}' ./...
+stdout -count=1 '^example\.com/a: error obtaining VCS status'
+stdout -count=1 '^example\.com/a/library: '
+stdout -count=1 '^example\.com/a/othermain: error obtaining VCS status'
cd ..
env PATH=$oldpath
rm .git
@@ -35,9 +40,10 @@ exec git config user.name 'J.R. Gopher'
cd a
go install
go version -m $GOBIN/a$GOEXE
-! stdout gitrevision
-! stdout gitcommittime
-stdout '^\tbuild\tgituncommitted\ttrue$'
+stdout '^\tbuild\tvcs=git$'
+stdout '^\tbuild\tvcs.modified=true$'
+! stdout vcs.revision
+! stdout vcs.time
rm $GOBIN/a$GOEXE
# Revision and commit time are tagged for repositories with commits.
@@ -45,22 +51,22 @@ exec git add -A
exec git commit -m 'initial commit'
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tgitrevision\t'
-stdout '^\tbuild\tgitcommittime\t'
-stdout '^\tbuild\tgituncommitted\tfalse$'
+stdout '^\tbuild\tvcs.revision='
+stdout '^\tbuild\tvcs.time='
+stdout '^\tbuild\tvcs.modified=false$'
rm $GOBIN/a$GOEXE
# Building with -buildvcs=false suppresses the info.
go install -buildvcs=false
go version -m $GOBIN/a$GOEXE
-! stdout gitrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
# An untracked file is shown as uncommitted, even if it isn't part of the build.
cp ../../outside/empty.txt .
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tgituncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
rm empty.txt
rm $GOBIN/a$GOEXE
@@ -68,7 +74,7 @@ rm $GOBIN/a$GOEXE
cp ../../outside/empty.txt ../README
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\tgituncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
exec git checkout ../README
rm $GOBIN/a$GOEXE
@@ -76,14 +82,14 @@ rm $GOBIN/a$GOEXE
# there should be no VCS info.
go install example.com/cmd/a@v1.0.0
go version -m $GOBIN/a$GOEXE
-! stdout gitrevision
+! stdout vcs.revision
rm $GOBIN/a$GOEXE
go mod edit -require=example.com/c@v0.0.0
go mod edit -replace=example.com/c@v0.0.0=../../outside/c
go install example.com/c
go version -m $GOBIN/c$GOEXE
-! stdout gitrevision
+! stdout vcs.revision
rm $GOBIN/c$GOEXE
exec git checkout go.mod
@@ -95,10 +101,18 @@ go mod edit -require=example.com/d@v0.0.0
go mod edit -replace=example.com/d@v0.0.0=../../outside/d
go install example.com/d
go version -m $GOBIN/d$GOEXE
-! stdout gitrevision
+! stdout vcs.revision
exec git checkout go.mod
rm $GOBIN/d$GOEXE
+# If we're loading multiple main packages,
+# but they share the same VCS repository,
+# we only need to execute VCS status commands once.
+go list -x ./...
+stdout -count=3 '^example.com'
+stderr -count=1 '^git status'
+stderr -count=1 '^git show'
+
-- $WORK/fakebin/git --
#!/bin/sh
exit 1
@@ -114,6 +128,12 @@ go 1.18
-- repo/a/a.go --
package main
+func main() {}
+-- repo/a/library/f.go --
+package library
+-- repo/a/othermain/f.go --
+package main
+
func main() {}
-- repo/b/go.mod --
module example.com/b
diff --git a/src/cmd/go/testdata/script/version_buildvcs_git_gpg.txt b/src/cmd/go/testdata/script/version_buildvcs_git_gpg.txt
new file mode 100644
index 0000000000..dcf97d7c44
--- /dev/null
+++ b/src/cmd/go/testdata/script/version_buildvcs_git_gpg.txt
@@ -0,0 +1,105 @@
+# This test checks that VCS information is stamped into Go binaries even when
+# the current commit is signed and the user has configured git to display commit
+# signatures.
+
+[!exec:git] skip
+[!exec:gpg] skip
+[short] skip
+env GOBIN=$GOPATH/bin
+env GNUPGHOME=$WORK/.gpupg
+mkdir $GNUPGHOME
+chmod 0700 $GNUPGHOME
+
+# Create GPG key
+exec gpg --batch --passphrase '' --quick-generate-key gopher@golang.org
+exec gpg --list-secret-keys --with-colons gopher@golang.org
+cp stdout keyinfo.txt
+go run extract_key_id.go keyinfo.txt
+cp stdout keyid.txt
+
+# Initialize repo
+cd repo/
+exec git init
+exec git config user.email gopher@golang.org
+exec git config user.name 'J.R. Gopher'
+exec git config --add log.showSignature true
+go run ../configure_signing_key.go ../keyid.txt
+
+# Create signed commit
+cd a
+exec git add -A
+exec git commit -m 'initial commit' --gpg-sign
+exec git log
+
+# Verify commit signature does not interfere with versioning
+go install
+go version -m $GOBIN/a
+stdout '^\tbuild\tvcs\.revision='
+stdout '^\tbuild\tvcs\.time='
+stdout '^\tbuild\tvcs\.modified=false$'
+
+-- repo/README --
+Far out in the uncharted backwaters of the unfashionable end of the western
+spiral arm of the Galaxy lies a small, unregarded yellow sun.
+-- repo/a/go.mod --
+module example.com/a
+
+go 1.18
+-- repo/a/a.go --
+package main
+
+func main() {}
+
+-- extract_key_id.go --
+package main
+
+import "fmt"
+import "io/ioutil"
+import "os"
+import "strings"
+
+func main() {
+ err := run(os.Args[1])
+ if err != nil {
+ panic(err)
+ }
+}
+
+func run(keyInfoFilePath string) error {
+ contents, err := ioutil.ReadFile(keyInfoFilePath)
+ if err != nil {
+ return err
+ }
+ lines := strings.Split(string(contents), "\n")
+ for _, line := range lines {
+ fields := strings.Split(line, ":")
+ if fields[0] == "sec" {
+ fmt.Print(fields[4])
+ return nil
+ }
+ }
+ return fmt.Errorf("key ID not found in: %s", keyInfoFilePath)
+}
+
+-- configure_signing_key.go --
+package main
+
+import "io/ioutil"
+import "os"
+import "os/exec"
+
+func main() {
+ err := run(os.Args[1])
+ if err != nil {
+ panic(err)
+ }
+}
+
+func run(keyIdFilePath string) error {
+ keyId, err := ioutil.ReadFile(keyIdFilePath)
+ if err != nil {
+ return err
+ }
+ gitCmd := exec.Command("git", "config", "user.signingKey", string(keyId))
+ return gitCmd.Run()
+}
diff --git a/src/cmd/go/testdata/script/version_buildvcs_hg.txt b/src/cmd/go/testdata/script/version_buildvcs_hg.txt
index df4938742d..fbbd886102 100644
--- a/src/cmd/go/testdata/script/version_buildvcs_hg.txt
+++ b/src/cmd/go/testdata/script/version_buildvcs_hg.txt
@@ -34,9 +34,9 @@ exec hg init
cd a
go install
go version -m $GOBIN/a$GOEXE
-! stdout hgrevision
-! stdout hgcommittime
-stdout '^\tbuild\thguncommitted\ttrue$'
+! stdout vcs.revision
+! stdout vcs.time
+stdout '^\tbuild\tvcs.modified=true$'
cd ..
# Revision and commit time are tagged for repositories with commits.
@@ -45,9 +45,9 @@ exec hg commit -m 'initial commit'
cd a
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\thgrevision\t'
-stdout '^\tbuild\thgcommittime\t'
-stdout '^\tbuild\thguncommitted\tfalse$'
+stdout '^\tbuild\tvcs.revision='
+stdout '^\tbuild\tvcs.time='
+stdout '^\tbuild\tvcs.modified=false$'
rm $GOBIN/a$GOEXE
# Building with -buildvcs=false suppresses the info.
@@ -60,7 +60,7 @@ rm $GOBIN/a$GOEXE
cp ../../outside/empty.txt .
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\thguncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
rm empty.txt
rm $GOBIN/a$GOEXE
@@ -68,7 +68,7 @@ rm $GOBIN/a$GOEXE
cp ../../outside/empty.txt ../README
go install
go version -m $GOBIN/a$GOEXE
-stdout '^\tbuild\thguncommitted\ttrue$'
+stdout '^\tbuild\tvcs.modified=true$'
exec hg revert ../README
rm $GOBIN/a$GOEXE
diff --git a/src/cmd/go/testdata/script/work.txt b/src/cmd/go/testdata/script/work.txt
index 613f037615..cbb3746a69 100644
--- a/src/cmd/go/testdata/script/work.txt
+++ b/src/cmd/go/testdata/script/work.txt
@@ -1,9 +1,9 @@
-! go mod initwork doesnotexist
+! go work init doesnotexist
stderr 'go: creating workspace file: no go.mod file exists in directory doesnotexist'
go env GOWORK
! stdout .
-go mod initwork ./a ./b
+go work init ./a ./b
cmp go.work go.work.want
go env GOWORK
stdout '^'$WORK'(\\|/)gopath(\\|/)src(\\|/)go.work$'
@@ -34,7 +34,7 @@ go list -mod=readonly all
stderr '^go: -mod may only be set to readonly when in workspace mode'
go list -mod=mod -workfile=off all
-# Test that duplicates in the directory list return an error
+# Test that duplicates in the use list return an error
cp go.work go.work.backup
cp go.work.dup go.work
! go run example.com/b
@@ -59,7 +59,7 @@ go build -n -o foo foo.go
-- go.work.dup --
go 1.18
-directory (
+use (
a
b
../src/a
@@ -67,14 +67,14 @@ directory (
-- go.work.want --
go 1.18
-directory (
+use (
./a
./b
)
-- go.work.d --
go 1.18
-directory (
+use (
a
b
d
@@ -133,7 +133,7 @@ func main() {
-- go.work.backwards --
go 1.18
-directory (
+use (
d
b
a
diff --git a/src/cmd/go/testdata/script/work_build_no_modules.txt b/src/cmd/go/testdata/script/work_build_no_modules.txt
new file mode 100644
index 0000000000..c9859b437e
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_build_no_modules.txt
@@ -0,0 +1,13 @@
+! go build .
+stderr 'go: no modules were found in the current workspace; see ''go help work'''
+
+-- go.work --
+go 1.18
+-- go.mod --
+go 1.18
+
+module foo
+-- foo.go --
+package main
+
+func main() {}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_edit.txt b/src/cmd/go/testdata/script/work_edit.txt
index 979c1f98e0..fd04bbda6e 100644
--- a/src/cmd/go/testdata/script/work_edit.txt
+++ b/src/cmd/go/testdata/script/work_edit.txt
@@ -1,36 +1,36 @@
# Test editing go.work files.
-go mod initwork m
+go work init m
cmp go.work go.work.want_initial
-go mod editwork -directory n
-cmp go.work go.work.want_directory_n
+go work edit -use n
+cmp go.work go.work.want_use_n
-go mod editwork -go 1.18
+go work edit -go 1.18
cmp go.work go.work.want_go_118
-go mod editwork -dropdirectory m
-cmp go.work go.work.want_dropdirectory_m
+go work edit -dropuse m
+cmp go.work go.work.want_dropuse_m
-go mod editwork -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z'
+go work edit -replace=x.1@v1.3.0=y.1@v1.4.0 -replace='x.1@v1.4.0 = ../z'
cmp go.work go.work.want_add_replaces
-go mod editwork -directory n -directory ../a -directory /b -directory c -directory c
-cmp go.work go.work.want_multidirectory
+go work edit -use n -use ../a -use /b -use c -use c
+cmp go.work go.work.want_multiuse
-go mod editwork -dropdirectory /b -dropdirectory n
-cmp go.work go.work.want_multidropdirectory
+go work edit -dropuse /b -dropuse n
+cmp go.work go.work.want_multidropuse
-go mod editwork -dropreplace='x.1@v1.4.0'
+go work edit -dropreplace='x.1@v1.4.0'
cmp go.work go.work.want_dropreplace
-go mod editwork -print -go 1.19 -directory b -dropdirectory c -replace 'x.1@v1.4.0 = ../z' -dropreplace x.1 -dropreplace x.1@v1.3.0
+go work edit -print -go 1.19 -use b -dropuse c -replace 'x.1@v1.4.0 = ../z' -dropreplace x.1 -dropreplace x.1@v1.3.0
cmp stdout go.work.want_print
-go mod editwork -json -go 1.19 -directory b -dropdirectory c -replace 'x.1@v1.4.0 = ../z' -dropreplace x.1 -dropreplace x.1@v1.3.0
+go work edit -json -go 1.19 -use b -dropuse c -replace 'x.1@v1.4.0 = ../z' -dropreplace x.1 -dropreplace x.1@v1.3.0
cmp stdout go.work.want_json
-go mod editwork -print -fmt -workfile $GOPATH/src/unformatted
+go work edit -print -fmt -workfile $GOPATH/src/unformatted
cmp stdout formatted
-- m/go.mod --
@@ -40,38 +40,38 @@ go 1.18
-- go.work.want_initial --
go 1.18
-directory ./m
--- go.work.want_directory_n --
+use ./m
+-- go.work.want_use_n --
go 1.18
-directory (
+use (
./m
./n
)
-- go.work.want_go_118 --
go 1.18
-directory (
+use (
./m
./n
)
--- go.work.want_dropdirectory_m --
+-- go.work.want_dropuse_m --
go 1.18
-directory ./n
+use ./n
-- go.work.want_add_replaces --
go 1.18
-directory ./n
+use ./n
replace (
x.1 v1.3.0 => y.1 v1.4.0
x.1 v1.4.0 => ../z
)
--- go.work.want_multidirectory --
+-- go.work.want_multiuse --
go 1.18
-directory (
+use (
../a
./c
./n
@@ -82,10 +82,10 @@ replace (
x.1 v1.3.0 => y.1 v1.4.0
x.1 v1.4.0 => ../z
)
--- go.work.want_multidropdirectory --
+-- go.work.want_multidropuse --
go 1.18
-directory (
+use (
../a
./c
)
@@ -97,7 +97,7 @@ replace (
-- go.work.want_dropreplace --
go 1.18
-directory (
+use (
../a
./c
)
@@ -106,7 +106,7 @@ replace x.1 v1.3.0 => y.1 v1.4.0
-- go.work.want_print --
go 1.19
-directory (
+use (
../a
./b
)
@@ -115,7 +115,7 @@ replace x.1 v1.4.0 => ../z
-- go.work.want_json --
{
"Go": "1.19",
- "Directory": [
+ "Use": [
{
"DiskPath": "../a"
},
@@ -137,7 +137,7 @@ replace x.1 v1.4.0 => ../z
}
-- unformatted --
go 1.18
- directory (
+ use (
a
b
c
@@ -149,7 +149,7 @@ go 1.18
-- formatted --
go 1.18
-directory (
+use (
a
b
c
diff --git a/src/cmd/go/testdata/script/work_env.txt b/src/cmd/go/testdata/script/work_env.txt
index de67255696..ec3d3be3ed 100644
--- a/src/cmd/go/testdata/script/work_env.txt
+++ b/src/cmd/go/testdata/script/work_env.txt
@@ -19,6 +19,6 @@ stderr '^go: GOWORK cannot be modified$'
-- go.work --
go 1.18
-directory a
+use a
-- a/go.mod --
module example.com/a
diff --git a/src/cmd/go/testdata/script/work_install_submodule.txt b/src/cmd/go/testdata/script/work_install_submodule.txt
new file mode 100644
index 0000000000..3d1171736d
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_install_submodule.txt
@@ -0,0 +1,36 @@
+# This is a regression test for golang.org/issue/50036
+# Don't check sums for other modules in the workspace.
+
+cd m/sub
+go install -n
+
+-- go.work --
+go 1.18
+
+use (
+ ./m
+ ./m/sub
+)
+-- m/go.mod --
+module example.com/m
+
+go 1.18
+
+-- m/m.go --
+package m
+
+func M() {}
+-- m/sub/go.mod --
+module example.com/m/sub
+
+go 1.18
+
+require example.com/m v1.0.0
+-- m/sub/main.go --
+package main
+
+import "example.com/m"
+
+func main() {
+ m.M()
+}
diff --git a/src/cmd/go/testdata/script/work_prune.txt b/src/cmd/go/testdata/script/work_prune.txt
index f0fb073c4b..7e2ae4e6ce 100644
--- a/src/cmd/go/testdata/script/work_prune.txt
+++ b/src/cmd/go/testdata/script/work_prune.txt
@@ -14,12 +14,12 @@
# TODO(#48331): We currently load the wrong version of q. Fix this.
go list -m -f '{{.Version}}' example.com/q
-stdout '^v1.0.0$' # TODO(#48331): This should be 1.1.0. Fix this.
+stdout '^v1.1.0$'
-- go.work --
go 1.18
-directory (
+use (
./a
./p
)
diff --git a/src/cmd/go/testdata/script/work_prune_all.txt b/src/cmd/go/testdata/script/work_prune_all.txt
new file mode 100644
index 0000000000..a7ad9c04af
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_prune_all.txt
@@ -0,0 +1,176 @@
+# This test makes sure workspace mode's handling of the module graph
+# is compatible with module pruning. The graph we load from either of
+# the workspace modules should be the same, even if their graphs
+# don't overlap.
+#
+# This is the module graph in the test:
+#
+# example.com/p -> example.com/q v1.0.0
+# example.com/a -> example.com/b v1.0.0 -> example.com/q v1.1.0 -> example.com/w v1.0.0 -> example.com/x v1.0.0 -> example.com/y v1.0.0
+# |-> example.com/z v1.0.0 |-> example.com/z v1.1.0
+# |-> example.com/q v1.0.5 -> example.com/r v1.0.0
+# If we didn't load the whole graph and didn't load the dependencies of b
+# when loading p, we would end up loading q v1.0.0, rather than v1.1.0,
+# which is selected by MVS.
+
+go list -m all
+stdout 'example.com/w v1.0.0'
+stdout 'example.com/q v1.1.0'
+stdout 'example.com/z v1.1.0'
+stdout 'example.com/x v1.0.0'
+! stdout 'example.com/r'
+! stdout 'example.com/y'
+
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./p
+)
+
+replace example.com/b v1.0.0 => ./b
+replace example.com/q v1.0.0 => ./q1_0_0
+replace example.com/q v1.0.5 => ./q1_0_5
+replace example.com/q v1.1.0 => ./q1_1_0
+replace example.com/r v1.0.0 => ./r
+replace example.com/w v1.0.0 => ./w
+replace example.com/x v1.0.0 => ./x
+replace example.com/y v1.0.0 => ./y
+replace example.com/z v1.0.0 => ./z1_0_0
+replace example.com/z v1.1.0 => ./z1_1_0
+
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+
+require example.com/b v1.0.0
+require example.com/z v1.0.0
+-- a/foo.go --
+package main
+
+import "example.com/b"
+
+func main() {
+ b.B()
+}
+-- b/go.mod --
+module example.com/b
+
+go 1.18
+
+require example.com/q v1.1.0
+-- b/b.go --
+package b
+
+func B() {
+}
+-- p/go.mod --
+module example.com/p
+
+go 1.18
+
+require example.com/q v1.0.0
+
+replace example.com/q v1.0.0 => ../q1_0_0
+replace example.com/q v1.1.0 => ../q1_1_0
+-- p/main.go --
+package main
+
+import "example.com/q"
+
+func main() {
+ q.PrintVersion()
+}
+-- q1_0_0/go.mod --
+module example.com/q
+
+go 1.18
+-- q1_0_0/q.go --
+package q
+
+import "fmt"
+
+func PrintVersion() {
+ fmt.Println("version 1.0.0")
+}
+-- q1_0_5/go.mod --
+module example.com/q
+
+go 1.18
+
+require example.com/r v1.0.0
+-- q1_0_5/q.go --
+package q
+
+import _ "example.com/r"
+-- q1_1_0/go.mod --
+module example.com/q
+
+require example.com/w v1.0.0
+require example.com/z v1.1.0
+
+go 1.18
+-- q1_1_0/q.go --
+package q
+
+import _ "example.com/w"
+import _ "example.com/z"
+
+import "fmt"
+
+func PrintVersion() {
+ fmt.Println("version 1.1.0")
+}
+-- r/go.mod --
+module example.com/r
+
+go 1.18
+
+require example.com/r v1.0.0
+-- r/r.go --
+package r
+-- w/go.mod --
+module example.com/w
+
+go 1.18
+
+require example.com/x v1.0.0
+-- w/w.go --
+package w
+-- w/w_test.go --
+package w
+
+import _ "example.com/x"
+-- x/go.mod --
+module example.com/x
+
+go 1.18
+-- x/x.go --
+package x
+-- x/x_test.go --
+package x
+import _ "example.com/y"
+-- y/go.mod --
+module example.com/y
+
+go 1.18
+-- y/y.go --
+package y
+-- z1_0_0/go.mod --
+module example.com/z
+
+go 1.18
+
+require example.com/q v1.0.5
+-- z1_0_0/z.go --
+package z
+
+import _ "example.com/q"
+-- z1_1_0/go.mod --
+module example.com/z
+
+go 1.18
+-- z1_1_0/z.go --
+package z
diff --git a/src/cmd/go/testdata/script/work_regression_hang.txt b/src/cmd/go/testdata/script/work_regression_hang.txt
new file mode 100644
index 0000000000..a7661b68ad
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_regression_hang.txt
@@ -0,0 +1,71 @@
+# This test checks for a regression of a bug in the Go command
+# where the module loader hung forever because all main module dependencies
+# kept workspace pruning instead of adopting the pruning in their go.mod
+# files, and the loader kept adding dependencies on the queue until they
+# were either pruned or unpruned, never breaking a module dependency cycle.
+#
+# This is the module graph in the test:
+#
+# /-------------------------\
+# | |
+# V |
+# example.com/a -> example.com/b v1.0.0 -> example.com/c v1.1.0
+
+go list -m -f '{{.Version}}' example.com/c
+
+-- go.work --
+go 1.16
+
+use (
+ ./a
+)
+-- a/go.mod --
+module example.com/a
+
+go 1.18
+
+require example.com/b v1.0.0
+
+replace example.com/b v1.0.0 => ../b
+replace example.com/c v1.0.0 => ../c
+-- a/foo.go --
+package main
+
+import "example.com/b"
+
+func main() {
+ b.B()
+}
+-- b/go.mod --
+module example.com/b
+
+go 1.18
+
+require example.com/c v1.0.0
+-- b/b.go --
+package b
+
+func B() {
+}
+-- b/cmd/main.go --
+package main
+
+import "example.com/c"
+
+func main() {
+ c.C()
+}
+-- c/go.mod --
+module example.com/c
+
+go 1.18
+
+require example.com/b v1.0.0
+-- c/c.go --
+package c
+
+import "example.com/b"
+
+func C() {
+ b.B()
+}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_replace.txt b/src/cmd/go/testdata/script/work_replace.txt
index 5a4cb0eebb..81268e5069 100644
--- a/src/cmd/go/testdata/script/work_replace.txt
+++ b/src/cmd/go/testdata/script/work_replace.txt
@@ -10,7 +10,7 @@ go list -m example.com/other
stdout 'example.com/other v1.0.0 => ./other2'
-- go.work --
-directory m
+use m
replace example.com/dep => ./dep
replace example.com/other => ./other2
diff --git a/src/cmd/go/testdata/script/work_replace_conflict.txt b/src/cmd/go/testdata/script/work_replace_conflict.txt
index a2f76d13a0..81d1fcb043 100644
--- a/src/cmd/go/testdata/script/work_replace_conflict.txt
+++ b/src/cmd/go/testdata/script/work_replace_conflict.txt
@@ -2,15 +2,15 @@
# overriding it in the go.work file.
! go list -m example.com/dep
-stderr 'go: conflicting replacements for example.com/dep@v1.0.0:\n\t./dep1\n\t./dep2\nuse "go mod editwork -replace example.com/dep@v1.0.0=\[override\]" to resolve'
-go mod editwork -replace example.com/dep@v1.0.0=./dep1
+stderr 'go: conflicting replacements for example.com/dep@v1.0.0:\n\t./dep1\n\t./dep2\nuse "go work edit -replace example.com/dep@v1.0.0=\[override\]" to resolve'
+go work edit -replace example.com/dep@v1.0.0=./dep1
go list -m example.com/dep
stdout 'example.com/dep v1.0.0 => ./dep1'
-- foo --
-- go.work --
-directory m
-directory n
+use m
+use n
-- m/go.mod --
module example.com/m
diff --git a/src/cmd/go/testdata/script/work_replace_conflict_override.txt b/src/cmd/go/testdata/script/work_replace_conflict_override.txt
index ebb517dd7c..c62084bee6 100644
--- a/src/cmd/go/testdata/script/work_replace_conflict_override.txt
+++ b/src/cmd/go/testdata/script/work_replace_conflict_override.txt
@@ -5,8 +5,8 @@ go list -m example.com/dep
stdout 'example.com/dep v1.0.0 => ./dep3'
-- go.work --
-directory m
-directory n
+use m
+use n
replace example.com/dep => ./dep3
-- m/go.mod --
module example.com/m
diff --git a/src/cmd/go/testdata/script/work_replace_main_module.txt b/src/cmd/go/testdata/script/work_replace_main_module.txt
new file mode 100644
index 0000000000..b213764280
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_replace_main_module.txt
@@ -0,0 +1,45 @@
+# Ensure that replaces of the main module in workspace modules
+# are ignored, and replaces in the go.work file are disallowed.
+# This tests against an issue where requirements of the
+# main module were being ignored because the main module
+# was replaced in a transitive dependency with another
+# version.
+
+go list example.com/dep
+
+cp replace_main_module.go.work go.work
+! go list example.com/dep
+stderr 'go: workspace module example.com/mainmoda is replaced at all versions in the go.work file. To fix, remove the replacement from the go.work file or specify the version at which to replace the module.'
+
+-- replace_main_module.go.work --
+go 1.18
+use (
+ ./mainmoda
+ ./mainmodb
+)
+replace example.com/mainmoda => ../mainmodareplacement
+-- go.work --
+go 1.18
+use (
+ ./mainmoda
+ ./mainmodb
+)
+-- mainmoda/go.mod --
+module example.com/mainmoda
+
+go 1.18
+
+require example.com/dep v1.0.0
+replace example.com/dep => ../dep
+
+-- dep/go.mod --
+module example.com/dep
+-- dep/dep.go --
+package dep
+-- mainmodb/go.mod --
+module example.com/mainmodb
+go 1.18
+replace example.com/mainmoda => ../mainmodareplacement
+-- mainmodareplacement/go.mod --
+module example.com/mainmoda
+go 1.18
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_sum.txt b/src/cmd/go/testdata/script/work_sum.txt
index 99f66a4003..19dbb90507 100644
--- a/src/cmd/go/testdata/script/work_sum.txt
+++ b/src/cmd/go/testdata/script/work_sum.txt
@@ -8,18 +8,19 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c h1:pvCbr/wm8HzDD3fVywevekuf
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
rsc.io/quote v1.5.2 h1:3fEykkD9k7lYzXqCYrwGAf7iNhbk4yCjHmKBN9td4L0=
rsc.io/quote v1.5.2/go.mod h1:LzX7hefJvL54yjefDEDHNONDjII0t9xZLPXsUe+TKr0=
-rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
-rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-- go.work --
go 1.18
-directory .
+use .
-- go.mod --
go 1.18
module example.com/hi
require "rsc.io/quote" v1.5.2
+-- go.sum --
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
-- main.go --
package main
diff --git a/src/cmd/go/testdata/script/work_sum_mismatch.txt b/src/cmd/go/testdata/script/work_sum_mismatch.txt
new file mode 100644
index 0000000000..9e9474304e
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sum_mismatch.txt
@@ -0,0 +1,61 @@
+# Test mismatched sums in go.sum files
+
+! go run ./a
+cmpenv stderr want-error
+
+-- want-error --
+verifying rsc.io/sampler@v1.3.0/go.mod: checksum mismatch
+ downloaded: h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+ $WORK${/}gopath${/}src${/}a${/}go.sum: h1:U1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+
+SECURITY ERROR
+This download does NOT match an earlier download recorded in go.sum.
+The bits may have been replaced on the origin server, or an attacker may
+have intercepted the download attempt.
+
+For more information, see 'go help module-auth'.
+-- go.work --
+go 1.18
+
+use ./a
+use ./b
+-- a/go.mod --
+go 1.18
+
+module example.com/hi
+
+require "rsc.io/quote" v1.5.2
+-- a/go.sum --
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:U1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+-- a/main.go --
+package main
+
+import (
+ "fmt"
+ "rsc.io/quote"
+)
+
+func main() {
+ fmt.Println(quote.Hello())
+}
+-- b/go.mod --
+go 1.18
+
+module example.com/hi
+
+require "rsc.io/quote" v1.5.2
+-- b/go.sum --
+rsc.io/sampler v1.3.0 h1:HLGR/BgEtI3r0uymSP/nl2uPLsUnNJX8toRyhfpBTII=
+rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
+-- b/main.go --
+package main
+
+import (
+ "fmt"
+ "rsc.io/quote"
+)
+
+func main() {
+ fmt.Println(quote.Hello())
+}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_sync.txt b/src/cmd/go/testdata/script/work_sync.txt
new file mode 100644
index 0000000000..69167d4cc1
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sync.txt
@@ -0,0 +1,119 @@
+go work sync
+cmp a/go.mod a/want_go.mod
+cmp b/go.mod b/want_go.mod
+
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./b
+)
+
+-- a/go.mod --
+go 1.18
+
+module example.com/a
+
+require (
+ example.com/p v1.0.0
+ example.com/q v1.1.0
+ example.com/r v1.0.0
+)
+
+replace (
+ example.com/p => ../p
+ example.com/q => ../q
+ example.com/r => ../r
+)
+-- a/want_go.mod --
+go 1.18
+
+module example.com/a
+
+require (
+ example.com/p v1.1.0
+ example.com/q v1.1.0
+)
+
+replace (
+ example.com/p => ../p
+ example.com/q => ../q
+ example.com/r => ../r
+)
+-- a/a.go --
+package a
+
+import (
+ "example.com/p"
+ "example.com/q"
+)
+
+func Foo() {
+ p.P()
+ q.Q()
+}
+-- b/go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/p v1.1.0
+ example.com/q v1.0.0
+)
+
+replace (
+ example.com/p => ../p
+ example.com/q => ../q
+)
+-- b/want_go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/p v1.1.0
+ example.com/q v1.1.0
+)
+
+replace (
+ example.com/p => ../p
+ example.com/q => ../q
+)
+-- b/b.go --
+package b
+
+import (
+ "example.com/p"
+ "example.com/q"
+)
+
+func Foo() {
+ p.P()
+ q.Q()
+}
+-- p/go.mod --
+go 1.18
+
+module example.com/p
+-- p/p.go --
+package p
+
+func P() {}
+-- q/go.mod --
+go 1.18
+
+module example.com/q
+-- q/q.go --
+package q
+
+func Q() {}
+-- r/go.mod --
+go 1.18
+
+module example.com/r
+-- r/q.go --
+package r
+
+func R() {}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_sync_irrelevant_dependency.txt b/src/cmd/go/testdata/script/work_sync_irrelevant_dependency.txt
new file mode 100644
index 0000000000..072323d15d
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sync_irrelevant_dependency.txt
@@ -0,0 +1,119 @@
+# Test of go work sync in a workspace in which some dependency needed by `a`
+# appears at a lower version in the build list of `b`, but is not needed at all
+# by `b` (so it should not be upgraded within b).
+#
+# a -> p 1.1
+# b -> q 1.0 -(through a test dependency)-> p 1.0
+go work sync
+cmp a/go.mod a/want_go.mod
+cmp b/go.mod b/want_go.mod
+
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./b
+)
+
+-- a/go.mod --
+go 1.18
+
+module example.com/a
+
+require (
+ example.com/p v1.1.0
+)
+
+replace (
+ example.com/p => ../p
+)
+-- a/want_go.mod --
+go 1.18
+
+module example.com/a
+
+require (
+ example.com/p v1.1.0
+)
+
+replace (
+ example.com/p => ../p
+)
+-- a/a.go --
+package a
+
+import (
+ "example.com/p"
+)
+
+func Foo() {
+ p.P()
+}
+-- b/go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/q v1.0.0
+)
+
+replace (
+ example.com/q => ../q
+)
+-- b/want_go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/q v1.0.0
+)
+
+replace (
+ example.com/q => ../q
+)
+-- b/b.go --
+package b
+
+import (
+ "example.com/q"
+)
+
+func Foo() {
+ q.Q()
+}
+-- p/go.mod --
+go 1.18
+
+module example.com/p
+-- p/p.go --
+package p
+
+func P() {}
+-- q/go.mod --
+go 1.18
+
+module example.com/q
+
+require (
+ example.com/p v1.0.0
+)
+
+replace (
+ example.com/p => ../p
+)
+-- q/q.go --
+package q
+
+func Q() {
+}
+-- q/q_test.go --
+package q
+
+import example.com/p
+
+func TestQ(t *testing.T) {
+ p.P()
+}
\ No newline at end of file
diff --git a/src/cmd/go/testdata/script/work_sync_missing_module.txt b/src/cmd/go/testdata/script/work_sync_missing_module.txt
new file mode 100644
index 0000000000..0018c733ee
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sync_missing_module.txt
@@ -0,0 +1,12 @@
+# Ensure go work sync works without any modules in go.work.
+go work sync
+
+# Ensure go work sync works even without a go.mod file.
+rm go.mod
+go work sync
+
+-- go.work --
+go 1.18
+-- go.mod --
+go 1.18
+module foo
diff --git a/src/cmd/go/testdata/script/work_sync_relevant_dependency.txt b/src/cmd/go/testdata/script/work_sync_relevant_dependency.txt
new file mode 100644
index 0000000000..d7997027d9
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sync_relevant_dependency.txt
@@ -0,0 +1,106 @@
+# Test of go work sync in a workspace in which some dependency in the build
+# list of 'b' (but not otherwise needed by `b`, so not seen when lazy loading
+# occurs) actually is relevant to `a`.
+#
+# a -> p 1.0
+# b -> q 1.1 -> p 1.1
+go work sync
+cmp a/go.mod a/want_go.mod
+cmp b/go.mod b/want_go.mod
+
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./b
+)
+
+-- a/go.mod --
+go 1.18
+
+module example.com/a
+
+require (
+ example.com/p v1.0.0
+)
+
+replace (
+ example.com/p => ../p
+)
+-- a/want_go.mod --
+go 1.18
+
+module example.com/a
+
+require example.com/p v1.1.0
+
+replace example.com/p => ../p
+-- a/a.go --
+package a
+
+import (
+ "example.com/p"
+)
+
+func Foo() {
+ p.P()
+}
+-- b/go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/q v1.1.0
+)
+
+replace (
+ example.com/q => ../q
+)
+-- b/want_go.mod --
+go 1.18
+
+module example.com/b
+
+require (
+ example.com/q v1.1.0
+)
+
+replace (
+ example.com/q => ../q
+)
+-- b/b.go --
+package b
+
+import (
+ "example.com/q"
+)
+
+func Foo() {
+ q.Q()
+}
+-- p/go.mod --
+go 1.18
+
+module example.com/p
+-- p/p.go --
+package p
+
+func P() {}
+-- q/go.mod --
+go 1.18
+
+module example.com/q
+
+require example.com/p v1.1.0
+
+replace example.com/p => ../p
+-- q/q.go --
+package q
+
+import example.com/p
+
+func Q() {
+ p.P()
+}
diff --git a/src/cmd/go/testdata/script/work_sync_sum.txt b/src/cmd/go/testdata/script/work_sync_sum.txt
new file mode 100644
index 0000000000..656fd31379
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_sync_sum.txt
@@ -0,0 +1,40 @@
+# Test that the sum file data state is properly reset between modules in
+# go work sync so that the sum file that's written is correct.
+# Exercises the fix to #50038.
+
+cp b/go.sum b/go.sum.want
+
+# As a sanity check, verify b/go.sum is tidy.
+cd b
+go mod tidy
+cd ..
+cmp b/go.sum b/go.sum.want
+
+# Run go work sync and verify it doesn't change b/go.sum.
+go work sync
+cmp b/go.sum b/go.sum.want
+
+-- b/go.sum --
+rsc.io/quote v1.0.0 h1:kQ3IZQzPTiDJxSZI98YaWgxFEhlNdYASHvh+MplbViw=
+rsc.io/quote v1.0.0/go.mod h1:v83Ri/njykPcgJltBc/gEkJTmjTsNgtO1Y7vyIK1CQA=
+-- go.work --
+go 1.18
+use (
+ ./a
+ ./b
+)
+replace example.com/c => ./c
+-- a/go.mod --
+module example.com/a
+go 1.18
+require rsc.io/fortune v1.0.0
+-- a/a.go --
+package a
+import "rsc.io/fortune"
+-- b/go.mod --
+module example.com/b
+go 1.18
+require rsc.io/quote v1.0.0
+-- b/b.go --
+package b
+import _ "rsc.io/quote"
diff --git a/src/cmd/go/testdata/script/work_use.txt b/src/cmd/go/testdata/script/work_use.txt
new file mode 100644
index 0000000000..f5ea89c900
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_use.txt
@@ -0,0 +1,32 @@
+go work use -r foo
+cmp go.work go.want_work_r
+
+go work use other
+cmp go.work go.want_work_other
+-- go.work --
+go 1.18
+
+use (
+ foo
+ foo/bar // doesn't exist
+)
+-- go.want_work_r --
+go 1.18
+
+use (
+ foo
+ foo/bar/baz
+)
+-- go.want_work_other --
+go 1.18
+
+use (
+ foo
+ foo/bar/baz
+ other
+)
+-- foo/go.mod --
+module foo
+-- foo/bar/baz/go.mod --
+module baz
+-- other/go.mod --
diff --git a/src/cmd/go/testdata/script/work_use_dot.txt b/src/cmd/go/testdata/script/work_use_dot.txt
new file mode 100644
index 0000000000..c24aae33e8
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_use_dot.txt
@@ -0,0 +1,33 @@
+cp go.work go.work.orig
+
+# 'go work use .' should add an entry for the current directory.
+cd bar/baz
+go work use .
+cmp ../../go.work ../../go.work.rel
+
+# If the current directory lacks a go.mod file, 'go work use .'
+# should remove its entry.
+mv go.mod go.mod.bak
+go work use .
+cmp ../../go.work ../../go.work.orig
+
+mv go.mod.bak go.mod
+go work use $PWD
+cmpenv ../../go.work ../../go.work.abs
+
+-- go.mod --
+module example
+go 1.18
+-- go.work --
+go 1.18
+-- go.work.rel --
+go 1.18
+
+use bar/baz
+-- go.work.abs --
+go 1.18
+
+use $PWD
+-- bar/baz/go.mod --
+module example/bar/baz
+go 1.18
diff --git a/src/cmd/go/testdata/script/work_why_download_graph.txt b/src/cmd/go/testdata/script/work_why_download_graph.txt
new file mode 100644
index 0000000000..7964c914a2
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_why_download_graph.txt
@@ -0,0 +1,59 @@
+# Test go mod download, why, and graph work in workspace mode.
+# TODO(bcmills): clarify the interaction with #44435
+
+go mod download rsc.io/quote
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+
+go mod download
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.info
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.mod
+exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.2.zip
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.info
+! exists $GOPATH/pkg/mod/cache/download/rsc.io/quote/@v/v1.5.0.mod
+
+go mod why rsc.io/quote
+stdout '# rsc.io/quote\nexample.com/a\nrsc.io/quote'
+
+go mod graph
+stdout 'example.com/a rsc.io/quote@v1.5.2\nexample.com/b example.com/c@v1.0.0\nrsc.io/quote@v1.5.2 rsc.io/sampler@v1.3.0\nrsc.io/sampler@v1.3.0 golang.org/x/text@v0.0.0-20170915032832-14c0d48ead0c'
+
+-- go.work --
+go 1.18
+
+use (
+ ./a
+ ./b
+)
+-- a/go.mod --
+go 1.18
+
+module example.com/a
+
+require "rsc.io/quote" v1.5.2
+-- a/main.go --
+package main
+
+import (
+ "fmt"
+ "rsc.io/quote"
+)
+
+func main() {
+ fmt.Println(quote.Hello())
+}
+-- b/go.mod --
+go 1.18
+
+module example.com/b
+
+require example.com/c v1.0.0
+replace example.com/c => ../c
+-- c/go.mod --
+go 1.18
+
+module example.com/c
+
diff --git a/src/cmd/go/testdata/script/work_workfile.txt b/src/cmd/go/testdata/script/work_workfile.txt
new file mode 100644
index 0000000000..b62918147e
--- /dev/null
+++ b/src/cmd/go/testdata/script/work_workfile.txt
@@ -0,0 +1,21 @@
+! go list -workfile=stop.work a # require absolute path
+! stderr panic
+! go list -workfile=doesnotexist a
+! stderr panic
+
+go list -n -workfile=$GOPATH/src/stop.work a
+go build -n -workfile=$GOPATH/src/stop.work a
+go test -n -workfile=$GOPATH/src/stop.work a
+
+-- stop.work --
+go 1.18
+
+use ./a
+-- a/a.go --
+package a
+-- a/a_test.go --
+package a
+-- a/go.mod --
+module a
+
+go 1.18
\ No newline at end of file
diff --git a/src/cmd/go/testdata/testterminal18153/terminal_test.go b/src/cmd/go/testdata/testterminal18153/terminal_test.go
index 71493efe98..34ee580c0e 100644
--- a/src/cmd/go/testdata/testterminal18153/terminal_test.go
+++ b/src/cmd/go/testdata/testterminal18153/terminal_test.go
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+//go:build linux
// +build linux
// This test is run by src/cmd/dist/test.go (cmd_go_test_terminal),
diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go
index 860d77aaf0..51f6e652d9 100644
--- a/src/cmd/gofmt/gofmt.go
+++ b/src/cmd/gofmt/gofmt.go
@@ -183,7 +183,7 @@ func (r *reporter) getState() *reporterState {
// Warnf emits a warning message to the reporter's error stream,
// without changing its exit code.
-func (r *reporter) Warnf(format string, args ...interface{}) {
+func (r *reporter) Warnf(format string, args ...any) {
fmt.Fprintf(r.getState().err, format, args...)
}
diff --git a/src/cmd/internal/buildid/buildid_test.go b/src/cmd/internal/buildid/buildid_test.go
index 4895a49e11..f04e328046 100644
--- a/src/cmd/internal/buildid/buildid_test.go
+++ b/src/cmd/internal/buildid/buildid_test.go
@@ -103,7 +103,7 @@ func TestFindAndHash(t *testing.T) {
id[i] = byte(i)
}
numError := 0
- errorf := func(msg string, args ...interface{}) {
+ errorf := func(msg string, args ...any) {
t.Errorf(msg, args...)
if numError++; numError > 20 {
t.Logf("stopping after too many errors")
diff --git a/src/cmd/internal/buildid/rewrite.go b/src/cmd/internal/buildid/rewrite.go
index 8814950db0..becc078242 100644
--- a/src/cmd/internal/buildid/rewrite.go
+++ b/src/cmd/internal/buildid/rewrite.go
@@ -151,7 +151,7 @@ func (r *excludedReader) Read(p []byte) (int, error) {
return n, err
}
-func findMachoCodeSignature(r interface{}) (*macho.File, codesign.CodeSigCmd, bool) {
+func findMachoCodeSignature(r any) (*macho.File, codesign.CodeSigCmd, bool) {
ra, ok := r.(io.ReaderAt)
if !ok {
return nil, codesign.CodeSigCmd{}, false
diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go
index 68f0921d4d..f4111f4f5c 100644
--- a/src/cmd/internal/obj/arm64/asm7.go
+++ b/src/cmd/internal/obj/arm64/asm7.go
@@ -1184,7 +1184,7 @@ func (c *ctxt7) addpool128(p *obj.Prog, al, ah *obj.Addr) {
q := c.newprog()
q.As = ADWORD
q.To.Type = obj.TYPE_CONST
- q.To.Offset = al.Offset
+ q.To.Offset = al.Offset // q.Pc is lower than t.Pc, so al.Offset is stored in q.
t := c.newprog()
t.As = ADWORD
diff --git a/src/cmd/internal/obj/arm64/doc.go b/src/cmd/internal/obj/arm64/doc.go
index 14f0f4c616..1234a3e818 100644
--- a/src/cmd/internal/obj/arm64/doc.go
+++ b/src/cmd/internal/obj/arm64/doc.go
@@ -89,12 +89,12 @@ In the following example, PCALIGN at the entry of the function Add will align it
7. Move large constants to vector registers.
Go asm uses VMOVQ/VMOVD/VMOVS to move 128-bit, 64-bit and 32-bit constants into vector registers, respectively.
-And for a 128-bit interger, it take two 64-bit operands, for the high and low parts separately.
+And for a 128-bit interger, it take two 64-bit operands, for the low and high parts separately.
Examples:
VMOVS $0x11223344, V0
VMOVD $0x1122334455667788, V1
- VMOVQ $0x1122334455667788, $8877665544332211, V2 // V2=0x11223344556677888877665544332211
+ VMOVQ $0x1122334455667788, $0x99aabbccddeeff00, V2 // V2=0x99aabbccddeeff001122334455667788
8. Move an optionally-shifted 16-bit immediate value to a register.
diff --git a/src/cmd/internal/obj/mips/obj0.go b/src/cmd/internal/obj/mips/obj0.go
index 9e2ccc1929..b96a28a944 100644
--- a/src/cmd/internal/obj/mips/obj0.go
+++ b/src/cmd/internal/obj/mips/obj0.go
@@ -466,9 +466,15 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q = c.newprog()
q.As = AJMP
q.Pos = p.Pos
- q.To.Type = obj.TYPE_MEM
- q.To.Offset = 0
- q.To.Reg = REGLINK
+ if retSym != nil { // retjmp
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Name = obj.NAME_EXTERN
+ q.To.Sym = retSym
+ } else {
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REGLINK
+ q.To.Offset = 0
+ }
q.Mark |= BRANCH
q.Spadj = +autosize
diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go
index ee2e5962f7..1de6e76b09 100644
--- a/src/cmd/internal/obj/ppc64/asm_test.go
+++ b/src/cmd/internal/obj/ppc64/asm_test.go
@@ -300,7 +300,7 @@ func TestLarge(t *testing.T) {
t.Fatal(err)
}
if !matched {
- t.Errorf("Failed to detect long foward BC fixup in (%v):%s\n", platenv, out)
+ t.Errorf("Failed to detect long forward BC fixup in (%v):%s\n", platenv, out)
}
matched, err = regexp.MatchString(strings.Join(test.backpattern, "\n\t*"), string(out))
if err != nil {
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
index 7ac6465a72..c986c0d2b6 100644
--- a/src/cmd/internal/obj/ppc64/obj9.go
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -884,8 +884,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q = c.newprog()
q.As = ABR
q.Pos = p.Pos
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_LR
+ if retTarget == nil {
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+ } else {
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Sym = retTarget
+ }
q.Mark |= BRANCH
q.Spadj = +autosize
diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go
index ed88f621d9..d9434e7415 100644
--- a/src/cmd/internal/obj/riscv/cpu.go
+++ b/src/cmd/internal/obj/riscv/cpu.go
@@ -183,7 +183,7 @@ const (
REGG = REG_G
)
-// https://github.com/riscv/riscv-elf-psabi-doc/blob/master/riscv-elf.md#dwarf-register-numbers
+// https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-dwarf.adoc#dwarf-register-numbers
var RISCV64DWARFRegisters = map[int16]int16{
// Integer Registers.
REG_X0: 0,
diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go
index 5755b118db..9f16de0c8c 100644
--- a/src/cmd/internal/obj/riscv/obj.go
+++ b/src/cmd/internal/obj/riscv/obj.go
@@ -790,6 +790,12 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
p.To.Type = obj.TYPE_REG
p.To.Reg = REG_X10
+ // Mark the stack bound check and morestack call async nonpreemptible.
+ // If we get preempted here, when resumed the preemption request is
+ // cleared, but we'll still call morestack, which will double the stack
+ // unnecessarily. See issue #35470.
+ p = ctxt.StartUnsafePoint(p, newprog)
+
var to_done, to_more *obj.Prog
if framesize <= objabi.StackSmall {
@@ -854,7 +860,7 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
to_done = p
}
- p = ctxt.EmitEntryLiveness(cursym, p, newprog)
+ p = ctxt.EmitEntryStackMap(cursym, p, newprog)
// CALL runtime.morestack(SB)
p = obj.Appendp(p, newprog)
@@ -872,6 +878,8 @@ func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgA
}
jalToSym(ctxt, p, REG_X5)
+ p = ctxt.EndUnsafePoint(p, newprog, -1)
+
// JMP start
p = obj.Appendp(p, newprog)
p.As = AJAL
diff --git a/src/cmd/internal/obj/s390x/objz.go b/src/cmd/internal/obj/s390x/objz.go
index de40ff05af..aebbf8dbc5 100644
--- a/src/cmd/internal/obj/s390x/objz.go
+++ b/src/cmd/internal/obj/s390x/objz.go
@@ -488,8 +488,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
q = obj.Appendp(p, c.newprog)
q.As = ABR
q.From = obj.Addr{}
- q.To.Type = obj.TYPE_REG
- q.To.Reg = REG_LR
+ if retTarget == nil {
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+ } else {
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Sym = retTarget
+ }
q.Mark |= BRANCH
q.Spadj = autosize
break
diff --git a/src/cmd/internal/obj/x86/asm6.go b/src/cmd/internal/obj/x86/asm6.go
index 6555756fd3..a508e484e4 100644
--- a/src/cmd/internal/obj/x86/asm6.go
+++ b/src/cmd/internal/obj/x86/asm6.go
@@ -2174,7 +2174,7 @@ func span6(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
}
n++
- if n > 20 {
+ if n > 1000 {
ctxt.Diag("span must be looping")
log.Fatalf("loop")
}
diff --git a/src/cmd/internal/objfile/objfile.go b/src/cmd/internal/objfile/objfile.go
index dcfd158ec2..d890a0b756 100644
--- a/src/cmd/internal/objfile/objfile.go
+++ b/src/cmd/internal/objfile/objfile.go
@@ -152,6 +152,15 @@ func (e *Entry) PCLineTable() (Liner, error) {
if err != nil {
return nil, err
}
+ syms, err := e.raw.symbols()
+ if err == nil {
+ for _, s := range syms {
+ if s.Name == "runtime.text" {
+ textStart = s.Addr
+ break
+ }
+ }
+ }
return gosym.NewTable(symtab, gosym.NewLineTable(pclntab, textStart))
}
diff --git a/src/cmd/internal/sys/supported.go b/src/cmd/internal/sys/supported.go
index f25aaabddd..82b65511de 100644
--- a/src/cmd/internal/sys/supported.go
+++ b/src/cmd/internal/sys/supported.go
@@ -50,7 +50,7 @@ func ASanSupported(goos, goarch string) bool {
// ('go test -fuzz=.').
func FuzzSupported(goos, goarch string) bool {
switch goos {
- case "darwin", "linux", "windows":
+ case "darwin", "freebsd", "linux", "windows":
return true
default:
return false
diff --git a/src/cmd/internal/test2json/test2json_test.go b/src/cmd/internal/test2json/test2json_test.go
index 4683907888..e69739d3fe 100644
--- a/src/cmd/internal/test2json/test2json_test.go
+++ b/src/cmd/internal/test2json/test2json_test.go
@@ -145,7 +145,7 @@ func writeAndKill(w io.Writer, b []byte) {
// and fails the test with a useful message if they don't match.
func diffJSON(t *testing.T, have, want []byte) {
t.Helper()
- type event map[string]interface{}
+ type event map[string]any
// Parse into events, one per line.
parseEvents := func(b []byte) ([]event, []string) {
diff --git a/src/cmd/internal/traceviewer/format.go b/src/cmd/internal/traceviewer/format.go
index 871477447f..3636c1053d 100644
--- a/src/cmd/internal/traceviewer/format.go
+++ b/src/cmd/internal/traceviewer/format.go
@@ -16,20 +16,20 @@ type Data struct {
}
type Event struct {
- Name string `json:"name,omitempty"`
- Phase string `json:"ph"`
- Scope string `json:"s,omitempty"`
- Time float64 `json:"ts"`
- Dur float64 `json:"dur,omitempty"`
- PID uint64 `json:"pid"`
- TID uint64 `json:"tid"`
- ID uint64 `json:"id,omitempty"`
- BindPoint string `json:"bp,omitempty"`
- Stack int `json:"sf,omitempty"`
- EndStack int `json:"esf,omitempty"`
- Arg interface{} `json:"args,omitempty"`
- Cname string `json:"cname,omitempty"`
- Category string `json:"cat,omitempty"`
+ Name string `json:"name,omitempty"`
+ Phase string `json:"ph"`
+ Scope string `json:"s,omitempty"`
+ Time float64 `json:"ts"`
+ Dur float64 `json:"dur,omitempty"`
+ PID uint64 `json:"pid"`
+ TID uint64 `json:"tid"`
+ ID uint64 `json:"id,omitempty"`
+ BindPoint string `json:"bp,omitempty"`
+ Stack int `json:"sf,omitempty"`
+ EndStack int `json:"esf,omitempty"`
+ Arg any `json:"args,omitempty"`
+ Cname string `json:"cname,omitempty"`
+ Category string `json:"cat,omitempty"`
}
type Frame struct {
diff --git a/src/cmd/link/elf_test.go b/src/cmd/link/elf_test.go
index 012c0b5169..760d9ea60d 100644
--- a/src/cmd/link/elf_test.go
+++ b/src/cmd/link/elf_test.go
@@ -201,6 +201,61 @@ func TestMinusRSymsWithSameName(t *testing.T) {
}
}
+func TestMergeNoteSections(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ expected := 1
+
+ switch runtime.GOOS {
+ case "linux", "freebsd", "dragonfly":
+ case "openbsd", "netbsd":
+ // These OSes require independent segment
+ expected = 2
+ default:
+ t.Skip("We should only test on elf output.")
+ }
+ t.Parallel()
+
+ goFile := filepath.Join(t.TempDir(), "notes.go")
+ if err := ioutil.WriteFile(goFile, []byte(goSource), 0444); err != nil {
+ t.Fatal(err)
+ }
+ outFile := filepath.Join(t.TempDir(), "notes.exe")
+ goTool := testenv.GoToolPath(t)
+ // sha1sum of "gopher"
+ id := "0xf4e8cd51ce8bae2996dc3b74639cdeaa1f7fee5f"
+ cmd := exec.Command(goTool, "build", "-o", outFile, "-ldflags",
+ "-B "+id, goFile)
+ cmd.Dir = t.TempDir()
+ if out, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("%s", out)
+ t.Fatal(err)
+ }
+
+ ef, err := elf.Open(outFile)
+ if err != nil {
+ t.Fatalf("open elf file failed:%v", err)
+ }
+ defer ef.Close()
+ sec := ef.Section(".note.gnu.build-id")
+ if sec == nil {
+ t.Fatalf("can't find gnu build id")
+ }
+
+ sec = ef.Section(".note.go.buildid")
+ if sec == nil {
+ t.Fatalf("can't find go build id")
+ }
+ cnt := 0
+ for _, ph := range ef.Progs {
+ if ph.Type == elf.PT_NOTE {
+ cnt += 1
+ }
+ }
+ if cnt != expected {
+ t.Fatalf("want %d PT_NOTE segment, got %d", expected, cnt)
+ }
+}
+
const pieSourceTemplate = `
package main
diff --git a/src/cmd/link/internal/dwtest/dwtest.go b/src/cmd/link/internal/dwtest/dwtest.go
new file mode 100644
index 0000000000..c68edf4187
--- /dev/null
+++ b/src/cmd/link/internal/dwtest/dwtest.go
@@ -0,0 +1,197 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwtest
+
+import (
+ "debug/dwarf"
+ "errors"
+ "fmt"
+ "os"
+)
+
+// Helper type for supporting queries on DIEs within a DWARF
+// .debug_info section. Invoke the populate() method below passing in
+// a dwarf.Reader, which will read in all DIEs and keep track of
+// parent/child relationships. Queries can then be made to ask for
+// DIEs by name or by offset. This will hopefully reduce boilerplate
+// for future test writing.
+
+type Examiner struct {
+ dies []*dwarf.Entry
+ idxByOffset map[dwarf.Offset]int
+ kids map[int][]int
+ parent map[int]int
+ byname map[string][]int
+}
+
+// Populate the Examiner using the DIEs read from rdr.
+func (ex *Examiner) Populate(rdr *dwarf.Reader) error {
+ ex.idxByOffset = make(map[dwarf.Offset]int)
+ ex.kids = make(map[int][]int)
+ ex.parent = make(map[int]int)
+ ex.byname = make(map[string][]int)
+ var nesting []int
+ for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() {
+ if err != nil {
+ return err
+ }
+ if entry.Tag == 0 {
+ // terminator
+ if len(nesting) == 0 {
+ return errors.New("nesting stack underflow")
+ }
+ nesting = nesting[:len(nesting)-1]
+ continue
+ }
+ idx := len(ex.dies)
+ ex.dies = append(ex.dies, entry)
+ if _, found := ex.idxByOffset[entry.Offset]; found {
+ return errors.New("DIE clash on offset")
+ }
+ ex.idxByOffset[entry.Offset] = idx
+ if name, ok := entry.Val(dwarf.AttrName).(string); ok {
+ ex.byname[name] = append(ex.byname[name], idx)
+ }
+ if len(nesting) > 0 {
+ parent := nesting[len(nesting)-1]
+ ex.kids[parent] = append(ex.kids[parent], idx)
+ ex.parent[idx] = parent
+ }
+ if entry.Children {
+ nesting = append(nesting, idx)
+ }
+ }
+ if len(nesting) > 0 {
+ return errors.New("unterminated child sequence")
+ }
+ return nil
+}
+
+func (e *Examiner) DIEs() []*dwarf.Entry {
+ return e.dies
+}
+
+func indent(ilevel int) {
+ for i := 0; i < ilevel; i++ {
+ fmt.Printf(" ")
+ }
+}
+
+// For debugging new tests
+func (ex *Examiner) DumpEntry(idx int, dumpKids bool, ilevel int) {
+ if idx >= len(ex.dies) {
+ fmt.Fprintf(os.Stderr, "DumpEntry: bad DIE %d: index out of range\n", idx)
+ return
+ }
+ entry := ex.dies[idx]
+ indent(ilevel)
+ fmt.Printf("0x%x: %v\n", idx, entry.Tag)
+ for _, f := range entry.Field {
+ indent(ilevel)
+ fmt.Printf("at=%v val=0x%x\n", f.Attr, f.Val)
+ }
+ if dumpKids {
+ ksl := ex.kids[idx]
+ for _, k := range ksl {
+ ex.DumpEntry(k, true, ilevel+2)
+ }
+ }
+}
+
+// Given a DIE offset, return the previously read dwarf.Entry, or nil
+func (ex *Examiner) EntryFromOffset(off dwarf.Offset) *dwarf.Entry {
+ if idx, found := ex.idxByOffset[off]; found && idx != -1 {
+ return ex.entryFromIdx(idx)
+ }
+ return nil
+}
+
+// Return the ID that Examiner uses to refer to the DIE at offset off
+func (ex *Examiner) IdxFromOffset(off dwarf.Offset) int {
+ if idx, found := ex.idxByOffset[off]; found {
+ return idx
+ }
+ return -1
+}
+
+// Return the dwarf.Entry pointer for the DIE with id 'idx'
+func (ex *Examiner) entryFromIdx(idx int) *dwarf.Entry {
+ if idx >= len(ex.dies) || idx < 0 {
+ return nil
+ }
+ return ex.dies[idx]
+}
+
+// Returns a list of child entries for a die with ID 'idx'
+func (ex *Examiner) Children(idx int) []*dwarf.Entry {
+ sl := ex.kids[idx]
+ ret := make([]*dwarf.Entry, len(sl))
+ for i, k := range sl {
+ ret[i] = ex.entryFromIdx(k)
+ }
+ return ret
+}
+
+// Returns parent DIE for DIE 'idx', or nil if the DIE is top level
+func (ex *Examiner) Parent(idx int) *dwarf.Entry {
+ p, found := ex.parent[idx]
+ if !found {
+ return nil
+ }
+ return ex.entryFromIdx(p)
+}
+
+// ParentCU returns the enclosing compilation unit DIE for the DIE
+// with a given index, or nil if for some reason we can't establish a
+// parent.
+func (ex *Examiner) ParentCU(idx int) *dwarf.Entry {
+ for {
+ parentDie := ex.Parent(idx)
+ if parentDie == nil {
+ return nil
+ }
+ if parentDie.Tag == dwarf.TagCompileUnit {
+ return parentDie
+ }
+ idx = ex.IdxFromOffset(parentDie.Offset)
+ }
+}
+
+// FileRef takes a given DIE by index and a numeric file reference
+// (presumably from a decl_file or call_file attribute), looks up the
+// reference in the .debug_line file table, and returns the proper
+// string for it. We need to know which DIE is making the reference
+// so as to find the right compilation unit.
+func (ex *Examiner) FileRef(dw *dwarf.Data, dieIdx int, fileRef int64) (string, error) {
+
+ // Find the parent compilation unit DIE for the specified DIE.
+ cuDie := ex.ParentCU(dieIdx)
+ if cuDie == nil {
+ return "", fmt.Errorf("no parent CU DIE for DIE with idx %d?", dieIdx)
+ }
+ // Construct a line reader and then use it to get the file string.
+ lr, lrerr := dw.LineReader(cuDie)
+ if lrerr != nil {
+ return "", fmt.Errorf("d.LineReader: %v", lrerr)
+ }
+ files := lr.Files()
+ if fileRef < 0 || int(fileRef) > len(files)-1 {
+ return "", fmt.Errorf("Examiner.FileRef: malformed file reference %d", fileRef)
+ }
+ return files[fileRef].Name, nil
+}
+
+// Return a list of all DIEs with name 'name'. When searching for DIEs
+// by name, keep in mind that the returned results will include child
+// DIEs such as params/variables. For example, asking for all DIEs named
+// "p" for even a small program will give you 400-500 entries.
+func (ex *Examiner) Named(name string) []*dwarf.Entry {
+ sl := ex.byname[name]
+ ret := make([]*dwarf.Entry, len(sl))
+ for i, k := range sl {
+ ret[i] = ex.entryFromIdx(k)
+ }
+ return ret
+}
diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go
index 4d85977d43..95a8e0facb 100644
--- a/src/cmd/link/internal/ld/data.go
+++ b/src/cmd/link/internal/ld/data.go
@@ -2169,11 +2169,10 @@ func (ctxt *Link) buildinfo() {
return
}
+ // Write the buildinfo symbol, which go version looks for.
+ // The code reading this data is in package debug/buildinfo.
ldr := ctxt.loader
s := ldr.CreateSymForUpdate(".go.buildinfo", 0)
- // On AIX, .go.buildinfo must be in the symbol table as
- // it has relocations.
- s.SetNotInSymbolTable(!ctxt.IsAIX())
s.SetType(sym.SBUILDINFO)
s.SetAlign(16)
// The \xff is invalid UTF-8, meant to make it less likely
@@ -2186,16 +2185,24 @@ func (ctxt *Link) buildinfo() {
if ctxt.Arch.ByteOrder == binary.BigEndian {
data[len(prefix)+1] = 1
}
+ data[len(prefix)+1] |= 2 // signals new pointer-free format
+ data = appendString(data, strdata["runtime.buildVersion"])
+ data = appendString(data, strdata["runtime.modinfo"])
+	// MacOS linker gets very upset if the size is not a multiple of alignment.
+ for len(data)%16 != 0 {
+ data = append(data, 0)
+ }
s.SetData(data)
s.SetSize(int64(len(data)))
- r, _ := s.AddRel(objabi.R_ADDR)
- r.SetOff(16)
- r.SetSiz(uint8(ctxt.Arch.PtrSize))
- r.SetSym(ldr.LookupOrCreateSym("runtime.buildVersion", 0))
- r, _ = s.AddRel(objabi.R_ADDR)
- r.SetOff(16 + int32(ctxt.Arch.PtrSize))
- r.SetSiz(uint8(ctxt.Arch.PtrSize))
- r.SetSym(ldr.LookupOrCreateSym("runtime.modinfo", 0))
+}
+
+// appendString appends s to data, prefixed by its varint-encoded length.
+func appendString(data []byte, s string) []byte {
+ var v [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(v[:], uint64(len(s)))
+ data = append(data, v[:n]...)
+ data = append(data, s...)
+ return data
}
// assign addresses to text
diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go
index 7b57a85cde..dba22323b0 100644
--- a/src/cmd/link/internal/ld/deadcode.go
+++ b/src/cmd/link/internal/ld/deadcode.go
@@ -71,12 +71,6 @@ func (d *deadcodePass) init() {
// runtime.unreachableMethod is a function that will throw if called.
// We redirect unreachable methods to it.
names = append(names, "runtime.unreachableMethod")
- if !d.ctxt.linkShared && d.ctxt.BuildMode != BuildModePlugin {
- // runtime.buildVersion and runtime.modinfo are referenced in .go.buildinfo section
- // (see function buildinfo in data.go). They should normally be reachable from the
- // runtime. Just make it explicit, in case.
- names = append(names, "runtime.buildVersion", "runtime.modinfo")
- }
if d.ctxt.BuildMode == BuildModePlugin {
names = append(names, objabi.PathToPrefix(*flagPluginPath)+"..inittask", objabi.PathToPrefix(*flagPluginPath)+".main", "go.plugin.tabs")
diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go
index db9002491e..2f9bf25d10 100644
--- a/src/cmd/link/internal/ld/dwarf_test.go
+++ b/src/cmd/link/internal/ld/dwarf_test.go
@@ -7,9 +7,9 @@ package ld
import (
intdwarf "cmd/internal/dwarf"
objfilepkg "cmd/internal/objfile" // renamed to avoid conflict with objfile function
+ "cmd/link/internal/dwtest"
"debug/dwarf"
"debug/pe"
- "errors"
"fmt"
"internal/buildcfg"
"internal/testenv"
@@ -352,8 +352,8 @@ func varDeclCoordsAndSubrogramDeclFile(t *testing.T, testpoint string, expectFil
}
rdr := d.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
@@ -373,7 +373,7 @@ func varDeclCoordsAndSubrogramDeclFile(t *testing.T, testpoint string, expectFil
}
// Walk main's children and select variable "i".
- mainIdx := ex.idxFromOffset(maindie.Offset)
+ mainIdx := ex.IdxFromOffset(maindie.Offset)
childDies := ex.Children(mainIdx)
var iEntry *dwarf.Entry
for _, child := range childDies {
@@ -396,7 +396,10 @@ func varDeclCoordsAndSubrogramDeclFile(t *testing.T, testpoint string, expectFil
if !fileIdxOK {
t.Errorf("missing or invalid DW_AT_decl_file for main")
}
- file := ex.FileRef(t, d, mainIdx, fileIdx)
+ file, err := ex.FileRef(d, mainIdx, fileIdx)
+ if err != nil {
+ t.Fatalf("FileRef: %v", err)
+ }
base := filepath.Base(file)
if base != expectFile {
t.Errorf("DW_AT_decl_file for main is %v, want %v", base, expectFile)
@@ -424,191 +427,6 @@ func TestVarDeclCoordsWithLineDirective(t *testing.T) {
"foobar.go", 202, "//line /foobar.go:200")
}
-// Helper class for supporting queries on DIEs within a DWARF .debug_info
-// section. Invoke the populate() method below passing in a dwarf.Reader,
-// which will read in all DIEs and keep track of parent/child
-// relationships. Queries can then be made to ask for DIEs by name or
-// by offset. This will hopefully reduce boilerplate for future test
-// writing.
-
-type examiner struct {
- dies []*dwarf.Entry
- idxByOffset map[dwarf.Offset]int
- kids map[int][]int
- parent map[int]int
- byname map[string][]int
-}
-
-// Populate the examiner using the DIEs read from rdr.
-func (ex *examiner) populate(rdr *dwarf.Reader) error {
- ex.idxByOffset = make(map[dwarf.Offset]int)
- ex.kids = make(map[int][]int)
- ex.parent = make(map[int]int)
- ex.byname = make(map[string][]int)
- var nesting []int
- for entry, err := rdr.Next(); entry != nil; entry, err = rdr.Next() {
- if err != nil {
- return err
- }
- if entry.Tag == 0 {
- // terminator
- if len(nesting) == 0 {
- return errors.New("nesting stack underflow")
- }
- nesting = nesting[:len(nesting)-1]
- continue
- }
- idx := len(ex.dies)
- ex.dies = append(ex.dies, entry)
- if _, found := ex.idxByOffset[entry.Offset]; found {
- return errors.New("DIE clash on offset")
- }
- ex.idxByOffset[entry.Offset] = idx
- if name, ok := entry.Val(dwarf.AttrName).(string); ok {
- ex.byname[name] = append(ex.byname[name], idx)
- }
- if len(nesting) > 0 {
- parent := nesting[len(nesting)-1]
- ex.kids[parent] = append(ex.kids[parent], idx)
- ex.parent[idx] = parent
- }
- if entry.Children {
- nesting = append(nesting, idx)
- }
- }
- if len(nesting) > 0 {
- return errors.New("unterminated child sequence")
- }
- return nil
-}
-
-func indent(ilevel int) {
- for i := 0; i < ilevel; i++ {
- fmt.Printf(" ")
- }
-}
-
-// For debugging new tests
-func (ex *examiner) dumpEntry(idx int, dumpKids bool, ilevel int) error {
- if idx >= len(ex.dies) {
- msg := fmt.Sprintf("bad DIE %d: index out of range\n", idx)
- return errors.New(msg)
- }
- entry := ex.dies[idx]
- indent(ilevel)
- fmt.Printf("0x%x: %v\n", idx, entry.Tag)
- for _, f := range entry.Field {
- indent(ilevel)
- fmt.Printf("at=%v val=0x%x\n", f.Attr, f.Val)
- }
- if dumpKids {
- ksl := ex.kids[idx]
- for _, k := range ksl {
- ex.dumpEntry(k, true, ilevel+2)
- }
- }
- return nil
-}
-
-// Given a DIE offset, return the previously read dwarf.Entry, or nil
-func (ex *examiner) entryFromOffset(off dwarf.Offset) *dwarf.Entry {
- if idx, found := ex.idxByOffset[off]; found && idx != -1 {
- return ex.entryFromIdx(idx)
- }
- return nil
-}
-
-// Return the ID that examiner uses to refer to the DIE at offset off
-func (ex *examiner) idxFromOffset(off dwarf.Offset) int {
- if idx, found := ex.idxByOffset[off]; found {
- return idx
- }
- return -1
-}
-
-// Return the dwarf.Entry pointer for the DIE with id 'idx'
-func (ex *examiner) entryFromIdx(idx int) *dwarf.Entry {
- if idx >= len(ex.dies) || idx < 0 {
- return nil
- }
- return ex.dies[idx]
-}
-
-// Returns a list of child entries for a die with ID 'idx'
-func (ex *examiner) Children(idx int) []*dwarf.Entry {
- sl := ex.kids[idx]
- ret := make([]*dwarf.Entry, len(sl))
- for i, k := range sl {
- ret[i] = ex.entryFromIdx(k)
- }
- return ret
-}
-
-// Returns parent DIE for DIE 'idx', or nil if the DIE is top level
-func (ex *examiner) Parent(idx int) *dwarf.Entry {
- p, found := ex.parent[idx]
- if !found {
- return nil
- }
- return ex.entryFromIdx(p)
-}
-
-// ParentCU returns the enclosing compilation unit DIE for the DIE
-// with a given index, or nil if for some reason we can't establish a
-// parent.
-func (ex *examiner) ParentCU(idx int) *dwarf.Entry {
- for {
- parentDie := ex.Parent(idx)
- if parentDie == nil {
- return nil
- }
- if parentDie.Tag == dwarf.TagCompileUnit {
- return parentDie
- }
- idx = ex.idxFromOffset(parentDie.Offset)
- }
-}
-
-// FileRef takes a given DIE by index and a numeric file reference
-// (presumably from a decl_file or call_file attribute), looks up the
-// reference in the .debug_line file table, and returns the proper
-// string for it. We need to know which DIE is making the reference
-// so as find the right compilation unit.
-func (ex *examiner) FileRef(t *testing.T, dw *dwarf.Data, dieIdx int, fileRef int64) string {
-
- // Find the parent compilation unit DIE for the specified DIE.
- cuDie := ex.ParentCU(dieIdx)
- if cuDie == nil {
- t.Fatalf("no parent CU DIE for DIE with idx %d?", dieIdx)
- return ""
- }
- // Construct a line reader and then use it to get the file string.
- lr, lrerr := dw.LineReader(cuDie)
- if lrerr != nil {
- t.Fatal("d.LineReader: ", lrerr)
- return ""
- }
- files := lr.Files()
- if fileRef < 0 || int(fileRef) > len(files)-1 {
- t.Fatalf("examiner.FileRef: malformed file reference %d", fileRef)
- return ""
- }
- return files[fileRef].Name
-}
-
-// Return a list of all DIEs with name 'name'. When searching for DIEs
-// by name, keep in mind that the returned results will include child
-// DIEs such as params/variables. For example, asking for all DIEs named
-// "p" for even a small program will give you 400-500 entries.
-func (ex *examiner) Named(name string) []*dwarf.Entry {
- sl := ex.byname[name]
- ret := make([]*dwarf.Entry, len(sl))
- for i, k := range sl {
- ret[i] = ex.entryFromIdx(k)
- }
- return ret
-}
-
func TestInlinedRoutineRecords(t *testing.T) {
testenv.MustHaveGoBuild(t)
@@ -656,8 +474,8 @@ func main() {
expectedInl := []string{"main.cand"}
rdr := d.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
@@ -677,7 +495,7 @@ func main() {
}
// Walk main's children and pick out the inlined subroutines
- mainIdx := ex.idxFromOffset(maindie.Offset)
+ mainIdx := ex.IdxFromOffset(maindie.Offset)
childDies := ex.Children(mainIdx)
exCount := 0
for _, child := range childDies {
@@ -687,7 +505,7 @@ func main() {
if !originOK {
t.Fatalf("no abstract origin attr for inlined subroutine at offset %v", child.Offset)
}
- originDIE := ex.entryFromOffset(ooff)
+ originDIE := ex.EntryFromOffset(ooff)
if originDIE == nil {
t.Fatalf("can't locate origin DIE at off %v", ooff)
}
@@ -696,7 +514,7 @@ func main() {
// to see child variables there, even if (perhaps due to
// optimization) there are no references to them from the
// inlined subroutine DIE.
- absFcnIdx := ex.idxFromOffset(ooff)
+ absFcnIdx := ex.IdxFromOffset(ooff)
absFcnChildDies := ex.Children(absFcnIdx)
if len(absFcnChildDies) != 2 {
t.Fatalf("expected abstract function: expected 2 children, got %d children", len(absFcnChildDies))
@@ -735,7 +553,11 @@ func main() {
if !cfOK {
t.Fatalf("no call_file attr for inlined subroutine at offset %v", child.Offset)
}
- file := ex.FileRef(t, d, mainIdx, cf)
+ file, err := ex.FileRef(d, mainIdx, cf)
+ if err != nil {
+ t.Errorf("FileRef: %v", err)
+ continue
+ }
base := filepath.Base(file)
if base != "test.go" {
t.Errorf("bad call_file attribute, found '%s', want '%s'",
@@ -747,7 +569,7 @@ func main() {
// Walk the child variables of the inlined routine. Each
// of them should have a distinct abstract origin-- if two
// vars point to the same origin things are definitely broken.
- inlIdx := ex.idxFromOffset(child.Offset)
+ inlIdx := ex.IdxFromOffset(child.Offset)
inlChildDies := ex.Children(inlIdx)
for _, k := range inlChildDies {
ooff, originOK := k.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset)
@@ -780,15 +602,15 @@ func abstractOriginSanity(t *testing.T, pkgDir string, flags string) {
t.Fatalf("error reading DWARF: %v", err)
}
rdr := d.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
// Make a pass through all DIEs looking for abstract origin
// references.
abscount := 0
- for i, die := range ex.dies {
+ for i, die := range ex.DIEs() {
// Does it have an abstract origin?
ooff, originOK := die.Val(dwarf.AttrAbstractOrigin).(dwarf.Offset)
if !originOK {
@@ -797,9 +619,9 @@ func abstractOriginSanity(t *testing.T, pkgDir string, flags string) {
// All abstract origin references should be resolvable.
abscount += 1
- originDIE := ex.entryFromOffset(ooff)
+ originDIE := ex.EntryFromOffset(ooff)
if originDIE == nil {
- ex.dumpEntry(i, false, 0)
+ ex.DumpEntry(i, false, 0)
t.Fatalf("unresolved abstract origin ref in DIE at offset 0x%x\n", die.Offset)
}
@@ -807,7 +629,7 @@ func abstractOriginSanity(t *testing.T, pkgDir string, flags string) {
// K2, ... KN}. If X has an abstract origin of A, then for
// each KJ, the abstract origin of KJ should be a child of A.
// Note that this same rule doesn't hold for non-variable DIEs.
- pidx := ex.idxFromOffset(die.Offset)
+ pidx := ex.IdxFromOffset(die.Offset)
if pidx < 0 {
t.Fatalf("can't locate DIE id")
}
@@ -821,15 +643,15 @@ func abstractOriginSanity(t *testing.T, pkgDir string, flags string) {
if !originOK {
continue
}
- childOriginDIE := ex.entryFromOffset(kooff)
+ childOriginDIE := ex.EntryFromOffset(kooff)
if childOriginDIE == nil {
- ex.dumpEntry(i, false, 0)
+ ex.DumpEntry(i, false, 0)
t.Fatalf("unresolved abstract origin ref in DIE at offset %x", kid.Offset)
}
- coidx := ex.idxFromOffset(childOriginDIE.Offset)
+ coidx := ex.IdxFromOffset(childOriginDIE.Offset)
childOriginParent := ex.Parent(coidx)
if childOriginParent != originDIE {
- ex.dumpEntry(i, false, 0)
+ ex.DumpEntry(i, false, 0)
t.Fatalf("unexpected parent of abstract origin DIE at offset %v", childOriginDIE.Offset)
}
}
@@ -977,8 +799,8 @@ func main() {
}
rdr := d.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
dies := ex.Named("*main.X")
@@ -1501,8 +1323,8 @@ func TestIssue39757(t *testing.T) {
t.Fatalf("error parsing DWARF: %v", err)
}
rdr := dw.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
@@ -1521,7 +1343,7 @@ func TestIssue39757(t *testing.T) {
highpc := maindie.Val(dwarf.AttrHighpc).(uint64)
// Now read the line table for the 'main' compilation unit.
- mainIdx := ex.idxFromOffset(maindie.Offset)
+ mainIdx := ex.IdxFromOffset(maindie.Offset)
cuentry := ex.Parent(mainIdx)
if cuentry == nil {
t.Fatalf("main.main DIE appears orphaned")
@@ -1635,6 +1457,66 @@ func TestIssue42484(t *testing.T) {
f.Close()
}
+// processParams examines the formal parameter children of subprogram
+// DIE "die" using the explorer "ex" and returns a string that
+// captures the name, order, and classification of the subprogram's
+// input and output parameters. For example, for the go function
+//
+// func foo(i1 int, f1 float64) (string, bool) {
+//
+// this function would return a string something like
+//
+// i1:0:1 f1:1:1 ~r0:2:2 ~r1:3:2
+//
+// where each chunk above is of the form NAME:ORDER:INOUTCLASSIFICATION
+//
+func processParams(die *dwarf.Entry, ex *dwtest.Examiner) string {
+ // Values in the returned map are of the form :
+ // where order is the order within the child DIE list of the
+ // param, and is an integer:
+ //
+ // -1: varparm attr not found
+ // 1: varparm found with value false
+ // 2: varparm found with value true
+ //
+ foundParams := make(map[string]string)
+
+ // Walk the subprogram DIE's children looking for params.
+ pIdx := ex.IdxFromOffset(die.Offset)
+ childDies := ex.Children(pIdx)
+ idx := 0
+ for _, child := range childDies {
+ if child.Tag == dwarf.TagFormalParameter {
+ // NB: a setting of DW_AT_variable_parameter indicates
+ // that the param in question is an output parameter; we
+ // want to see this attribute set to TRUE for all Go
+ // return params. It would be OK to have it missing for
+ // input parameters, but for the moment we verify that the
+ // attr is present but set to false.
+ st := -1
+ if vp, ok := child.Val(dwarf.AttrVarParam).(bool); ok {
+ if vp {
+ st = 2
+ } else {
+ st = 1
+ }
+ }
+ if name, ok := child.Val(dwarf.AttrName).(string); ok {
+ foundParams[name] = fmt.Sprintf("%d:%d", idx, st)
+ idx++
+ }
+ }
+ }
+
+ found := make([]string, 0, len(foundParams))
+ for k, v := range foundParams {
+ found = append(found, fmt.Sprintf("%s:%s", k, v))
+ }
+ sort.Strings(found)
+
+ return fmt.Sprintf("%+v", found)
+}
+
func TestOutputParamAbbrevAndAttr(t *testing.T) {
testenv.MustHaveGoBuild(t)
@@ -1674,8 +1556,8 @@ func main() {
}
rdr := d.Reader()
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
@@ -1694,56 +1576,15 @@ func main() {
t.Fatalf("unexpected tag %v on main.ABC DIE", abcdie.Tag)
}
- // A setting of DW_AT_variable_parameter indicates that the
- // param in question is an output parameter; we want to see this
- // attribute set to TRUE for all Go return params. It would be
- // OK to have it missing for input parameters, but for the moment
- // we verify that the attr is present but set to false.
-
-	// Values in this map are of the form <order>:<varparam>
-	// where order is the order within the child DIE list of the param,
-	// and <varparam> is an integer:
- //
- // -1: varparm attr not found
- // 1: varparm found with value false
- // 2: varparm found with value true
- //
- foundParams := make(map[string]string)
-
- // Walk ABCs's children looking for params.
- abcIdx := ex.idxFromOffset(abcdie.Offset)
- childDies := ex.Children(abcIdx)
- idx := 0
- for _, child := range childDies {
- if child.Tag == dwarf.TagFormalParameter {
- st := -1
- if vp, ok := child.Val(dwarf.AttrVarParam).(bool); ok {
- if vp {
- st = 2
- } else {
- st = 1
- }
- }
- if name, ok := child.Val(dwarf.AttrName).(string); ok {
- foundParams[name] = fmt.Sprintf("%d:%d", idx, st)
- idx++
- }
- }
- }
-
- // Digest the result.
- found := make([]string, 0, len(foundParams))
- for k, v := range foundParams {
- found = append(found, fmt.Sprintf("%s:%s", k, v))
- }
- sort.Strings(found)
+ // Call a helper to collect param info.
+ found := processParams(abcdie, &ex)
// Make sure we see all of the expected params in the proper
- // order, that they have the varparam attr, and the varparm is set
- // for the returns.
+ // order, that they have the varparam attr, and the varparam is
+ // set for the returns.
expected := "[c1:0:1 c2:1:1 c3:2:1 d1:3:1 d2:4:1 d3:5:1 d4:6:1 f1:7:1 f2:8:1 f3:9:1 g1:10:1 r1:11:2 r2:12:2 r3:13:2 r4:14:2 r5:15:2 r6:16:2]"
- if fmt.Sprintf("%+v", found) != expected {
- t.Errorf("param check failed, wanted %s got %s\n",
+ if found != expected {
+ t.Errorf("param check failed, wanted:\n%s\ngot:\n%s\n",
expected, found)
}
}
@@ -1835,8 +1676,8 @@ func main() {
}
rdr.Seek(0)
- ex := examiner{}
- if err := ex.populate(rdr); err != nil {
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
t.Fatalf("error reading DWARF: %v", err)
}
for _, typeName := range []string{"main.CustomInt", "map[int]main.CustomInt"} {
@@ -1849,3 +1690,156 @@ func main() {
}
}
}
+
+func TestOptimizedOutParamHandling(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+ t.Parallel()
+
+ // This test is intended to verify that the compiler emits DWARF
+ // DIE entries for all input and output parameters, and that:
+ //
+ // - attributes are set correctly for output params,
+ // - things appear in the proper order
+ // - things work properly for both register-resident
+ // params and params passed on the stack
+ // - things work for both referenced and unreferenced params
+	// - things work for both named and un-named return values
+ //
+ // The scenarios below don't cover all possible permutations and
+ // combinations, but they hit a bunch of the high points.
+
+ const prog = `
+package main
+
+// First testcase. All input params in registers, all params used.
+
+//go:noinline
+func tc1(p1, p2 int, p3 string) (int, string) {
+ return p1 + p2, p3 + "foo"
+}
+
+// Second testcase. Some params in registers, some on stack.
+
+//go:noinline
+func tc2(p1 int, p2 [128]int, p3 string) (int, string, [128]int) {
+ return p1 + p2[p1], p3 + "foo", [128]int{p1}
+}
+
+// Third testcase. Named return params.
+
+//go:noinline
+func tc3(p1 int, p2 [128]int, p3 string) (r1 int, r2 bool, r3 string, r4 [128]int) {
+ if p1 == 101 {
+ r1 = p1 + p2[p1]
+ r2 = p3 == "foo"
+ r4 = [128]int{p1}
+ return
+ } else {
+ return p1 - p2[p1+3], false, "bar", [128]int{p1 + 2}
+ }
+}
+
+// Fourth testcase. Some things are used, some are unused.
+
+//go:noinline
+func tc4(p1, p1un int, p2, p2un [128]int, p3, p3un string) (r1 int, r1un int, r2 bool, r3 string, r4, r4un [128]int) {
+ if p1 == 101 {
+ r1 = p1 + p2[p2[0]]
+ r2 = p3 == "foo"
+ r4 = [128]int{p1}
+ return
+ } else {
+ return p1, -1, true, "plex", [128]int{p1 + 2}, [128]int{-1}
+ }
+}
+
+func main() {
+ {
+ r1, r2 := tc1(3, 4, "five")
+ println(r1, r2)
+ }
+ {
+ x := [128]int{9}
+ r1, r2, r3 := tc2(3, x, "five")
+ println(r1, r2, r3[0])
+ }
+ {
+ x := [128]int{9}
+ r1, r2, r3, r4 := tc3(3, x, "five")
+ println(r1, r2, r3, r4[0])
+ }
+ {
+ x := [128]int{3}
+ y := [128]int{7}
+ r1, r1u, r2, r3, r4, r4u := tc4(0, 1, x, y, "a", "b")
+ println(r1, r1u, r2, r3, r4[0], r4u[1])
+ }
+
+}
+`
+ dir := t.TempDir()
+ f := gobuild(t, dir, prog, DefaultOpt)
+ defer f.Close()
+
+ d, err := f.DWARF()
+ if err != nil {
+ t.Fatalf("error reading DWARF: %v", err)
+ }
+
+ rdr := d.Reader()
+ ex := dwtest.Examiner{}
+ if err := ex.Populate(rdr); err != nil {
+ t.Fatalf("error reading DWARF: %v", err)
+ }
+
+ testcases := []struct {
+ tag string
+ expected string
+ }{
+ {
+ tag: "tc1",
+ expected: "[p1:0:1 p2:1:1 p3:2:1 ~r0:3:2 ~r1:4:2]",
+ },
+ {
+ tag: "tc2",
+ expected: "[p1:0:1 p2:1:1 p3:2:1 ~r0:3:2 ~r1:4:2 ~r2:5:2]",
+ },
+ {
+ tag: "tc3",
+ expected: "[p1:0:1 p2:1:1 p3:2:1 r1:3:2 r2:4:2 r3:5:2 r4:6:2]",
+ },
+ {
+ tag: "tc4",
+ expected: "[p1:0:1 p1un:1:1 p2:2:1 p2un:3:1 p3:4:1 p3un:5:1 r1:6:2 r1un:7:2 r2:8:2 r3:9:2 r4:10:2 r4un:11:2]",
+ },
+ }
+
+ for _, tc := range testcases {
+ // Locate the proper DIE
+ which := fmt.Sprintf("main.%s", tc.tag)
+ tcs := ex.Named(which)
+ if len(tcs) == 0 {
+ t.Fatalf("unable to locate DIE for " + which)
+ }
+ if len(tcs) != 1 {
+ t.Fatalf("more than one " + which + " DIE")
+ }
+ die := tcs[0]
+
+ // Vet the DIE
+ if die.Tag != dwarf.TagSubprogram {
+ t.Fatalf("unexpected tag %v on "+which+" DIE", die.Tag)
+ }
+
+ // Examine params for this subprogram.
+ foundParams := processParams(die, &ex)
+ if foundParams != tc.expected {
+ t.Errorf("check failed for testcase %s -- wanted:\n%s\ngot:%s\n",
+ tc.tag, tc.expected, foundParams)
+ }
+ }
+}
diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go
index fb75c761a1..1bdfb3369c 100644
--- a/src/cmd/link/internal/ld/elf.go
+++ b/src/cmd/link/internal/ld/elf.go
@@ -1080,7 +1080,12 @@ func elfshbits(linkmode LinkMode, sect *sym.Section) *ElfShdr {
}
if sect.Vaddr < sect.Seg.Vaddr+sect.Seg.Filelen {
- sh.Type = uint32(elf.SHT_PROGBITS)
+ switch sect.Name {
+ case ".init_array":
+ sh.Type = uint32(elf.SHT_INIT_ARRAY)
+ default:
+ sh.Type = uint32(elf.SHT_PROGBITS)
+ }
} else {
sh.Type = uint32(elf.SHT_NOBITS)
}
@@ -1682,13 +1687,18 @@ func asmbElf(ctxt *Link) {
var pph *ElfPhdr
var pnote *ElfPhdr
+ getpnote := func() *ElfPhdr {
+ if pnote == nil {
+ pnote = newElfPhdr()
+ pnote.Type = elf.PT_NOTE
+ pnote.Flags = elf.PF_R
+ }
+ return pnote
+ }
if *flagRace && ctxt.IsNetbsd() {
sh := elfshname(".note.netbsd.pax")
resoff -= int64(elfnetbsdpax(sh, uint64(startva), uint64(resoff)))
- pnote = newElfPhdr()
- pnote.Type = elf.PT_NOTE
- pnote.Flags = elf.PF_R
- phsh(pnote, sh)
+ phsh(getpnote(), sh)
}
if ctxt.LinkMode == LinkExternal {
/* skip program headers */
@@ -1787,7 +1797,6 @@ func asmbElf(ctxt *Link) {
phsh(ph, sh)
}
- pnote = nil
if ctxt.HeadType == objabi.Hnetbsd || ctxt.HeadType == objabi.Hopenbsd {
var sh *ElfShdr
switch ctxt.HeadType {
@@ -1799,34 +1808,23 @@ func asmbElf(ctxt *Link) {
sh = elfshname(".note.openbsd.ident")
resoff -= int64(elfopenbsdsig(sh, uint64(startva), uint64(resoff)))
}
-
- pnote = newElfPhdr()
- pnote.Type = elf.PT_NOTE
- pnote.Flags = elf.PF_R
- phsh(pnote, sh)
+ // netbsd and openbsd require ident in an independent segment.
+ pnotei := newElfPhdr()
+ pnotei.Type = elf.PT_NOTE
+ pnotei.Flags = elf.PF_R
+ phsh(pnotei, sh)
}
if len(buildinfo) > 0 {
sh := elfshname(".note.gnu.build-id")
resoff -= int64(elfbuildinfo(sh, uint64(startva), uint64(resoff)))
-
- if pnote == nil {
- pnote = newElfPhdr()
- pnote.Type = elf.PT_NOTE
- pnote.Flags = elf.PF_R
- }
-
- phsh(pnote, sh)
+ phsh(getpnote(), sh)
}
if *flagBuildid != "" {
sh := elfshname(".note.go.buildid")
resoff -= int64(elfgobuildid(sh, uint64(startva), uint64(resoff)))
-
- pnote := newElfPhdr()
- pnote.Type = elf.PT_NOTE
- pnote.Flags = elf.PF_R
- phsh(pnote, sh)
+ phsh(getpnote(), sh)
}
// Additions to the reserved area must be above this line.
diff --git a/src/cmd/link/internal/ld/ld.go b/src/cmd/link/internal/ld/ld.go
index 7ff9c41f96..954921844c 100644
--- a/src/cmd/link/internal/ld/ld.go
+++ b/src/cmd/link/internal/ld/ld.go
@@ -85,6 +85,12 @@ func (ctxt *Link) readImportCfg(file string) {
log.Fatalf(`%s:%d: invalid packageshlib: syntax is "packageshlib path=filename"`, file, lineNum)
}
ctxt.PackageShlib[before] = after
+ case "modinfo":
+ s, err := strconv.Unquote(args)
+ if err != nil {
+ log.Fatalf("%s:%d: invalid modinfo: %v", file, lineNum, err)
+ }
+ addstrdata1(ctxt, "runtime.modinfo="+s)
}
}
}
diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go
index 4aca36db98..5b82dc287d 100644
--- a/src/cmd/link/internal/ld/lib.go
+++ b/src/cmd/link/internal/ld/lib.go
@@ -1104,7 +1104,6 @@ func hostlinksetup(ctxt *Link) {
*flagTmpdir = dir
ownTmpDir = true
AtExit(func() {
- ctxt.Out.Close()
os.RemoveAll(*flagTmpdir)
})
}
@@ -1271,7 +1270,10 @@ func (ctxt *Link) hostlink() {
if ctxt.DynlinkingGo() && buildcfg.GOOS != "ios" {
// -flat_namespace is deprecated on iOS.
// It is useful for supporting plugins. We don't support plugins on iOS.
- argv = append(argv, "-Wl,-flat_namespace")
+ // -flat_namespace may cause the dynamic linker to hang at forkExec when
+ // resolving a lazy binding. See issue 38824.
+ // Force eager resolution to work around.
+ argv = append(argv, "-Wl,-flat_namespace", "-Wl,-bind_at_load")
}
if !combineDwarf {
argv = append(argv, "-Wl,-S") // suppress STAB (symbolic debugging) symbols
@@ -1500,8 +1502,19 @@ func (ctxt *Link) hostlink() {
}
return strings.Trim(string(out), "\n")
}
- argv = append(argv, getPathFile("crtcxa.o"))
- argv = append(argv, getPathFile("crtdbase.o"))
+ // Since GCC version 11, the 64-bit version of GCC starting files
+ // are now suffixed by "_64". Even under "-maix64" multilib directory
+ // "crtcxa.o" is 32-bit.
+ crtcxa := getPathFile("crtcxa_64.o")
+ if !filepath.IsAbs(crtcxa) {
+ crtcxa = getPathFile("crtcxa.o")
+ }
+ crtdbase := getPathFile("crtdbase_64.o")
+ if !filepath.IsAbs(crtdbase) {
+ crtdbase = getPathFile("crtdbase.o")
+ }
+ argv = append(argv, crtcxa)
+ argv = append(argv, crtdbase)
}
if ctxt.linkShared {
diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go
index a1d86965e4..26f9db8ec4 100644
--- a/src/cmd/link/internal/ld/main.go
+++ b/src/cmd/link/internal/ld/main.go
@@ -172,6 +172,10 @@ func Main(arch *sys.Arch, theArch Arch) {
usage()
}
+ if *FlagD && ctxt.UsesLibc() {
+ Exitf("dynamic linking required on %s; -d flag cannot be used", buildcfg.GOOS)
+ }
+
checkStrictDups = *FlagStrictDups
if !buildcfg.Experiment.RegabiWrappers {
diff --git a/src/cmd/link/internal/ld/outbuf.go b/src/cmd/link/internal/ld/outbuf.go
index 9d5e8854fe..1d21dce9c5 100644
--- a/src/cmd/link/internal/ld/outbuf.go
+++ b/src/cmd/link/internal/ld/outbuf.go
@@ -131,6 +131,20 @@ func (out *OutBuf) Close() error {
return nil
}
+// ErrorClose closes the output file (if any).
+// It is supposed to be called only at exit on error, so it doesn't do
+// any clean up or buffer flushing, just closes the file.
+func (out *OutBuf) ErrorClose() {
+ if out.isView {
+ panic(viewCloseError)
+ }
+ if out.f == nil {
+ return
+ }
+ out.f.Close() // best effort, ignore error
+ out.f = nil
+}
+
// isMmapped returns true if the OutBuf is mmaped.
func (out *OutBuf) isMmapped() bool {
return len(out.buf) != 0
diff --git a/src/cmd/link/internal/ld/sym.go b/src/cmd/link/internal/ld/sym.go
index 72639962e2..d51a59ef46 100644
--- a/src/cmd/link/internal/ld/sym.go
+++ b/src/cmd/link/internal/ld/sym.go
@@ -60,7 +60,7 @@ func linknew(arch *sys.Arch) *Link {
AtExit(func() {
if nerrors > 0 {
- ctxt.Out.Close()
+ ctxt.Out.ErrorClose()
mayberemoveoutfile()
}
})
diff --git a/src/cmd/link/internal/ld/target.go b/src/cmd/link/internal/ld/target.go
index f68de8fff1..58d45d1504 100644
--- a/src/cmd/link/internal/ld/target.go
+++ b/src/cmd/link/internal/ld/target.go
@@ -185,3 +185,13 @@ func (t *Target) mustSetHeadType() {
func (t *Target) IsBigEndian() bool {
return t.Arch.ByteOrder == binary.BigEndian
}
+
+func (t *Target) UsesLibc() bool {
+ t.mustSetHeadType()
+ switch t.HeadType {
+ case objabi.Haix, objabi.Hdarwin, objabi.Hopenbsd, objabi.Hsolaris, objabi.Hwindows:
+ // platforms where we use libc for syscalls.
+ return true
+ }
+ return false
+}
diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go
index aba6138c83..aaddf19d16 100644
--- a/src/cmd/link/internal/ld/xcoff.go
+++ b/src/cmd/link/internal/ld/xcoff.go
@@ -1290,10 +1290,6 @@ func Xcoffadddynrel(target *Target, ldr *loader.Loader, syms *ArchSyms, s loader
}
func (ctxt *Link) doxcoff() {
- if *FlagD {
- // All XCOFF files have dynamic symbols because of the syscalls.
- Exitf("-d is not available on AIX")
- }
ldr := ctxt.loader
// TOC
diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go
index b4f565a153..d05d8e3b4b 100644
--- a/src/cmd/link/internal/loadelf/ldelf.go
+++ b/src/cmd/link/internal/loadelf/ldelf.go
@@ -599,7 +599,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader,
if strings.HasPrefix(elfsym.name, ".LASF") { // gcc on s390x does this
continue
}
- return errorf("%v: sym#%d: ignoring symbol in section %d (type %d)", elfsym.sym, i, elfsym.shndx, elfsym.type_)
+ return errorf("%v: sym#%d (%s): ignoring symbol in section %d (type %d)", elfsym.sym, i, elfsym.name, elfsym.shndx, elfsym.type_)
}
s := elfsym.sym
diff --git a/src/cmd/nm/nm.go b/src/cmd/nm/nm.go
index 457239921b..178eeb27be 100644
--- a/src/cmd/nm/nm.go
+++ b/src/cmd/nm/nm.go
@@ -93,7 +93,7 @@ func main() {
var exitCode = 0
-func errorf(format string, args ...interface{}) {
+func errorf(format string, args ...any) {
log.Printf(format, args...)
exitCode = 1
}
diff --git a/src/cmd/pack/pack_test.go b/src/cmd/pack/pack_test.go
index 7842b562dc..81e78f53e2 100644
--- a/src/cmd/pack/pack_test.go
+++ b/src/cmd/pack/pack_test.go
@@ -203,7 +203,7 @@ func TestLargeDefs(t *testing.T) {
}
b := bufio.NewWriter(f)
- printf := func(format string, args ...interface{}) {
+ printf := func(format string, args ...any) {
_, err := fmt.Fprintf(b, format, args...)
if err != nil {
t.Fatalf("Writing to %s: %v", large, err)
@@ -454,7 +454,7 @@ func (f *FakeFile) IsDir() bool {
return false
}
-func (f *FakeFile) Sys() interface{} {
+func (f *FakeFile) Sys() any {
return nil
}
diff --git a/src/cmd/pprof/readlineui.go b/src/cmd/pprof/readlineui.go
index 7ad712cd60..b269177650 100644
--- a/src/cmd/pprof/readlineui.go
+++ b/src/cmd/pprof/readlineui.go
@@ -69,18 +69,18 @@ func (r *readlineUI) ReadLine(prompt string) (string, error) {
// It formats the text as fmt.Print would and adds a final \n if not already present.
// For line-based UI, Print writes to standard error.
// (Standard output is reserved for report data.)
-func (r *readlineUI) Print(args ...interface{}) {
+func (r *readlineUI) Print(args ...any) {
r.print(false, args...)
}
// PrintErr shows an error message to the user.
// It formats the text as fmt.Print would and adds a final \n if not already present.
// For line-based UI, PrintErr writes to standard error.
-func (r *readlineUI) PrintErr(args ...interface{}) {
+func (r *readlineUI) PrintErr(args ...any) {
r.print(true, args...)
}
-func (r *readlineUI) print(withColor bool, args ...interface{}) {
+func (r *readlineUI) print(withColor bool, args ...any) {
text := fmt.Sprint(args...)
if !strings.HasSuffix(text, "\n") {
text += "\n"
diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go
index 3aeba223ee..a30db9a012 100644
--- a/src/cmd/trace/main.go
+++ b/src/cmd/trace/main.go
@@ -206,7 +206,7 @@ var templMain = template.Must(template.New("").Parse(`