diff --git a/.github/ISSUE_TEMPLATE/00-bug.md b/.github/ISSUE_TEMPLATE/00-bug.md deleted file mode 100644 index f056dab7dd..0000000000 --- a/.github/ISSUE_TEMPLATE/00-bug.md +++ /dev/null @@ -1,45 +0,0 @@ ---- -name: Bugs -about: The go command, standard library, or anything else -title: "affected/package: " ---- - - - -### What version of Go are you using (`go version`)? - -
-$ go version
-
-
- -### Does this issue reproduce with the latest release? - - - -### What operating system and processor architecture are you using (`go env`)? - -
go env Output
-$ go env
-
-
- -### What did you do? - - - - - -### What did you expect to see? - - - -### What did you see instead? - - diff --git a/.github/ISSUE_TEMPLATE/00-bug.yml b/.github/ISSUE_TEMPLATE/00-bug.yml new file mode 100644 index 0000000000..5b0fda4950 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/00-bug.yml @@ -0,0 +1,94 @@ +# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/configuring-issue-templates-for-your-repository#creating-issue-forms +# https://docs.github.com/en/communities/using-templates-to-encourage-useful-issues-and-pull-requests/syntax-for-githubs-form-schema +name: Bugs +description: The go command, standard library, or anything else +title: "import/path: issue title" + +body: + - type: markdown + attributes: + value: | + Thanks for helping us improve! 🙏 Please answer these questions and provide as much information as possible about your problem. + + - type: input + id: go-version + attributes: + label: Go version + description: | + What version of Go are you using (`go version`)? + + Note: we only [support](https://go.dev/doc/devel/release#policy) the two most recent major releases. + placeholder: ex. 
go version go1.20.7 darwin/arm64 + validations: + required: true + + - type: textarea + id: go-env + attributes: + label: "Output of `go env` in your module/workspace:" + placeholder: | + GO111MODULE="" + GOARCH="arm64" + GOBIN="/Users/gopher/go/bin" + GOCACHE="/Users/gopher/go/cache" + GOENV="/Users/gopher/Library/Application Support/go/env" + GOEXE="" + GOEXPERIMENT="" + GOFLAGS="" + GOHOSTARCH="arm64" + GOHOSTOS="darwin" + GOINSECURE="" + GOMODCACHE="/Users/gopher/go/pkg/mod" + GONOPROXY="" + GONOSUMDB="" + GOOS="darwin" + GOPATH="/Users/gopher/go" + GOPRIVATE="" + GOPROXY="https://proxy.golang.org,direct" + GOROOT="/usr/local/go" + GOSUMDB="sum.golang.org" + GOTMPDIR="" + GOTOOLDIR="/usr/local/go/pkg/tool/darwin_arm64" + GOVCS="" + GOVERSION="go1.20.7" + GCCGO="gccgo" + AR="ar" + CC="clang" + CXX="clang++" + CGO_ENABLED="1" + GOMOD="/dev/null" + GOWORK="" + CGO_CFLAGS="-O2 -g" + CGO_CPPFLAGS="" + CGO_CXXFLAGS="-O2 -g" + CGO_FFLAGS="-O2 -g" + CGO_LDFLAGS="-O2 -g" + PKG_CONFIG="pkg-config" + GOGCCFLAGS="-fPIC -arch arm64 -pthread -fno-caret-diagnostics -Qunused-arguments -fmessage-length=0 -fdebug-prefix-map=/var/folders/44/nbbyll_10jd0z8rj_qxm43740000gn/T/go-build2331607515=/tmp/go-build -gno-record-gcc-switches -fno-common" + render: shell + validations: + required: true + + - type: textarea + id: what-did-you-do + attributes: + label: "What did you do?" + description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best." + validations: + required: true + + - type: textarea + id: actual-behavior + attributes: + label: "What did you see happen?" + description: Command invocations and their associated output, functions with their arguments and return results, full stacktraces for panics (upload a file if it is very long), etc. Prefer copying text output over using screenshots. 
+ validations: + required: true + + - type: textarea + id: expected-behavior + attributes: + label: "What did you expect to see?" + description: Why is the current output incorrect, and any additional context we may need to understand the issue. + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/01-pkgsite.md b/.github/ISSUE_TEMPLATE/01-pkgsite.md deleted file mode 100644 index 31f0fd16b1..0000000000 --- a/.github/ISSUE_TEMPLATE/01-pkgsite.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -name: Pkg.go.dev bugs or feature requests -about: Issues or feature requests for the documentation site -title: "x/pkgsite: " -labels: pkgsite ---- - - - -### What is the URL of the page with the issue? - - - -### What is your user agent? - - - - - -### Screenshot - - - - - -### What did you do? - - - - - -### What did you expect to see? - - - -### What did you see instead? - - diff --git a/.github/ISSUE_TEMPLATE/01-pkgsite.yml b/.github/ISSUE_TEMPLATE/01-pkgsite.yml new file mode 100644 index 0000000000..aaf39b2928 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/01-pkgsite.yml @@ -0,0 +1,47 @@ +name: Pkg.go.dev bugs or feature requests +description: Issues or feature requests for the documentation site +title: "x/pkgsite: issue title" +labels: ["pkgsite"] +body: + - type: markdown + attributes: + value: "Please answer these questions before submitting your issue. Thanks!" + - type: input + id: url + attributes: + label: "What is the URL of the page with the issue?" + validations: + required: true + - type: input + id: user-agent + attributes: + label: "What is your user agent?" + description: "You can find your user agent here: https://www.google.com/search?q=what+is+my+user+agent" + validations: + required: true + - type: textarea + id: screenshot + attributes: + label: "Screenshot" + description: "Please paste a screenshot of the page." + validations: + required: false + - type: textarea + id: what-did-you-do + attributes: + label: "What did you do?" 
+ description: "If possible, provide a recipe for reproducing the error. Starting with a Private/Incognito tab/window may help rule out problematic browser extensions." + validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: "What did you see happen?" + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: "What did you expect to see?" + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md deleted file mode 100644 index 97fe317f5b..0000000000 --- a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.md +++ /dev/null @@ -1,39 +0,0 @@ ---- -name: Pkg.go.dev package removal request -about: Request a package be removed from the documentation site (pkg.go.dev) -title: "x/pkgsite: package removal request for [type path here]" -labels: pkgsite/package-removal ---- - - - -### What is the path of the package that you would like to have removed? - - - - - -### Are you the owner of this package? - - - - - -### What is the reason that you could not retract this package instead? - - - - diff --git a/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml new file mode 100644 index 0000000000..693f4999dc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/02-pkgsite-removal.yml @@ -0,0 +1,42 @@ +name: Pkg.go.dev package removal request +description: Request a package be removed from the documentation site (pkg.go.dev) +title: "x/pkgsite: package removal request for [type path here]" +labels: ["pkgsite/package-removal"] +body: + - type: markdown + attributes: + value: "Please answer these questions before submitting your issue. Thanks!" + - type: input + id: package-path + attributes: + label: "What is the path of the package that you would like to have removed?" + description: | + We can remove packages with a shared path prefix. 
+ For example, a request for 'github.com/author' would remove all pkg.go.dev pages with that package path prefix. + validations: + required: true + - type: textarea + id: package-owner + attributes: + label: "Are you the owner of this package?" + description: | + Only the package owners can request to have their packages removed from pkg.go.dev. + If the package path doesn't include your github username, please provide some other form of proof of ownership. + validations: + required: true + - type: textarea + id: retraction-reason + attributes: + label: "What is the reason that you could not retract this package instead?" + description: | + Requesting we remove a module here only hides the generated documentation on pkg.go.dev. + It does not affect the behaviour of proxy.golang.org or the go command. + Instead we recommend using the retract directive which will be processed by all 3 of the above. + + If you have deleted your repo, please recreate it and publish a retraction. + + Retracting a module version involves adding a retract directive to your go.mod file and publishing a new version. + For example: https://github.com/jba/retract-demo/blob/main/go.mod#L5-L8. + See https://pkg.go.dev/about#removing-a-package for additional tips on retractions. + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/03-gopls.md b/.github/ISSUE_TEMPLATE/03-gopls.md deleted file mode 100644 index a6b9d913c1..0000000000 --- a/.github/ISSUE_TEMPLATE/03-gopls.md +++ /dev/null @@ -1,63 +0,0 @@ ---- -name: Gopls bugs or feature requests -about: Issues or feature requests for the Go language server (gopls) -title: "x/tools/gopls: " -labels: - - gopls - - Tools ---- - - - -### gopls version - - - - - -### go env - - - - -### What did you do? - - - - - -### What did you expect to see? - - - -### What did you see instead? 
- - - -### Editor and settings - - - - - -### Logs - - - - diff --git a/.github/ISSUE_TEMPLATE/03-gopls.yml b/.github/ISSUE_TEMPLATE/03-gopls.yml new file mode 100644 index 0000000000..5db1315f27 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/03-gopls.yml @@ -0,0 +1,56 @@ +name: Gopls bugs or feature requests +description: Issues or feature requests for the Go language server (gopls) +title: "x/tools/gopls: issue title" +labels: ["gopls", "Tools"] +body: + - type: markdown + attributes: + value: "Please answer these questions before submitting your issue. Thanks!" + - type: input + id: gopls-version + attributes: + label: "gopls version" + description: "Output of `gopls -v version` on the command line" + validations: + required: true + - type: textarea + id: go-env + attributes: + label: "go env" + description: "Output of `go env` on the command line in your workspace directory" + render: shell + validations: + required: true + - type: textarea + id: what-did-you-do + attributes: + label: "What did you do?" + description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is better. A failing unit test is the best." + validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: "What did you see happen?" + validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: "What did you expect to see?" + validations: + required: true + - type: textarea + id: editor-and-settings + attributes: + label: "Editor and settings" + description: "Your editor and any settings you have configured (for example, your VSCode settings.json file)" + validations: + required: false + - type: textarea + id: logs + attributes: + label: "Logs" + description: "If possible please include gopls logs. 
Instructions for capturing them can be found here: https://github.com/golang/tools/blob/master/gopls/doc/troubleshooting.md#capture-logs" + validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/04-vuln.md b/.github/ISSUE_TEMPLATE/04-vuln.md deleted file mode 100644 index 7e129d78db..0000000000 --- a/.github/ISSUE_TEMPLATE/04-vuln.md +++ /dev/null @@ -1,51 +0,0 @@ ---- -name: Go vulnerability management - bugs and feature requests -about: Issues or feature requests about Go vulnerability management -title: "x/vuln: " -labels: "vulncheck or vulndb" ---- - - - -### What version of Go are you using (`go version`)? - -
-$ go version
-
-
- -### Does this issue reproduce at the latest version of golang.org/x/vuln? - - - -### What operating system and processor architecture are you using (`go env`)? - -
go env Output
-$ go env
-
-
- -### What did you do? - - - - - -### What did you expect to see? - - - -### What did you see instead? - - diff --git a/.github/ISSUE_TEMPLATE/04-vuln.yml b/.github/ISSUE_TEMPLATE/04-vuln.yml new file mode 100644 index 0000000000..dd40af99c6 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/04-vuln.yml @@ -0,0 +1,52 @@ +name: Go vulnerability management - bugs and feature requests +description: Issues or feature requests about Go vulnerability management +title: "x/vuln: issue title" +labels: ["vulncheck or vulndb"] +body: + - type: markdown + attributes: + value: "Please answer these questions before submitting your issue. Thanks! To add a new vulnerability to the Go vulnerability database (https://vuln.go.dev), see https://go.dev/s/vulndb-report-new. To report an issue about a report, see https://go.dev/s/vulndb-report-feedback." + - type: textarea + id: govulncheck-version + attributes: + label: govulncheck version + description: What version of govulncheck are you using (`govulncheck -version`)? + placeholder: | + Go: devel go1.22-0262ea1ff9 Thu Oct 26 18:46:50 2023 +0000 + Scanner: govulncheck@v1.0.2-0.20231108200754-fcf7dff7b242 + DB: https://vuln.go.dev + DB updated: 2023-11-21 15:39:17 +0000 UTC + validations: + required: true + - type: textarea + id: reproduce-latest-version + attributes: + label: "Does this issue reproduce at the latest version of golang.org/x/vuln?" + validations: + required: true + - type: textarea + id: go-env + attributes: + label: "Output of `go env` in your module/workspace:" + render: shell + validations: + required: true + - type: textarea + id: what-did-you-do + attributes: + label: "What did you do?" + description: "If possible, provide a recipe for reproducing the error. A complete runnable program is good. A link on [go.dev/play](https://go.dev/play) is best." + validations: + required: true + - type: textarea + id: actual-behavior + attributes: + label: "What did you see happen?" 
+ validations: + required: true + - type: textarea + id: expected-behavior + attributes: + label: "What did you expect to see?" + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/10-proposal.md b/.github/ISSUE_TEMPLATE/10-proposal.md deleted file mode 100644 index ab30ddf417..0000000000 --- a/.github/ISSUE_TEMPLATE/10-proposal.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -name: Proposals -about: New external API or other notable changes -title: "proposal: affected/package: " -labels: Proposal ---- - - - - diff --git a/.github/ISSUE_TEMPLATE/10-proposal.yml b/.github/ISSUE_TEMPLATE/10-proposal.yml new file mode 100644 index 0000000000..d2a256c5ae --- /dev/null +++ b/.github/ISSUE_TEMPLATE/10-proposal.yml @@ -0,0 +1,15 @@ +name: Proposals +description: New external API or other notable changes +title: "proposal: import/path: proposal title" +labels: ["Proposal"] +body: + - type: markdown + attributes: + value: "Our proposal process is documented here: https://go.dev/s/proposal-process" + - type: textarea + id: proposal-details + attributes: + label: "Proposal Details" + description: "Please provide the details of your proposal here." 
+ validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/11-language-change.md b/.github/ISSUE_TEMPLATE/11-language-change.md deleted file mode 100644 index cc9b82b3b7..0000000000 --- a/.github/ISSUE_TEMPLATE/11-language-change.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -name: Language Change Proposals -about: Changes to the language -title: "proposal: Go 2: " -labels: - - Proposal - - v2 - - LanguageChange ---- - - - -### Author background - -- **Would you consider yourself a novice, intermediate, or experienced Go programmer?** -- **What other languages do you have experience with?** - -### Related proposals - -- **Has this idea, or one like it, been proposed before?** - - **If so, how does this proposal differ?** -- **Does this affect error handling?** - - **If so, how does this differ from previous error handling proposals?** -- **Is this about generics?** - - **If so, how does this relate to the accepted design and other generics proposals?** - -### Proposal - -- **What is the proposed change?** -- **Who does this proposal help, and why?** -- **Please describe as precisely as possible the change to the language.** -- **What would change in the language spec?** -- **Please also describe the change informally, as in a class teaching Go.** -- **Is this change backward compatible?** - - Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit. - Show example code before and after the change. - - **Before** - - **After** -- **Orthogonality: how does this change interact or overlap with existing features?** -- **Is the goal of this change a performance improvement?** - - **If so, what quantifiable improvement should we expect?** - - **How would we measure it?** - -### Costs - -- **Would this change make Go easier or harder to learn, and why?** -- **What is the cost of this proposal? (Every language change has a cost).** -- **How many tools (such as vet, gopls, gofmt, goimports, etc.) 
would be affected?** -- **What is the compile time cost?** -- **What is the run time cost?** -- **Can you describe a possible implementation?** -- **Do you have a prototype? (This is not required.)** diff --git a/.github/ISSUE_TEMPLATE/11-language-change.yml b/.github/ISSUE_TEMPLATE/11-language-change.yml new file mode 100644 index 0000000000..37ba2d7e40 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/11-language-change.yml @@ -0,0 +1,165 @@ +name: Language Change Proposals +description: Changes to the language +labels: ["Proposal", "v2", "LanguageChange"] +title: "proposal: Go 2: proposal title" + + +body: + - type: markdown + attributes: + value: | + ## Our process for evaluating language changes can be found [here](https://go.googlesource.com/proposal/+/refs/heads/master#language-changes) + + - type: dropdown + id: author-go-experience + attributes: + label: "Go Programming Experience" + description: "Would you consider yourself a novice, intermediate, or experienced Go programmer?" + options: + - "Novice" + - "Intermediate" + - "Experienced" + default: 1 + + - type: input + id: author-other-languages-experience + attributes: + label: "Other Languages Experience" + description: "What other languages do you have experience with?" + placeholder: "Go, Python, JS, Rust" + validations: + required: false + + - type: checkboxes + id: related-idea + attributes: + label: "Related Idea" + options: + - label: "Has this idea, or one like it, been proposed before?" + - label: "Does this affect error handling?" + - label: "Is this about generics?" + - label: "Is this change backward compatible? Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit" + + - type: textarea + id: related-proposals + attributes: + label: Has this idea, or one like it, been proposed before? + description: If so, how does this proposal differ? + placeholder: | + Yes or No + + If yes, + 1. Mention the related proposals + 2. 
then describe how this proposal differs + validations: + required: true + + - type: textarea + id: error-handling-proposal + attributes: + label: Does this affect error handling? + description: If so, how does this differ from previous error handling proposals? + placeholder: | + Yes or No + + If yes, + 1.how does this differ from previous error handling proposals? + + validations: + required: true + + - type: textarea + id: generics-proposal + attributes: + label: Is this about generics? + description: If so, how does this relate to the accepted design and other generics proposals? + placeholder: | + Yes or No + + If yes, + 1. how does this relate to the accepted design and other generics proposals? + + validations: + required: true + + - type: textarea + id: proposal + attributes: + label: "Proposal" + description: "What is the proposed change? Who does this proposal help, and why? Please describe as precisely as possible the change to the language." + validations: + required: true + + - type: textarea + id: language-spec-changes + attributes: + label: "Language Spec Changes" + description: "What would change in the language spec?" + validations: + required: false + + - type: textarea + id: informal-change + attributes: + label: "Informal Change" + description: "Please also describe the change informally, as in a class teaching Go." + validations: + required: false + + - type: textarea + id: go-backwards-compatiblity + attributes: + label: Is this change backward compatible? + description: Breaking the Go 1 compatibility guarantee is a large cost and requires a large benefit. + placeholder: | + Yes or No + + If yes, + 1. Show example code before and after the change. + + validations: + required: true + + - type: textarea + id: orthogonality + attributes: + label: "Orthogonality: How does this change interact or overlap with existing features?" + description: "Is the goal of this change a performance improvement? 
If so, what quantifiable improvement should we expect? How would we measure it?" + validations: + required: false + + - type: textarea + id: learning-curve + attributes: + label: "Would this change make Go easier or harder to learn, and why?" + + - type: textarea + id: cost-description + attributes: + label: "Cost Description" + description: "What is the cost of this proposal? (Every language change has a cost)" + + - type: input + id: go-toolchain + attributes: + label: Changes to Go ToolChain + description: "How many tools (such as vet, gopls, gofmt, goimports, etc.) would be affected? " + validations: + required: false + + - type: input + id: perf-costs + attributes: + label: Performance Costs + description: "What is the compile time cost? What is the run time cost? " + validations: + required: false + + - type: textarea + id: prototype + attributes: + label: "Prototype" + description: "Can you describe a possible implementation?" + validations: + required: false + diff --git a/.github/ISSUE_TEMPLATE/12-telemetry.yml b/.github/ISSUE_TEMPLATE/12-telemetry.yml index 7f1a29c634..4215abfa99 100644 --- a/.github/ISSUE_TEMPLATE/12-telemetry.yml +++ b/.github/ISSUE_TEMPLATE/12-telemetry.yml @@ -1,6 +1,6 @@ name: Go Telemetry Proposals description: New telemetry counter or update on an existing one -title: "x/telemetry/config: " +title: "x/telemetry/config: proposal title" labels: ["Telemetry-Proposal"] projects: ["golang/29"] body: diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index c07f1e4d1c..d6257daf2f 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1,4 +1,4 @@ -blank_issues_enabled: false +blank_issues_enabled: true contact_links: - name: Questions about: Please use one of the forums for questions or general discussions diff --git a/api/README b/api/README index 1e52f7a843..050ebd99ab 100644 --- a/api/README +++ b/api/README @@ -21,3 +21,6 @@ warning output from the go api tool. 
Each file should be named nnnnn.txt, after the issue number for the accepted proposal. (The #nnnnn suffix must also appear at the end of each line in the file; that will be preserved when next/*.txt is concatenated into go1.XX.txt.) + +When you add a file to the api/next directory, you must add at least one file +under doc/next. See doc/README.md for details. diff --git a/api/go1.22.txt b/api/go1.22.txt new file mode 100644 index 0000000000..55f21857bc --- /dev/null +++ b/api/go1.22.txt @@ -0,0 +1,135 @@ +pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000 +pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898 +pkg cmp, func Or[$0 comparable](...$0) $0 #60204 +pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665 +pkg crypto/x509, method (*CertPool) AddCertWithConstraint(*Certificate, func([]*Certificate) error) #57178 +pkg crypto/x509, method (OID) Equal(OID) bool #60665 +pkg crypto/x509, method (OID) EqualASN1OID(asn1.ObjectIdentifier) bool #60665 +pkg crypto/x509, method (OID) String() string #60665 +pkg crypto/x509, type Certificate struct, Policies []OID #60665 +pkg crypto/x509, type OID struct #60665 +pkg database/sql, method (*Null[$0]) Scan(interface{}) error #60370 +pkg database/sql, method (Null[$0]) Value() (driver.Value, error) #60370 +pkg database/sql, type Null[$0 interface{}] struct #60370 +pkg database/sql, type Null[$0 interface{}] struct, V $0 #60370 +pkg database/sql, type Null[$0 interface{}] struct, Valid bool #60370 +pkg debug/elf, const R_LARCH_64_PCREL = 109 #63725 +pkg debug/elf, const R_LARCH_64_PCREL R_LARCH #63725 +pkg debug/elf, const R_LARCH_ADD6 = 105 #63725 +pkg debug/elf, const R_LARCH_ADD6 R_LARCH #63725 +pkg debug/elf, const R_LARCH_ADD_ULEB128 = 107 #63725 +pkg debug/elf, const R_LARCH_ADD_ULEB128 R_LARCH #63725 +pkg debug/elf, const R_LARCH_ALIGN = 102 #63725 +pkg debug/elf, const R_LARCH_ALIGN R_LARCH #63725 +pkg debug/elf, const R_LARCH_CFA = 104 #63725 +pkg debug/elf, const R_LARCH_CFA R_LARCH #63725 
+pkg debug/elf, const R_LARCH_DELETE = 101 #63725 +pkg debug/elf, const R_LARCH_DELETE R_LARCH #63725 +pkg debug/elf, const R_LARCH_PCREL20_S2 = 103 #63725 +pkg debug/elf, const R_LARCH_PCREL20_S2 R_LARCH #63725 +pkg debug/elf, const R_LARCH_SUB6 = 106 #63725 +pkg debug/elf, const R_LARCH_SUB6 R_LARCH #63725 +pkg debug/elf, const R_LARCH_SUB_ULEB128 = 108 #63725 +pkg debug/elf, const R_LARCH_SUB_ULEB128 R_LARCH #63725 +pkg debug/elf, const R_MIPS_PC32 = 248 #61974 +pkg debug/elf, const R_MIPS_PC32 R_MIPS #61974 +pkg encoding/base32, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 +pkg encoding/base32, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693 +pkg encoding/base64, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 +pkg encoding/base64, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693 +pkg encoding/hex, func AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 +pkg encoding/hex, func AppendEncode([]uint8, []uint8) []uint8 #53693 +pkg go/ast, func NewPackage //deprecated #52463 +pkg go/ast, func Unparen(Expr) Expr #60061 +pkg go/ast, type Importer //deprecated #52463 +pkg go/ast, type Object //deprecated #52463 +pkg go/ast, type Package //deprecated #52463 +pkg go/ast, type Scope //deprecated #52463 +pkg go/types, func NewAlias(*TypeName, Type) *Alias #63223 +pkg go/types, func Unalias(Type) Type #63223 +pkg go/types, method (*Alias) Obj() *TypeName #63223 +pkg go/types, method (*Alias) String() string #63223 +pkg go/types, method (*Alias) Underlying() Type #63223 +pkg go/types, method (*Info) PkgNameOf(*ast.ImportSpec) *PkgName #62037 +pkg go/types, method (Checker) PkgNameOf(*ast.ImportSpec) *PkgName #62037 +pkg go/types, type Alias struct #63223 +pkg go/types, type Info struct, FileVersions map[*ast.File]string #62605 +pkg go/version, func Compare(string, string) int #62039 +pkg go/version, func IsValid(string) bool #62039 +pkg go/version, func Lang(string) string #62039 +pkg 
html/template, const ErrJSTemplate //deprecated #61619 +pkg io, method (*SectionReader) Outer() (ReaderAt, int64, int64) #61870 +pkg log/slog, func SetLogLoggerLevel(Level) Level #62418 +pkg math/big, method (*Rat) FloatPrec() (int, bool) #50489 +pkg math/rand/v2, func ExpFloat64() float64 #61716 +pkg math/rand/v2, func Float32() float32 #61716 +pkg math/rand/v2, func Float64() float64 #61716 +pkg math/rand/v2, func Int() int #61716 +pkg math/rand/v2, func Int32() int32 #61716 +pkg math/rand/v2, func Int32N(int32) int32 #61716 +pkg math/rand/v2, func Int64() int64 #61716 +pkg math/rand/v2, func Int64N(int64) int64 #61716 +pkg math/rand/v2, func IntN(int) int #61716 +pkg math/rand/v2, func N[$0 intType]($0) $0 #61716 +pkg math/rand/v2, func New(Source) *Rand #61716 +pkg math/rand/v2, func NewChaCha8([32]uint8) *ChaCha8 #61716 +pkg math/rand/v2, func NewPCG(uint64, uint64) *PCG #61716 +pkg math/rand/v2, func NewZipf(*Rand, float64, float64, uint64) *Zipf #61716 +pkg math/rand/v2, func NormFloat64() float64 #61716 +pkg math/rand/v2, func Perm(int) []int #61716 +pkg math/rand/v2, func Shuffle(int, func(int, int)) #61716 +pkg math/rand/v2, func Uint32() uint32 #61716 +pkg math/rand/v2, func Uint32N(uint32) uint32 #61716 +pkg math/rand/v2, func Uint64() uint64 #61716 +pkg math/rand/v2, func Uint64N(uint64) uint64 #61716 +pkg math/rand/v2, func UintN(uint) uint #61716 +pkg math/rand/v2, method (*ChaCha8) MarshalBinary() ([]uint8, error) #61716 +pkg math/rand/v2, method (*ChaCha8) Seed([32]uint8) #61716 +pkg math/rand/v2, method (*ChaCha8) Uint64() uint64 #61716 +pkg math/rand/v2, method (*ChaCha8) UnmarshalBinary([]uint8) error #61716 +pkg math/rand/v2, method (*PCG) MarshalBinary() ([]uint8, error) #61716 +pkg math/rand/v2, method (*PCG) Seed(uint64, uint64) #61716 +pkg math/rand/v2, method (*PCG) Uint64() uint64 #61716 +pkg math/rand/v2, method (*PCG) UnmarshalBinary([]uint8) error #61716 +pkg math/rand/v2, method (*Rand) ExpFloat64() float64 #61716 +pkg math/rand/v2, 
method (*Rand) Float32() float32 #61716 +pkg math/rand/v2, method (*Rand) Float64() float64 #61716 +pkg math/rand/v2, method (*Rand) Int() int #61716 +pkg math/rand/v2, method (*Rand) Int32() int32 #61716 +pkg math/rand/v2, method (*Rand) Int32N(int32) int32 #61716 +pkg math/rand/v2, method (*Rand) Int64() int64 #61716 +pkg math/rand/v2, method (*Rand) Int64N(int64) int64 #61716 +pkg math/rand/v2, method (*Rand) IntN(int) int #61716 +pkg math/rand/v2, method (*Rand) NormFloat64() float64 #61716 +pkg math/rand/v2, method (*Rand) Perm(int) []int #61716 +pkg math/rand/v2, method (*Rand) Shuffle(int, func(int, int)) #61716 +pkg math/rand/v2, method (*Rand) Uint32() uint32 #61716 +pkg math/rand/v2, method (*Rand) Uint32N(uint32) uint32 #61716 +pkg math/rand/v2, method (*Rand) Uint64() uint64 #61716 +pkg math/rand/v2, method (*Rand) Uint64N(uint64) uint64 #61716 +pkg math/rand/v2, method (*Rand) UintN(uint) uint #61716 +pkg math/rand/v2, method (*Zipf) Uint64() uint64 #61716 +pkg math/rand/v2, type ChaCha8 struct #61716 +pkg math/rand/v2, type PCG struct #61716 +pkg math/rand/v2, type Rand struct #61716 +pkg math/rand/v2, type Source interface { Uint64 } #61716 +pkg math/rand/v2, type Source interface, Uint64() uint64 #61716 +pkg math/rand/v2, type Zipf struct #61716 +pkg net, method (*TCPConn) WriteTo(io.Writer) (int64, error) #58808 +pkg net/http, func FileServerFS(fs.FS) Handler #51971 +pkg net/http, func NewFileTransportFS(fs.FS) RoundTripper #51971 +pkg net/http, func ServeFileFS(ResponseWriter, *Request, fs.FS, string) #51971 +pkg net/http, method (*Request) PathValue(string) string #61410 +pkg net/http, method (*Request) SetPathValue(string, string) #61410 +pkg net/netip, method (AddrPort) Compare(AddrPort) int #61642 +pkg os, method (*File) WriteTo(io.Writer) (int64, error) #58808 +pkg reflect, func PtrTo //deprecated #59599 +pkg reflect, func TypeFor[$0 interface{}]() Type #60088 +pkg slices, func Concat[$0 interface{ ~[]$1 }, $1 interface{}](...$0) $0 #56353 
+pkg syscall (linux-386), type SysProcAttr struct, PidFD *int #51246 +pkg syscall (linux-386-cgo), type SysProcAttr struct, PidFD *int #51246 +pkg syscall (linux-amd64), type SysProcAttr struct, PidFD *int #51246 +pkg syscall (linux-amd64-cgo), type SysProcAttr struct, PidFD *int #51246 +pkg syscall (linux-arm), type SysProcAttr struct, PidFD *int #51246 +pkg syscall (linux-arm-cgo), type SysProcAttr struct, PidFD *int #51246 +pkg testing/slogtest, func Run(*testing.T, func(*testing.T) slog.Handler, func(*testing.T) map[string]interface{}) #61758 diff --git a/api/next/42888.txt b/api/next/42888.txt new file mode 100644 index 0000000000..f9b8e1e475 --- /dev/null +++ b/api/next/42888.txt @@ -0,0 +1 @@ +pkg runtime/debug, func SetCrashOutput(*os.File) error #42888 diff --git a/api/next/50102.txt b/api/next/50102.txt deleted file mode 100644 index dcb7977e83..0000000000 --- a/api/next/50102.txt +++ /dev/null @@ -1,9 +0,0 @@ -pkg archive/tar, type FileInfoNames interface { Gname, IsDir, ModTime, Mode, Name, Size, Sys, Uname } #50102 -pkg archive/tar, type FileInfoNames interface, Gname(int) (string, error) #50102 -pkg archive/tar, type FileInfoNames interface, IsDir() bool #50102 -pkg archive/tar, type FileInfoNames interface, ModTime() time.Time #50102 -pkg archive/tar, type FileInfoNames interface, Mode() fs.FileMode #50102 -pkg archive/tar, type FileInfoNames interface, Name() string #50102 -pkg archive/tar, type FileInfoNames interface, Size() int64 #50102 -pkg archive/tar, type FileInfoNames interface, Sys() interface{} #50102 -pkg archive/tar, type FileInfoNames interface, Uname(int) (string, error) #50102 diff --git a/api/next/50489.txt b/api/next/50489.txt deleted file mode 100644 index 5fc8723c9e..0000000000 --- a/api/next/50489.txt +++ /dev/null @@ -1 +0,0 @@ -pkg math/big, method (*Rat) FloatPrec() (int, bool) #50489 diff --git a/api/next/51246.txt b/api/next/51246.txt deleted file mode 100644 index c8806c64a3..0000000000 --- a/api/next/51246.txt +++ 
/dev/null @@ -1,6 +0,0 @@ -pkg syscall (linux-386), type SysProcAttr struct, PidFD *int #51246 -pkg syscall (linux-386-cgo), type SysProcAttr struct, PidFD *int #51246 -pkg syscall (linux-amd64), type SysProcAttr struct, PidFD *int #51246 -pkg syscall (linux-amd64-cgo), type SysProcAttr struct, PidFD *int #51246 -pkg syscall (linux-arm), type SysProcAttr struct, PidFD *int #51246 -pkg syscall (linux-arm-cgo), type SysProcAttr struct, PidFD *int #51246 diff --git a/api/next/51971.txt b/api/next/51971.txt deleted file mode 100644 index f884c3c079..0000000000 --- a/api/next/51971.txt +++ /dev/null @@ -1,3 +0,0 @@ -pkg net/http, func ServeFileFS(ResponseWriter, *Request, fs.FS, string) #51971 -pkg net/http, func FileServerFS(fs.FS) Handler #51971 -pkg net/http, func NewFileTransportFS(fs.FS) RoundTripper #51971 diff --git a/api/next/53693.txt b/api/next/53693.txt deleted file mode 100644 index 5a6f09e6c8..0000000000 --- a/api/next/53693.txt +++ /dev/null @@ -1,6 +0,0 @@ -pkg encoding/base32, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 -pkg encoding/base32, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693 -pkg encoding/base64, method (*Encoding) AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 -pkg encoding/base64, method (*Encoding) AppendEncode([]uint8, []uint8) []uint8 #53693 -pkg encoding/hex, func AppendDecode([]uint8, []uint8) ([]uint8, error) #53693 -pkg encoding/hex, func AppendEncode([]uint8, []uint8) []uint8 #53693 diff --git a/api/next/54898.txt b/api/next/54898.txt deleted file mode 100644 index 44133bd377..0000000000 --- a/api/next/54898.txt +++ /dev/null @@ -1 +0,0 @@ -pkg archive/zip, method (*Writer) AddFS(fs.FS) error #54898 diff --git a/api/next/56353.txt b/api/next/56353.txt deleted file mode 100644 index c2504a7f63..0000000000 --- a/api/next/56353.txt +++ /dev/null @@ -1 +0,0 @@ -pkg slices, func Concat[$0 interface{ ~[]$1 }, $1 interface{}](...$0) $0 #56353 diff --git a/api/next/57151.txt 
b/api/next/57151.txt new file mode 100644 index 0000000000..5d0e34e8b7 --- /dev/null +++ b/api/next/57151.txt @@ -0,0 +1 @@ +pkg path/filepath, func Localize(string) (string, error) #57151 diff --git a/api/next/57178.txt b/api/next/57178.txt deleted file mode 100644 index 3ce4d408eb..0000000000 --- a/api/next/57178.txt +++ /dev/null @@ -1 +0,0 @@ -pkg crypto/x509, method (*CertPool) AddCertWithConstraint(*Certificate, func([]*Certificate) error) #57178 diff --git a/api/next/58000.txt b/api/next/58000.txt deleted file mode 100644 index 94db9637cb..0000000000 --- a/api/next/58000.txt +++ /dev/null @@ -1 +0,0 @@ -pkg archive/tar, method (*Writer) AddFS(fs.FS) error #58000 diff --git a/api/next/59599.txt b/api/next/59599.txt deleted file mode 100644 index 952291f323..0000000000 --- a/api/next/59599.txt +++ /dev/null @@ -1 +0,0 @@ -pkg reflect, func PtrTo //deprecated #59599 diff --git a/api/next/60061.txt b/api/next/60061.txt deleted file mode 100644 index 3e497addb7..0000000000 --- a/api/next/60061.txt +++ /dev/null @@ -1 +0,0 @@ -pkg go/ast, func Unparen(Expr) Expr #60061 diff --git a/api/next/60088.txt b/api/next/60088.txt deleted file mode 100644 index 6eacb139a7..0000000000 --- a/api/next/60088.txt +++ /dev/null @@ -1 +0,0 @@ -pkg reflect, func TypeFor[$0 interface{}]() Type #60088 diff --git a/api/next/60204.txt b/api/next/60204.txt deleted file mode 100644 index 62dddc620c..0000000000 --- a/api/next/60204.txt +++ /dev/null @@ -1 +0,0 @@ -pkg cmp, func Or[$0 comparable](...$0) $0 #60204 diff --git a/api/next/60370.txt b/api/next/60370.txt deleted file mode 100644 index 66ced0bfb7..0000000000 --- a/api/next/60370.txt +++ /dev/null @@ -1,5 +0,0 @@ -pkg database/sql, method (*Null[$0]) Scan(interface{}) error #60370 -pkg database/sql, method (Null[$0]) Value() (driver.Value, error) #60370 -pkg database/sql, type Null[$0 interface{}] struct #60370 -pkg database/sql, type Null[$0 interface{}] struct, Valid bool #60370 -pkg database/sql, type Null[$0 interface{}] 
struct, V $0 #60370 diff --git a/api/next/60427.txt b/api/next/60427.txt new file mode 100644 index 0000000000..0be9da0782 --- /dev/null +++ b/api/next/60427.txt @@ -0,0 +1,4 @@ +pkg reflect, type Type interface, OverflowComplex(complex128) bool #60427 +pkg reflect, type Type interface, OverflowFloat(float64) bool #60427 +pkg reflect, type Type interface, OverflowInt(int64) bool #60427 +pkg reflect, type Type interface, OverflowUint(uint64) bool #60427 diff --git a/api/next/60665.txt b/api/next/60665.txt deleted file mode 100644 index 10e50e1832..0000000000 --- a/api/next/60665.txt +++ /dev/null @@ -1,6 +0,0 @@ -pkg crypto/x509, type Certificate struct, Policies []OID #60665 -pkg crypto/x509, type OID struct #60665 -pkg crypto/x509, method (OID) Equal(OID) bool #60665 -pkg crypto/x509, method (OID) EqualASN1OID(asn1.ObjectIdentifier) bool #60665 -pkg crypto/x509, method (OID) String() string #60665 -pkg crypto/x509, func OIDFromInts([]uint64) (OID, error) #60665 diff --git a/api/next/61410.txt b/api/next/61410.txt deleted file mode 100644 index 01c8a2c3e8..0000000000 --- a/api/next/61410.txt +++ /dev/null @@ -1,2 +0,0 @@ -pkg net/http, method (*Request) PathValue(string) string #61410 -pkg net/http, method (*Request) SetPathValue(string, string) #61410 diff --git a/api/next/61619.txt b/api/next/61619.txt deleted file mode 100644 index c63a3140e8..0000000000 --- a/api/next/61619.txt +++ /dev/null @@ -1 +0,0 @@ -pkg html/template, const ErrJSTemplate //deprecated #61619 diff --git a/api/next/61642.txt b/api/next/61642.txt deleted file mode 100644 index 4c8bf252df..0000000000 --- a/api/next/61642.txt +++ /dev/null @@ -1,2 +0,0 @@ -pkg net/netip, method (AddrPort) Compare(AddrPort) int #61642 -pkg net/netip, method (Prefix) Compare(Prefix) int #61642 diff --git a/api/next/61696.txt b/api/next/61696.txt new file mode 100644 index 0000000000..8adaf3d80e --- /dev/null +++ b/api/next/61696.txt @@ -0,0 +1 @@ +pkg sync, method (*Map) Clear() #61696 diff --git 
a/api/next/61716.txt b/api/next/61716.txt deleted file mode 100644 index 05b9bb8429..0000000000 --- a/api/next/61716.txt +++ /dev/null @@ -1,48 +0,0 @@ -pkg math/rand/v2, func ExpFloat64() float64 #61716 -pkg math/rand/v2, func Float32() float32 #61716 -pkg math/rand/v2, func Float64() float64 #61716 -pkg math/rand/v2, func Int() int #61716 -pkg math/rand/v2, func Int32() int32 #61716 -pkg math/rand/v2, func Int32N(int32) int32 #61716 -pkg math/rand/v2, func Int64() int64 #61716 -pkg math/rand/v2, func Int64N(int64) int64 #61716 -pkg math/rand/v2, func IntN(int) int #61716 -pkg math/rand/v2, func N[$0 intType]($0) $0 #61716 -pkg math/rand/v2, func New(Source) *Rand #61716 -pkg math/rand/v2, func NewPCG(uint64, uint64) *PCG #61716 -pkg math/rand/v2, func NewZipf(*Rand, float64, float64, uint64) *Zipf #61716 -pkg math/rand/v2, func NormFloat64() float64 #61716 -pkg math/rand/v2, func Perm(int) []int #61716 -pkg math/rand/v2, func Shuffle(int, func(int, int)) #61716 -pkg math/rand/v2, func Uint32() uint32 #61716 -pkg math/rand/v2, func Uint32N(uint32) uint32 #61716 -pkg math/rand/v2, func Uint64() uint64 #61716 -pkg math/rand/v2, func Uint64N(uint64) uint64 #61716 -pkg math/rand/v2, func UintN(uint) uint #61716 -pkg math/rand/v2, method (*PCG) MarshalBinary() ([]uint8, error) #61716 -pkg math/rand/v2, method (*PCG) Seed(uint64, uint64) #61716 -pkg math/rand/v2, method (*PCG) Uint64() uint64 #61716 -pkg math/rand/v2, method (*PCG) UnmarshalBinary([]uint8) error #61716 -pkg math/rand/v2, method (*Rand) ExpFloat64() float64 #61716 -pkg math/rand/v2, method (*Rand) Float32() float32 #61716 -pkg math/rand/v2, method (*Rand) Float64() float64 #61716 -pkg math/rand/v2, method (*Rand) Int() int #61716 -pkg math/rand/v2, method (*Rand) Int32() int32 #61716 -pkg math/rand/v2, method (*Rand) Int32N(int32) int32 #61716 -pkg math/rand/v2, method (*Rand) Int64() int64 #61716 -pkg math/rand/v2, method (*Rand) Int64N(int64) int64 #61716 -pkg math/rand/v2, method (*Rand) IntN(int) int 
#61716 -pkg math/rand/v2, method (*Rand) NormFloat64() float64 #61716 -pkg math/rand/v2, method (*Rand) Perm(int) []int #61716 -pkg math/rand/v2, method (*Rand) Shuffle(int, func(int, int)) #61716 -pkg math/rand/v2, method (*Rand) Uint32() uint32 #61716 -pkg math/rand/v2, method (*Rand) Uint32N(uint32) uint32 #61716 -pkg math/rand/v2, method (*Rand) Uint64() uint64 #61716 -pkg math/rand/v2, method (*Rand) Uint64N(uint64) uint64 #61716 -pkg math/rand/v2, method (*Rand) UintN(uint) uint #61716 -pkg math/rand/v2, method (*Zipf) Uint64() uint64 #61716 -pkg math/rand/v2, type PCG struct #61716 -pkg math/rand/v2, type Rand struct #61716 -pkg math/rand/v2, type Source interface { Uint64 } #61716 -pkg math/rand/v2, type Source interface, Uint64() uint64 #61716 -pkg math/rand/v2, type Zipf struct #61716 diff --git a/api/next/61758.txt b/api/next/61758.txt deleted file mode 100644 index 35bd224965..0000000000 --- a/api/next/61758.txt +++ /dev/null @@ -1 +0,0 @@ -pkg testing/slogtest, func Run(*testing.T, func(*testing.T) slog.Handler, func(*testing.T) map[string]interface{}) #61758 diff --git a/api/next/61870.txt b/api/next/61870.txt deleted file mode 100644 index 27bb9f6425..0000000000 --- a/api/next/61870.txt +++ /dev/null @@ -1 +0,0 @@ -pkg io, method (*SectionReader) Outer() (ReaderAt, int64, int64) #61870 diff --git a/api/next/61974.txt b/api/next/61974.txt deleted file mode 100644 index d231a62848..0000000000 --- a/api/next/61974.txt +++ /dev/null @@ -1,2 +0,0 @@ -pkg debug/elf, const R_MIPS_PC32 = 248 #61974 -pkg debug/elf, const R_MIPS_PC32 R_MIPS #61974 diff --git a/api/next/62037.txt b/api/next/62037.txt deleted file mode 100644 index 78374214c8..0000000000 --- a/api/next/62037.txt +++ /dev/null @@ -1,2 +0,0 @@ -pkg go/types, method (*Info) PkgNameOf(*ast.ImportSpec) *PkgName #62037 -pkg go/types, method (Checker) PkgNameOf(*ast.ImportSpec) *PkgName #62037 diff --git a/api/next/62039.txt b/api/next/62039.txt deleted file mode 100644 index 8280e87751..0000000000 --- 
a/api/next/62039.txt +++ /dev/null @@ -1,3 +0,0 @@ -pkg go/version, func Compare(string, string) int #62039 -pkg go/version, func IsValid(string) bool #62039 -pkg go/version, func Lang(string) string #62039 diff --git a/api/next/62254.txt b/api/next/62254.txt new file mode 100644 index 0000000000..49d3214310 --- /dev/null +++ b/api/next/62254.txt @@ -0,0 +1,12 @@ +pkg net, method (*TCPConn) SetKeepAliveConfig(KeepAliveConfig) error #62254 +pkg net, type Dialer struct, KeepAliveConfig KeepAliveConfig #62254 +pkg net, type KeepAliveConfig struct #62254 +pkg net, type KeepAliveConfig struct, Count int #62254 +pkg net, type KeepAliveConfig struct, Enable bool #62254 +pkg net, type KeepAliveConfig struct, Idle time.Duration #62254 +pkg net, type KeepAliveConfig struct, Interval time.Duration #62254 +pkg net, type ListenConfig struct, KeepAliveConfig KeepAliveConfig #62254 +pkg syscall (windows-386), const WSAENOPROTOOPT = 10042 #62254 +pkg syscall (windows-386), const WSAENOPROTOOPT Errno #62254 +pkg syscall (windows-amd64), const WSAENOPROTOOPT = 10042 #62254 +pkg syscall (windows-amd64), const WSAENOPROTOOPT Errno #62254 diff --git a/api/next/62418.txt b/api/next/62418.txt deleted file mode 100644 index fd482f4ba8..0000000000 --- a/api/next/62418.txt +++ /dev/null @@ -1 +0,0 @@ -pkg log/slog, func SetLogLoggerLevel(Level) Level #62418 diff --git a/api/next/62484.txt b/api/next/62484.txt new file mode 100644 index 0000000000..7f5b5ca90c --- /dev/null +++ b/api/next/62484.txt @@ -0,0 +1 @@ +pkg os, func CopyFS(string, fs.FS) error #62484 diff --git a/api/next/62605.txt b/api/next/62605.txt deleted file mode 100644 index 1b0e533d02..0000000000 --- a/api/next/62605.txt +++ /dev/null @@ -1 +0,0 @@ -pkg go/types, type Info struct, FileVersions map[*ast.File]string #62605 diff --git a/api/next/63223.txt b/api/next/63223.txt deleted file mode 100644 index 2dcafb872b..0000000000 --- a/api/next/63223.txt +++ /dev/null @@ -1,6 +0,0 @@ -pkg go/types, func NewAlias(*TypeName, 
Type) *Alias #63223 -pkg go/types, func Unalias(Type) Type #63223 -pkg go/types, method (*Alias) Obj() *TypeName #63223 -pkg go/types, method (*Alias) String() string #63223 -pkg go/types, method (*Alias) Underlying() Type #63223 -pkg go/types, type Alias struct #63223 diff --git a/api/next/63725.txt b/api/next/63725.txt deleted file mode 100644 index ff3e05348b..0000000000 --- a/api/next/63725.txt +++ /dev/null @@ -1,18 +0,0 @@ -pkg debug/elf, const R_LARCH_64_PCREL = 109 #63725 -pkg debug/elf, const R_LARCH_64_PCREL R_LARCH #63725 -pkg debug/elf, const R_LARCH_ADD6 = 105 #63725 -pkg debug/elf, const R_LARCH_ADD6 R_LARCH #63725 -pkg debug/elf, const R_LARCH_ADD_ULEB128 = 107 #63725 -pkg debug/elf, const R_LARCH_ADD_ULEB128 R_LARCH #63725 -pkg debug/elf, const R_LARCH_ALIGN = 102 #63725 -pkg debug/elf, const R_LARCH_ALIGN R_LARCH #63725 -pkg debug/elf, const R_LARCH_CFA = 104 #63725 -pkg debug/elf, const R_LARCH_CFA R_LARCH #63725 -pkg debug/elf, const R_LARCH_DELETE = 101 #63725 -pkg debug/elf, const R_LARCH_DELETE R_LARCH #63725 -pkg debug/elf, const R_LARCH_PCREL20_S2 = 103 #63725 -pkg debug/elf, const R_LARCH_PCREL20_S2 R_LARCH #63725 -pkg debug/elf, const R_LARCH_SUB6 = 106 #63725 -pkg debug/elf, const R_LARCH_SUB6 R_LARCH #63725 -pkg debug/elf, const R_LARCH_SUB_ULEB128 = 108 #63725 -pkg debug/elf, const R_LARCH_SUB_ULEB128 R_LARCH #63725 diff --git a/doc/README.md b/doc/README.md new file mode 100644 index 0000000000..3bb8412ad5 --- /dev/null +++ b/doc/README.md @@ -0,0 +1,46 @@ +# Release Notes + +The `initial` and `next` subdirectories of this directory are for release notes. + +## For developers + +Release notes should be added to `next` by editing existing files or creating new files. + +At the end of the development cycle, the files will be merged by being +concatenated in sorted order by pathname. Files in the directory matching the +glob "*stdlib/*minor" are treated specially. 
They should be in subdirectories +corresponding to standard library package paths, and headings for those package +paths will be generated automatically. + +Files in this repo's `api/next` directory must have corresponding files in +`doc/next/*stdlib/*minor`. +The files should be in the subdirectory for the package with the new +API, and should be named after the issue number of the API proposal. +For example, if the directory `6-stdlib/99-minor` is present, +then an `api/next` file with the line + + pkg net/http, function F #12345 + +should have a corresponding file named `doc/next/6-stdlib/99-minor/net/http/12345.md`. +At a minimum, that file should contain either a full sentence or a TODO, +ideally referring to a person with the responsibility to complete the note. + +Use the following forms in your markdown: + + [`http.Request`](/pkg/net/http#Request) # symbol documentation + [#12345](/issue/12345) # GitHub issues + [CL 6789](/cl/6789) # Gerrit changelists + +## For the release team + +At the start of a release development cycle, the contents of `next` should be deleted +and replaced with those of `initial`. From the repo root: + + > cd doc + > rm -r next/* + > cp -r initial/* next + +Then edit `next/1-intro.md` to refer to the next version. + +To prepare the release notes for a release, run `golang.org/x/build/cmd/relnote generate`. +That will merge the `.md` files in `next` into a single file. diff --git a/doc/asm.html b/doc/asm.html index f7787a4076..dd395ec833 100644 --- a/doc/asm.html +++ b/doc/asm.html @@ -464,6 +464,23 @@ Function is the outermost frame of the call stack. Traceback should stop at this +

Special instructions

+ +

+The PCALIGN pseudo-instruction is used to indicate that the next instruction should be aligned +to a specified boundary by padding with no-op instructions. +

+ +

+It is currently supported on arm64, amd64, ppc64, loong64 and riscv64. + +For example, the start of the MOVD instruction below is aligned to 32 bytes: +

+PCALIGN $32
+MOVD $2, R0
+
+

+

Interacting with Go types and constants

diff --git a/doc/go1.17_spec.html b/doc/go1.17_spec.html index 15e73c3867..c87d9aff3c 100644 --- a/doc/go1.17_spec.html +++ b/doc/go1.17_spec.html @@ -7,8 +7,11 @@

Introduction

-This is a reference manual for the Go programming language. For -more information and other documents, see golang.org. +This is the reference manual for the Go programming language as it was for +language version 1.17, in October 2021, before the introduction of generics. +It is provided for historical interest. +The current reference manual can be found here. +For more information and other documents, see go.dev.

diff --git a/doc/go1.22.html b/doc/go1.22.html deleted file mode 100644 index 287ee77bb5..0000000000 --- a/doc/go1.22.html +++ /dev/null @@ -1,105 +0,0 @@ - - - - - - -

DRAFT RELEASE NOTES — Introduction to Go 1.22

- -

- - Go 1.22 is not yet released. These are work-in-progress - release notes. Go 1.22 is expected to be released in February 2024. - -

- -

Changes to the language

- -

- TODO: complete this section -

- -

Tools

- -

Go command

- -

- TODO: complete this section, or delete if not needed -

- -

Cgo

- - - -

Runtime

- -

- TODO: complete this section, or delete if not needed -

- -

Compiler

- -

- TODO: complete this section, or delete if not needed -

- -

Linker

- -

- TODO: complete this section, or delete if not needed -

- -

Core library

- -

Minor changes to the library

- -

- As always, there are various minor changes and updates to the library, - made with the Go 1 promise of compatibility - in mind. - There are also various performance improvements, not enumerated here. -

- -

- TODO: complete this section -

- -
database/sql
-
-

- The new Null[T] type - provide a way to scan nullable columns for any column types. -

-
-
- -
reflect
-
-

- The Value.IsZero - method will now return true for a floating-point or complex - negative zero, and will return true for a struct value if a - blank field (a field named _) somehow has a - non-zero value. - These changes make IsZero consistent with comparing - a value to zero using the language == operator. -

-
-
- -

Ports

- -

- TODO: complete this section, or delete if not needed -

- diff --git a/doc/go_mem.html b/doc/go_mem.html index 026c1172e3..c0b81d3fac 100644 --- a/doc/go_mem.html +++ b/doc/go_mem.html @@ -98,12 +98,12 @@ which in turn are made up of memory operations. A memory operation is modeled by four details:

Some memory operations are read-like, including read, atomic read, mutex lock, and channel receive. @@ -162,8 +162,8 @@ where visible means that both of the following hold:

    -
  1. w happens before r. -
  2. w does not happen before any other write w' (to x) that happens before r. +
  3. w happens before r.
  4. +
  5. w does not happen before any other write w' (to x) that happens before r.

diff --git a/doc/go_spec.html b/doc/go_spec.html index 18f88d5ead..8f48f7444b 100644 --- a/doc/go_spec.html +++ b/doc/go_spec.html @@ -1,6 +1,6 @@ @@ -10,7 +10,7 @@ This is the reference manual for the Go programming language. The pre-Go1.18 version, without generics, can be found here. -For more information and other documents, see golang.org. +For more information and other documents, see go.dev.

@@ -70,6 +70,14 @@ enumerations or code snippets that are not further specified. The character +

+A link of the form [Go 1.xx] indicates that a described +language feature (or some aspect of it) was changed or added with language version 1.xx and +thus requires at minimum that language version to build. +For details, see the linked section +in the appendix. +

+

Source code representation

@@ -263,7 +271,8 @@ continue for import return var

The following character sequences represent operators -(including assignment operators) and punctuation: +(including assignment operators) and punctuation +[Go 1.18]:

 +    &     +=    &=     &&    ==    !=    (    )
@@ -281,7 +290,8 @@ An integer literal is a sequence of digits representing an
 integer constant.
 An optional prefix sets a non-decimal base: 0b or 0B
 for binary, 0, 0o, or 0O for octal,
-and 0x or 0X for hexadecimal.
+and 0x or 0X for hexadecimal
+[Go 1.13].
 A single 0 is considered a decimal zero.
 In hexadecimal literals, letters a through f
 and A through F represent values 10 through 15.
@@ -347,7 +357,8 @@ prefix, an integer part (hexadecimal digits), a radix point, a fractional part (
 and an exponent part (p or P followed by an optional sign and decimal digits).
 One of the integer part or the fractional part may be elided; the radix point may be elided as well,
 but the exponent part is required. (This syntax matches the one given in IEEE 754-2008 §5.12.3.)
-An exponent value exp scales the mantissa (integer and fractional part) by 2exp.
+An exponent value exp scales the mantissa (integer and fractional part) by 2exp
+[Go 1.13].
 

@@ -411,7 +422,8 @@ It consists of an integer or floating-point literal followed by the lowercase letter i. The value of an imaginary literal is the value of the respective -integer or floating-point literal multiplied by the imaginary unit i. +integer or floating-point literal multiplied by the imaginary unit i +[Go 1.13]

@@ -1340,6 +1352,7 @@ interface{}
 
 

For convenience, the predeclared type any is an alias for the empty interface. +[Go 1.18]

@@ -1375,13 +1388,15 @@ as the File interface. In a slightly more general form an interface T may use a (possibly qualified) interface type name E as an interface element. This is called -embedding interface E in T. +embedding interface E in T +[Go 1.14]. The type set of T is the intersection of the type sets defined by T's explicitly declared methods and the type sets of T’s embedded interfaces. In other words, the type set of T is the set of all types that implement all the explicitly declared methods of T and also all the methods of -E. +E +[Go 1.18].

@@ -1420,7 +1435,8 @@ type ReadCloser interface {
 

In their most general form, an interface element may also be an arbitrary type term T, or a term of the form ~T specifying the underlying type T, -or a union of terms t1|t2|…|tn. +or a union of terms t1|t2|…|tn +[Go 1.18]. Together with method specifications, these elements enable the precise definition of an interface's type set as follows:

@@ -1666,6 +1682,7 @@ maps grow to accommodate the number of items stored in them, with the exception of nil maps. A nil map is equivalent to an empty map except that no elements may be added. +

Channel types

@@ -2303,7 +2320,9 @@ as an operand, and in a

The following identifiers are implicitly declared in the -universe block: +universe block +[Go 1.18] +[Go 1.21]:

 Types:
@@ -2487,7 +2506,8 @@ TypeSpec = AliasDecl | TypeDef .
 

Alias declarations

-An alias declaration binds an identifier to the given type. +An alias declaration binds an identifier to the given type +[Go 1.9].

@@ -2636,7 +2656,8 @@ func (l *List[T]) Len() int  { … }
 A type parameter list declares the type parameters of a generic function or type declaration.
 The type parameter list looks like an ordinary function parameter list
 except that the type parameter names must all be present and the list is enclosed
-in square brackets rather than parentheses.
+in square brackets rather than parentheses
+[Go 1.18].
 

@@ -2719,7 +2740,8 @@ type T6[P int] struct{ f *T6[P] }     // ok: reference to T6 is not in type para
 

A type constraint is an interface that defines the set of permissible type arguments for the respective type parameter and controls the -operations supported by values of that type parameter. +operations supported by values of that type parameter +[Go 1.18].

@@ -2749,7 +2771,8 @@ other interfaces based on their type sets. But this should get us going for now.
 The predeclared
 interface type comparable
 denotes the set of all non-interface types that are
-strictly comparable.
+strictly comparable
+[Go 1.18].
 

@@ -2782,7 +2805,8 @@ if T is an element of the type set defined by C; i.e., if T implements C. As an exception, a strictly comparable type constraint may also be satisfied by a comparable -(not necessarily strictly comparable) type argument. +(not necessarily strictly comparable) type argument +[Go 1.20]. More precisely:

@@ -4306,7 +4330,7 @@ with the same underlying array.

A generic function or type is instantiated by substituting type arguments -for the type parameters. +for the type parameters [Go 1.18]. Instantiation proceeds in two steps:

@@ -4759,6 +4783,7 @@ to the type of the other operand.

The right operand in a shift expression must have integer type +[Go 1.13] or be an untyped constant representable by a value of type uint. If the left operand of a non-constant shift expression is an untyped constant, @@ -5426,7 +5451,8 @@ in any of these cases: x is a string and T is a slice of bytes or runes.

  • - x is a slice, T is an array or a pointer to an array, + x is a slice, T is an array [Go 1.20] + or a pointer to an array [Go 1.17], and the slice and array types have identical element types.
  • @@ -6516,7 +6542,6 @@ additionally it may specify an init and a post statement, such as an assignment, an increment or decrement statement. The init statement may be a short variable declaration, but the post statement must not. -Variables declared by the init statement are re-used in each iteration.

    @@ -6548,12 +6573,54 @@ for cond { S() }    is the same as    for ; cond ; { S() }
     for      { S() }    is the same as    for true     { S() }
     
    +

    +Each iteration has its own separate declared variable (or variables) +[Go 1.22]. +The variable used by the first iteration is declared by the init statement. +The variable used by each subsequent iteration is declared implicitly before +executing the post statement and initialized to the value of the previous +iteration's variable at that moment. +

    + +
    +var prints []func()
    +for i := 0; i < 5; i++ {
    +	prints = append(prints, func() { println(i) })
    +	i++
    +}
    +for _, p := range prints {
    +	p()
    +}
    +
    + +

    +prints +

    + +
    +1
    +3
    +5
    +
    + +

    +Prior to [Go 1.22], iterations share one set of variables +instead of having their own separate variables. +In that case, the example above prints +

    + +
    +6
    +6
    +6
    +
    +

    For statements with range clause

    A "for" statement with a "range" clause iterates through all entries of an array, slice, string or map, values received on -a channel, or integer values from zero to an upper limit. +a channel, or integer values from zero to an upper limit [Go 1.22]. For each entry it assigns iteration values to corresponding iteration variables if present and then executes the block.

    @@ -6595,7 +6662,7 @@ array or slice a [n]E, *[n]E, or []E index i int a[i] E string s string type index i int see below rune map m map[K]V key k K m[k] V channel c chan E, <-chan E element e E -integer n integer type I value i I +integer n integer type value i see below
      @@ -6637,25 +6704,33 @@ is nil, the range expression blocks forever.
    1. For an integer value n, the iteration values 0 through n-1 -are produced in increasing order, with the same type as n. +are produced in increasing order. If n <= 0, the loop does not run any iterations.
    -

    -The iteration values are assigned to the respective -iteration variables as in an assignment statement. -

    -

    The iteration variables may be declared by the "range" clause using a form of short variable declaration (:=). -In this case their types are set to the types of the respective iteration values -and their scope is the block of the "for" -statement; they are re-used in each iteration. -If the iteration variables are declared outside the "for" statement, -after execution their values will be those of the last iteration. +In this case their scope is the block of the "for" statement +and each iteration has its own new variables [Go 1.22] +(see also "for" statements with a ForClause). +If the range expression is a (possibly untyped) integer expression n, +the variable has the same type as if it was +declared with initialization +expression n. +Otherwise, the variables have the types of their respective iteration values. +

    + +

    +If the iteration variables are not explicitly declared by the "range" clause, +they must be preexisting. +In this case, the iteration values are assigned to the respective variables +as in an assignment statement. +If the range expression is a (possibly untyped) integer expression n, +n too must be assignable to the iteration variable; +if there is no iteration variable, n must be assignable to int.

    @@ -6698,6 +6773,11 @@ for i := range 10 {
     	// type of i is int (default type for untyped constant 10)
     	f(i)
     }
    +
    +// invalid: 256 cannot be assigned to uint8
    +var u uint8
    +for u = range 256 {
    +}
     
    @@ -7221,7 +7301,7 @@ The number of elements copied is the minimum of len(src) and len(dst). As a special case, if the destination's core type is []byte, copy also accepts a source argument with core type - bytestring. +bytestring. This form copies the bytes from the byte slice or string into the byte slice.

    @@ -7249,7 +7329,8 @@ n3 := copy(b, "Hello, World!") // n3 == 5, b is []byte("Hello")

    The built-in function clear takes an argument of map, slice, or type parameter type, -and deletes or zeroes out all elements. +and deletes or zeroes out all elements +[Go 1.21].

    @@ -7516,7 +7597,8 @@ The precise behavior is implementation-dependent.
     The built-in functions min and max compute the
     smallest—or largest, respectively—value of a fixed number of
     arguments of ordered types.
    -There must be at least one argument.
    +There must be at least one argument
    +[Go 1.21].
     

    @@ -8232,8 +8314,8 @@ of if the general conversion rules take care of this.

    A Pointer is a pointer type but a Pointer value may not be dereferenced. -Any pointer or value of underlying type uintptr can be -converted to a type of underlying type Pointer and vice versa. +Any pointer or value of core type uintptr can be +converted to a type of core type Pointer and vice versa. The effect of converting between Pointer and uintptr is implementation-defined.

    @@ -8244,6 +8326,10 @@ bits = *(*uint64)(unsafe.Pointer(&f)) type ptr unsafe.Pointer bits = *(*uint64)(ptr(&f)) +func f[P ~*B, B any](p P) uintptr { + return uintptr(unsafe.Pointer(p)) +} + var p ptr = nil
    @@ -8292,7 +8378,8 @@ of constant size.

    The function Add adds len to ptr -and returns the updated pointer unsafe.Pointer(uintptr(ptr) + uintptr(len)). +and returns the updated pointer unsafe.Pointer(uintptr(ptr) + uintptr(len)) +[Go 1.17]. The len argument must be of integer type or an untyped constant. A constant len argument must be representable by a value of type int; if it is an untyped constant it is given type int. @@ -8312,7 +8399,8 @@ and whose length and capacity are len.

    except that, as a special case, if ptr is nil and len is zero, -Slice returns nil. +Slice returns nil +[Go 1.17].

    @@ -8321,14 +8409,16 @@ A constant len argument must be non-negative and run-time panic occurs. +a run-time panic occurs +[Go 1.17].

    The function SliceData returns a pointer to the underlying array of the slice argument. If the slice's capacity cap(slice) is not zero, that pointer is &slice[:1][0]. If slice is nil, the result is nil. -Otherwise it is a non-nil pointer to an unspecified memory address. +Otherwise it is a non-nil pointer to an unspecified memory address +[Go 1.20].

    @@ -8337,12 +8427,14 @@ The function String returns a string value whose under The same requirements apply to the ptr and len argument as in the function Slice. If len is zero, the result is the empty string "". Since Go strings are immutable, the bytes passed to String must not be modified afterwards. +[Go 1.20]

    The function StringData returns a pointer to the underlying bytes of the str argument. For an empty string the return value is unspecified, and may be nil. -Since Go strings are immutable, the bytes returned by StringData must not be modified. +Since Go strings are immutable, the bytes returned by StringData must not be modified +[Go 1.20].

    Size and alignment guarantees

    @@ -8383,6 +8475,145 @@ A struct or array type has size zero if it contains no fields (or elements, resp

    Appendix

    +

    Language versions

    + +

    +The Go 1 compatibility guarantee ensures that +programs written to the Go 1 specification will continue to compile and run +correctly, unchanged, over the lifetime of that specification. +More generally, as adjustments are made and features added to the language, +the compatibility guarantee ensures that a Go program that works with a +specific Go language version will continue to work with any subsequent version. +

    + +

    +For instance, the ability to use the prefix 0b for binary +integer literals was introduced with Go 1.13, indicated +by [Go 1.13] in the section on +integer literals. +Source code containing an integer literal such as 0b1011 +will be rejected if the implied or required language version used by +the compiler is older than Go 1.13. +

    + +

    +The following table describes the minimum language version required for +features introduced after Go 1. +

    + +

    Go 1.9

    + + +

    Go 1.13

    +
      +
    • +Integer literals may use the prefixes 0b, 0B, 0o, +and 0O for binary, and octal literals, respectively. +
    • +
    • +Hexadecimal floating-point literals may be written using the prefixes +0x and 0X. +
    • +
    • +The imaginary suffix i may be used with any (binary, decimal, hexadecimal) +integer or floating-point literal, not just decimal literals. +
    • +
    • +The digits of any number literal may be separated (grouped) +using underscores _. +
    • +
    • +The shift count in a shift operation may be a signed integer type. +
    • +
    + +

    Go 1.14

    +
      +
    • +Embedding a method more than once through different embedded interfaces +is not an error. +
    • +
    + +

    Go 1.17

    +
      +
    • +A slice may be converted to an array pointer if the slice and array element +types match, and the array is not longer than the slice. +
    • +
    • +The built-in package unsafe includes the new functions +Add and Slice. +
    • +
    + +

    Go 1.18

    +

    +The 1.18 release adds polymorphic functions and types ("generics") to the language. +Specifically: +

    + + +

    Go 1.20

    +
      +
    • +A slice may be converted to an array if the slice and array element +types match and the array is not longer than the slice. +
    • +
    • +The built-in package unsafe includes the new functions +SliceData, String, and StringData. +
    • +
    • +Comparable types (such as ordinary interfaces) may satisfy +comparable constraints, even if the type arguments are not strictly comparable. +
    • +
    + +

    Go 1.21

    +
      +
    • +The set of predeclared functions includes the new functions +min, max, and clear. +
    • +
    • +Type inference uses the types of interface methods for inference. +It also infers type arguments for generic functions assigned to variables or +passed as arguments to other (possibly generic) functions. +
    • +
    + +

    Go 1.22

    +
      +
    • +In a "for" statement, each iteration has its own set of iteration +variables rather than sharing the same variables in each iteration. +
    • +
    • +A "for" statement with "range" clause may iterate over +integer values from zero to an upper limit. +
    • +
    +

    Type unification rules

    diff --git a/doc/godebug.md b/doc/godebug.md index 9235635bdd..2b8852a7ec 100644 --- a/doc/godebug.md +++ b/doc/godebug.md @@ -126,6 +126,25 @@ for example, see the [runtime documentation](/pkg/runtime#hdr-Environment_Variables) and the [go command documentation](/cmd/go#hdr-Build_and_test_caching). +### Go 1.23 + +Go 1.23 changed the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat) +for reparse points, which can be controlled with the `winsymlink` setting. +As of Go 1.23 (`winsymlink=1`), mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink) +set, and reparse points that are not symlinks, Unix sockets, or dedup files now +always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. As a result of these changes, +[`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates +mount points, which was a source of many inconsistencies and bugs. +At previous versions (`winsymlink=0`), mount points are treated as symlinks, +and other reparse points with non-default [`os.ModeType`](/pkg/os#ModeType) bits +(such as [`os.ModeDir`](/pkg/os#ModeDir)) do not have the `ModeIrregular` bit set. + +Go 1.23 changed [`os.Readlink`](/pkg/os#Readlink) and [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) +to avoid trying to normalize volumes to drive letters, which was not always even possible. +This behavior is controlled by the `winreadlinkvolume` setting. +For Go 1.23, it defaults to `winreadlinkvolume=1`. +Previous versions default to `winreadlinkvolume=0`. + ### Go 1.22 Go 1.22 adds a configurable limit to control the maximum acceptable RSA key size @@ -148,7 +167,7 @@ for the explicit representation of [type aliases](/ref/spec#Type_declarations). Whether the type checker produces `Alias` types or not is controlled by the [`gotypesalias` setting](/pkg/go/types#Alias). For Go 1.22 it defaults to `gotypesalias=0`. -For Go 1.23, `gotypealias=1` will become the default. 
+For Go 1.23, `gotypesalias=1` will become the default. This setting will be removed in a future release, Go 1.24 at the earliest. Go 1.22 changed the default minimum TLS version supported by both servers @@ -159,6 +178,41 @@ Go 1.22 changed the default TLS cipher suites used by clients and servers when not explicitly configured, removing the cipher suites which used RSA based key exchange. The default can be revert using the [`tlsrsakex` setting](/pkg/crypto/tls/#Config). +Go 1.22 disabled +[`ConnectionState.ExportKeyingMaterial`](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial) +when the connection supports neither TLS 1.3 nor Extended Master Secret +(implemented in Go 1.21). It can be reenabled with the [`tlsunsafeekm` +setting](/pkg/crypto/tls/#ConnectionState.ExportKeyingMaterial). + +Go 1.22 changed how the runtime interacts with transparent huge pages on Linux. +In particular, a common default Linux kernel configuration can result in +significant memory overheads, and Go 1.22 no longer works around this default. +To work around this issue without adjusting kernel settings, transparent huge +pages can be disabled for Go memory with the +[`disablethp` setting](/pkg/runtime#hdr-Environment_Variable). +This behavior was backported to Go 1.21.1, but the setting is only available +starting with Go 1.21.6. +This setting may be removed in a future release, and users impacted by this issue +should adjust their Linux configuration according to the recommendations in the +[GC guide](/doc/gc-guide#Linux_transparent_huge_pages), or switch to a Linux +distribution that disables transparent huge pages altogether. + +Go 1.22 added contention on runtime-internal locks to the [`mutex` +profile](/pkg/runtime/pprof#Profile). Contention on these locks is always +reported at `runtime._LostContendedRuntimeLock`. Complete stack traces of +runtime locks can be enabled with the [`runtimecontentionstacks` +setting](/pkg/runtime#hdr-Environment_Variable). 
These stack traces have +non-standard semantics, see setting documentation for details. + +Go 1.22 added a new [`crypto/x509.Certificate`](/pkg/crypto/x509/#Certificate) +field, [`Policies`](/pkg/crypto/x509/#Certificate.Policies), which supports +certificate policy OIDs with components larger than 31 bits. By default this +field is only used during parsing, when it is populated with policy OIDs, but +not used during marshaling. It can be used to marshal these larger OIDs, instead +of the existing PolicyIdentifiers field, by using the +[`x509usepolicies` setting.](/pkg/crypto/x509/#CreateCertificate). + + ### Go 1.21 Go 1.21 made it a run-time error to call `panic` with a nil interface value, diff --git a/doc/initial/1-intro.md b/doc/initial/1-intro.md new file mode 100644 index 0000000000..e28191ca9e --- /dev/null +++ b/doc/initial/1-intro.md @@ -0,0 +1,12 @@ + + + + +## Introduction to Go 1.XX {#introduction} + diff --git a/doc/initial/2-language.md b/doc/initial/2-language.md new file mode 100644 index 0000000000..61030bd676 --- /dev/null +++ b/doc/initial/2-language.md @@ -0,0 +1,3 @@ +## Changes to the language {#language} + + diff --git a/doc/initial/3-tools.md b/doc/initial/3-tools.md new file mode 100644 index 0000000000..5638f240a5 --- /dev/null +++ b/doc/initial/3-tools.md @@ -0,0 +1,6 @@ +## Tools {#tools} + +### Go command {#go-command} + +### Cgo {#cgo} + diff --git a/doc/initial/4-runtime.md b/doc/initial/4-runtime.md new file mode 100644 index 0000000000..1f8e445e0b --- /dev/null +++ b/doc/initial/4-runtime.md @@ -0,0 +1 @@ +## Runtime {#runtime} diff --git a/doc/initial/5-toolchain.md b/doc/initial/5-toolchain.md new file mode 100644 index 0000000000..0f4a816479 --- /dev/null +++ b/doc/initial/5-toolchain.md @@ -0,0 +1,7 @@ +## Compiler {#compiler} + +## Assembler {#assembler} + +## Linker {#linker} + + diff --git a/doc/initial/6-stdlib/0-heading.md b/doc/initial/6-stdlib/0-heading.md new file mode 100644 index 0000000000..a992170d43 --- /dev/null 
+++ b/doc/initial/6-stdlib/0-heading.md @@ -0,0 +1,2 @@ +## Standard library {#library} + diff --git a/doc/initial/6-stdlib/99-minor/0-heading.md b/doc/initial/6-stdlib/99-minor/0-heading.md new file mode 100644 index 0000000000..a98105e8cc --- /dev/null +++ b/doc/initial/6-stdlib/99-minor/0-heading.md @@ -0,0 +1,3 @@ +### Minor changes to the library {#minor_library_changes} + + diff --git a/doc/initial/6-stdlib/99-minor/README b/doc/initial/6-stdlib/99-minor/README new file mode 100644 index 0000000000..fac778de05 --- /dev/null +++ b/doc/initial/6-stdlib/99-minor/README @@ -0,0 +1 @@ +API changes and other small changes to the standard library go here. diff --git a/doc/initial/7-ports.md b/doc/initial/7-ports.md new file mode 100644 index 0000000000..8bea3f8fbc --- /dev/null +++ b/doc/initial/7-ports.md @@ -0,0 +1,2 @@ +## Ports {#ports} + diff --git a/doc/next/1-intro.md b/doc/next/1-intro.md new file mode 100644 index 0000000000..639550f92a --- /dev/null +++ b/doc/next/1-intro.md @@ -0,0 +1,12 @@ + + + + +## Introduction to Go 1.23 {#introduction} + diff --git a/doc/next/2-language.md b/doc/next/2-language.md new file mode 100644 index 0000000000..61030bd676 --- /dev/null +++ b/doc/next/2-language.md @@ -0,0 +1,3 @@ +## Changes to the language {#language} + + diff --git a/doc/next/3-tools.md b/doc/next/3-tools.md new file mode 100644 index 0000000000..bdbe6c0771 --- /dev/null +++ b/doc/next/3-tools.md @@ -0,0 +1,12 @@ +## Tools {#tools} + +### Go command {#go-command} + +Setting the `GOROOT_FINAL` environment variable no longer has an effect +([#62047](https://go.dev/issue/62047)). +Distributions that install the `go` command to a location other than +`$GOROOT/bin/go` should install a symlink instead of relocating +or copying the `go` binary. 
+ +### Cgo {#cgo} + diff --git a/doc/next/4-runtime.md b/doc/next/4-runtime.md new file mode 100644 index 0000000000..1f8e445e0b --- /dev/null +++ b/doc/next/4-runtime.md @@ -0,0 +1 @@ +## Runtime {#runtime} diff --git a/doc/next/5-toolchain.md b/doc/next/5-toolchain.md new file mode 100644 index 0000000000..0f4a816479 --- /dev/null +++ b/doc/next/5-toolchain.md @@ -0,0 +1,7 @@ +## Compiler {#compiler} + +## Assembler {#assembler} + +## Linker {#linker} + + diff --git a/doc/next/6-stdlib/0-heading.md b/doc/next/6-stdlib/0-heading.md new file mode 100644 index 0000000000..a992170d43 --- /dev/null +++ b/doc/next/6-stdlib/0-heading.md @@ -0,0 +1,2 @@ +## Standard library {#library} + diff --git a/doc/next/6-stdlib/99-minor/0-heading.md b/doc/next/6-stdlib/99-minor/0-heading.md new file mode 100644 index 0000000000..a98105e8cc --- /dev/null +++ b/doc/next/6-stdlib/99-minor/0-heading.md @@ -0,0 +1,3 @@ +### Minor changes to the library {#minor_library_changes} + + diff --git a/doc/next/6-stdlib/99-minor/README b/doc/next/6-stdlib/99-minor/README new file mode 100644 index 0000000000..fac778de05 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/README @@ -0,0 +1 @@ +API changes and other small changes to the standard library go here. diff --git a/doc/next/6-stdlib/99-minor/database/sql/64707.md b/doc/next/6-stdlib/99-minor/database/sql/64707.md new file mode 100644 index 0000000000..70aad889ff --- /dev/null +++ b/doc/next/6-stdlib/99-minor/database/sql/64707.md @@ -0,0 +1,4 @@ +Errors returned by [`driver.Valuer`](/database/sql/driver#Driver) +implementations are now wrapped for improved error handling during +operations like [`Query`](/database/sql#DB.Query), [`Exec`](/database/sql#DB.Exec), +and [`QueryRow`](/database/sql#DB.QueryRow). 
diff --git a/doc/next/6-stdlib/99-minor/net/62254.md b/doc/next/6-stdlib/99-minor/net/62254.md new file mode 100644 index 0000000000..012b7ede5e --- /dev/null +++ b/doc/next/6-stdlib/99-minor/net/62254.md @@ -0,0 +1,4 @@ +The new type [`KeepAliveConfig`](/net#KeepAliveConfig) permits fine-tuning +the keep-alive options for TCP connections, via a new +[`TCPConn.SetKeepAliveConfig`](/net#TCPConn.SetKeepAliveConfig) method and +new KeepAliveConfig fields for [`Dialer`](/net#Dialer) and [`ListenConfig`](/net#ListenConfig). diff --git a/doc/next/6-stdlib/99-minor/net/http/64910.md b/doc/next/6-stdlib/99-minor/net/http/64910.md new file mode 100644 index 0000000000..020e18b97b --- /dev/null +++ b/doc/next/6-stdlib/99-minor/net/http/64910.md @@ -0,0 +1,2 @@ +The patterns used by [`net/http.ServeMux`](//net/http#ServeMux) allow +multiple spaces matching regexp '[ \t]+'. diff --git a/doc/next/6-stdlib/99-minor/os/33357.md b/doc/next/6-stdlib/99-minor/os/33357.md new file mode 100644 index 0000000000..3e80943263 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/os/33357.md @@ -0,0 +1,3 @@ +The [`os.Stat`](/os#Stat) function now sets the [`os.ModeSocket`](/os#ModeSocket) +bit for files that are Unix sockets on Windows. These files are identified by +having a reparse tag set to `IO_REPARSE_TAG_AF_UNIX`. diff --git a/doc/next/6-stdlib/99-minor/os/61893.md b/doc/next/6-stdlib/99-minor/os/61893.md new file mode 100644 index 0000000000..b2dd537039 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/os/61893.md @@ -0,0 +1,7 @@ +On Windows, the mode bits reported by [`os.Lstat`](/pkg/os#Lstat) and [`os.Stat`](/pkg/os#Stat) +for reparse points changed. Mount points no longer have [`os.ModeSymlink`](/pkg/os#ModeSymlink) set, +and reparse points that are not symlinks, Unix sockets, or dedup files now +always have [`os.ModeIrregular`](/pkg/os#ModeIrregular) set. +This behavior is controlled by the `winsymlink` setting. +For Go 1.23, it defaults to `winsymlink=1`. 
+Previous versions default to `winsymlink=0`. diff --git a/doc/next/6-stdlib/99-minor/os/62484.md b/doc/next/6-stdlib/99-minor/os/62484.md new file mode 100644 index 0000000000..81abb4bc68 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/os/62484.md @@ -0,0 +1,2 @@ +The [`CopyFS`](/os#CopyFS) function copies an [`io/fs.FS`](/io/fs#FS) +into the local filesystem. diff --git a/doc/next/6-stdlib/99-minor/os/63703.md b/doc/next/6-stdlib/99-minor/os/63703.md new file mode 100644 index 0000000000..abde9448f5 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/os/63703.md @@ -0,0 +1,5 @@ +On Windows, [`os.Readlink`](/os#Readlink) no longer tries +to normalize volumes to drive letters, which was not always even possible. +This behavior is controlled by the `winreadlinkvolume` setting. +For Go 1.23, it defaults to `winreadlinkvolume=1`. +Previous versions default to `winreadlinkvolume=0`. \ No newline at end of file diff --git a/doc/next/6-stdlib/99-minor/path/filepath/57151.md b/doc/next/6-stdlib/99-minor/path/filepath/57151.md new file mode 100644 index 0000000000..67e84894fe --- /dev/null +++ b/doc/next/6-stdlib/99-minor/path/filepath/57151.md @@ -0,0 +1,2 @@ +The new [`Localize`](/path/filepath#Localize) function safely converts +a slash-separated path into an operating system path. diff --git a/doc/next/6-stdlib/99-minor/path/filepath/63703.md b/doc/next/6-stdlib/99-minor/path/filepath/63703.md new file mode 100644 index 0000000000..0aa0ba6fe3 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/path/filepath/63703.md @@ -0,0 +1,11 @@ +On Windows, [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer evaluates +mount points, which was a source of many inconsistencies and bugs. +This behavior is controlled by the `winsymlink` setting. +For Go 1.23, it defaults to `winsymlink=1`. +Previous versions default to `winsymlink=0`. 
+ +On Windows, [`filepath.EvalSymlinks`](/pkg/path/filepath#EvalSymlinks) no longer tries +to normalize volumes to drive letters, which was not always even possible. +This behavior is controlled by the `winreadlinkvolume` setting. +For Go 1.23, it defaults to `winreadlinkvolume=1`. +Previous versions default to `winreadlinkvolume=0`. \ No newline at end of file diff --git a/doc/next/6-stdlib/99-minor/reflect/60427.md b/doc/next/6-stdlib/99-minor/reflect/60427.md new file mode 100644 index 0000000000..92230cde1a --- /dev/null +++ b/doc/next/6-stdlib/99-minor/reflect/60427.md @@ -0,0 +1,6 @@ +The new methods synonymous with the method of the same name in [`reflect.Value`](/pkg/reflect#Value) +are added to [`reflect.Type`](/pkg/reflect#Type): +1. [`OverflowComplex`](/pkg/reflect#Type.OverflowComplex) +2. [`OverflowFloat`](/pkg/reflect#Type.OverflowFloat) +3. [`OverflowInt`](/pkg/reflect#Type.OverflowInt) +4. [`OverflowUint`](/pkg/reflect#Type.OverflowUint) diff --git a/doc/next/6-stdlib/99-minor/runtime/debug/42888.md b/doc/next/6-stdlib/99-minor/runtime/debug/42888.md new file mode 100644 index 0000000000..d75c86900f --- /dev/null +++ b/doc/next/6-stdlib/99-minor/runtime/debug/42888.md @@ -0,0 +1,8 @@ + +The [`debug.SetCrashOutput`](/runtime#SetCrashOutput) function allows +the user to specify an alternate file to which the runtime should +write its fatal crash report +([#42888](https://github.com/golang/go/issues/42888)). +It may be used to construct an automated reporting mechanism for all +unexpected crashes, not just those in goroutines that explicitly use +`recover`. diff --git a/doc/next/6-stdlib/99-minor/sync/61696.md b/doc/next/6-stdlib/99-minor/sync/61696.md new file mode 100644 index 0000000000..173076ca5e --- /dev/null +++ b/doc/next/6-stdlib/99-minor/sync/61696.md @@ -0,0 +1,4 @@ +The [`(*sync.Map) Clear()`](//sync#Map.Clear) method deletes +all the entries, resulting in an empty map +([#61696](https://github.com/golang/go/issues/61696)). 
+It is analogous to `clear`. \ No newline at end of file diff --git a/doc/next/6-stdlib/99-minor/syscall/62254.md b/doc/next/6-stdlib/99-minor/syscall/62254.md new file mode 100644 index 0000000000..fe9651a178 --- /dev/null +++ b/doc/next/6-stdlib/99-minor/syscall/62254.md @@ -0,0 +1 @@ +The syscall package now defines WSAENOPROTOOPT on Windows. diff --git a/doc/next/7-ports.md b/doc/next/7-ports.md new file mode 100644 index 0000000000..796cc4bf1b --- /dev/null +++ b/doc/next/7-ports.md @@ -0,0 +1,8 @@ +## Ports {#ports} + +### Darwin {#darwin} + + +As [announced](go1.22#darwin) in the Go 1.22 release notes, +Go 1.23 requires macOS 11 Big Sur or later; +support for previous versions has been discontinued. diff --git a/lib/time/mkzip.go b/lib/time/mkzip.go index 3920b11b6c..2f4d4dc71a 100644 --- a/lib/time/mkzip.go +++ b/lib/time/mkzip.go @@ -31,7 +31,7 @@ import ( ) func usage() { - fmt.Fprintf(os.Stderr, "usage: go run mkzip.go ../../zoneinfo.zip\n") + fmt.Fprintf(os.Stderr, "usage: go run mkzip.go zoneinfo.zip\n") os.Exit(2) } diff --git a/lib/time/update.bash b/lib/time/update.bash index 605afa76d3..bed82b4f40 100755 --- a/lib/time/update.bash +++ b/lib/time/update.bash @@ -24,8 +24,8 @@ # in the CL match the update.bash in the CL. # Versions to use. -CODE=2023c -DATA=2023c +CODE=2024a +DATA=2024a set -e diff --git a/lib/time/zoneinfo.zip b/lib/time/zoneinfo.zip index 417ee2b194..bb38801b7a 100644 Binary files a/lib/time/zoneinfo.zip and b/lib/time/zoneinfo.zip differ diff --git a/misc/wasm/go_wasip1_wasm_exec b/misc/wasm/go_wasip1_wasm_exec index dc110327af..cd16b96ea7 100755 --- a/misc/wasm/go_wasip1_wasm_exec +++ b/misc/wasm/go_wasip1_wasm_exec @@ -14,8 +14,15 @@ case "$GOWASIRUNTIME" in exec wazero run -mount /:/ -env-inherit -cachedir "${TMPDIR:-/tmp}"/wazero ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}" ;; "wasmtime" | "") - # TODO(go.dev/issue/63718): Switch to the new CLI offered in the major version 14 of Wasmtime. 
- exec env WASMTIME_NEW_CLI=0 wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}" + # Match the major version in "wasmtime-cli 14.0.0". For versions before 14 + # we need to use the old CLI. This requires Bash v3.0 and above. + # TODO(johanbrandhorst): Remove this condition once 1.22 is released. + # From 1.23 onwards we'll only support the new wasmtime CLI. + if [[ "$(wasmtime --version)" =~ wasmtime-cli[[:space:]]([0-9]+)\.[0-9]+\.[0-9]+ && "${BASH_REMATCH[1]}" -lt 14 ]]; then + exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" --max-wasm-stack 1048576 ${GOWASIRUNTIMEARGS:-} "$1" -- "${@:2}" + else + exec wasmtime run --dir=/ --env PWD="$PWD" --env PATH="$PATH" -W max-wasm-stack=1048576 ${GOWASIRUNTIMEARGS:-} "$1" "${@:2}" + fi ;; *) echo "Unknown Go WASI runtime specified: $GOWASIRUNTIME" diff --git a/src/archive/tar/common.go b/src/archive/tar/common.go index e507d559cb..4910908f81 100644 --- a/src/archive/tar/common.go +++ b/src/archive/tar/common.go @@ -614,8 +614,6 @@ func (fi headerFileInfo) String() string { // sysStat, if non-nil, populates h from system-dependent fields of fi. var sysStat func(fi fs.FileInfo, h *Header) error -var loadUidAndGid func(fi fs.FileInfo, uid, gid *int) - const ( // Mode constants from the USTAR spec: // See http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html#tag_20_92_13_06 @@ -641,10 +639,6 @@ const ( // Since fs.FileInfo's Name method only returns the base name of // the file it describes, it may be necessary to modify Header.Name // to provide the full path name of the file. -// -// If fi implements [FileInfoNames] -// the Gname and Uname of the header are -// provided by the methods of the interface. 
func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) { if fi == nil { return nil, errors.New("archive/tar: FileInfo is nil") @@ -717,38 +711,12 @@ func FileInfoHeader(fi fs.FileInfo, link string) (*Header, error) { } } } - if iface, ok := fi.(FileInfoNames); ok { - var err error - if loadUidAndGid != nil { - loadUidAndGid(fi, &h.Uid, &h.Gid) - } - h.Gname, err = iface.Gname(h.Gid) - if err != nil { - return nil, err - } - h.Uname, err = iface.Uname(h.Uid) - if err != nil { - return nil, err - } - return h, nil - } if sysStat != nil { return h, sysStat(fi, h) } return h, nil } -// FileInfoNames extends [FileInfo] to translate UID/GID to names. -// Passing an instance of this to [FileInfoHeader] permits the caller -// to control UID/GID resolution. -type FileInfoNames interface { - fs.FileInfo - // Uname should translate a UID into a user name. - Uname(uid int) (string, error) - // Gname should translate a GID into a group name. - Gname(gid int) (string, error) -} - // isHeaderOnlyType checks if the given type flag is of the type that has no // data section even if a size is specified. func isHeaderOnlyType(flag byte) bool { diff --git a/src/archive/tar/stat_unix.go b/src/archive/tar/stat_unix.go index 5b23d3c830..0f3428bc24 100644 --- a/src/archive/tar/stat_unix.go +++ b/src/archive/tar/stat_unix.go @@ -17,7 +17,6 @@ import ( func init() { sysStat = statUnix - loadUidAndGid = loadUidAndGidFunc } // userMap and groupMap caches UID and GID lookups for performance reasons. 
@@ -100,12 +99,3 @@ func statUnix(fi fs.FileInfo, h *Header) error { } return nil } - -func loadUidAndGidFunc(fi fs.FileInfo, uid, gid *int) { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return - } - *uid = int(sys.Uid) - *gid = int(sys.Gid) -} diff --git a/src/archive/tar/tar_test.go b/src/archive/tar/tar_test.go index 49d31bb757..a476f5eb01 100644 --- a/src/archive/tar/tar_test.go +++ b/src/archive/tar/tar_test.go @@ -848,71 +848,3 @@ func Benchmark(b *testing.B) { }) } - -const ( - testUid = 10 - testGid = 20 -) - -type fileInfoNames struct{} - -func (f *fileInfoNames) Name() string { - return "tmp" -} - -func (f *fileInfoNames) Size() int64 { - return 0 -} - -func (f *fileInfoNames) Mode() fs.FileMode { - return 0777 -} - -func (f *fileInfoNames) ModTime() time.Time { - return time.Time{} -} - -func (f *fileInfoNames) IsDir() bool { - return false -} - -func (f *fileInfoNames) Sys() any { - return nil -} - -func (f *fileInfoNames) Uname(uid int) (string, error) { - if uid == testUid { - return "Uname", nil - } - return "", nil -} - -func (f *fileInfoNames) Gname(gid int) (string, error) { - if gid == testGid { - return "Gname", nil - } - return "", nil -} - -func TestFileInfoHeaderUseFileInfoNames(t *testing.T) { - origLoadUidAndGid := loadUidAndGid - defer func() { - loadUidAndGid = origLoadUidAndGid - }() - loadUidAndGid = func(fi fs.FileInfo, uid, gid *int) { - *uid = testUid - *gid = testGid - } - - info := &fileInfoNames{} - header, err := FileInfoHeader(info, "") - if err != nil { - t.Fatal(err) - } - if header.Uname != "Uname" { - t.Fatalf("header.Uname: got %v, want %v", header.Uname, "Uname") - } - if header.Gname != "Gname" { - t.Fatalf("header.Gname: got %v, want %v", header.Gname, "Gname") - } -} diff --git a/src/archive/tar/writer_test.go b/src/archive/tar/writer_test.go index a9936d6cd5..8b113a34e0 100644 --- a/src/archive/tar/writer_test.go +++ b/src/archive/tar/writer_test.go @@ -581,10 +581,10 @@ func TestPaxSymlink(t *testing.T) { 
t.Fatal(err) } hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeSymlink if err != nil { t.Fatalf("os.Stat:1 %v", err) } + hdr.Typeflag = TypeSymlink // Force a PAX long linkname to be written longLinkname := strings.Repeat("1234567890/1234567890", 10) hdr.Linkname = longLinkname @@ -761,10 +761,10 @@ func TestUSTARLongName(t *testing.T) { t.Fatal(err) } hdr, err := FileInfoHeader(fileinfo, "") - hdr.Typeflag = TypeDir if err != nil { t.Fatalf("os.Stat:1 %v", err) } + hdr.Typeflag = TypeDir // Force a PAX long name to be written. The name was taken from a practical example // that fails and replaced ever char through numbers to anonymize the sample. longName := "/0000_0000000/00000-000000000/0000_0000000/00000-0000000000000/0000_0000000/00000-0000000-00000000/0000_0000000/00000000/0000_0000000/000/0000_0000000/00000000v00/0000_0000000/000000/0000_0000000/0000000/0000_0000000/00000y-00/0000/0000/00000000/0x000000/" diff --git a/src/archive/zip/writer.go b/src/archive/zip/writer.go index e33df2431c..9e2dcff713 100644 --- a/src/archive/zip/writer.go +++ b/src/archive/zip/writer.go @@ -433,6 +433,10 @@ func writeHeader(w io.Writer, h *header) error { // [Writer.CreateHeader], [Writer.CreateRaw], or [Writer.Close]. // // In contrast to [Writer.CreateHeader], the bytes passed to Writer are not compressed. +// +// CreateRaw's argument is stored in w. If the argument is a pointer to the embedded +// [FileHeader] in a [File] obtained from a [Reader] created from in-memory data, +// then w will refer to all of that memory. func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) { if err := w.prepare(fh); err != nil { return nil, err @@ -471,7 +475,10 @@ func (w *Writer) Copy(f *File) error { if err != nil { return err } - fw, err := w.CreateRaw(&f.FileHeader) + // Copy the FileHeader so w doesn't store a pointer to the data + // of f's entire archive. See #65499. 
+ fh := f.FileHeader + fw, err := w.CreateRaw(&fh) if err != nil { return err } diff --git a/src/builtin/builtin.go b/src/builtin/builtin.go index da0ace1498..668c799ca7 100644 --- a/src/builtin/builtin.go +++ b/src/builtin/builtin.go @@ -284,9 +284,10 @@ func panic(v any) // by restoring normal execution and retrieves the error value passed to the // call of panic. If recover is called outside the deferred function it will // not stop a panicking sequence. In this case, or when the goroutine is not -// panicking, or if the argument supplied to panic was nil, recover returns -// nil. Thus the return value from recover reports whether the goroutine is -// panicking. +// panicking, recover returns nil. +// +// Prior to Go 1.21, recover would also return nil if panic is called with +// a nil argument. See [panic] for details. func recover() any // The print built-in function formats its arguments in an diff --git a/src/bytes/boundary_test.go b/src/bytes/boundary_test.go index f9855fcb05..67f377e089 100644 --- a/src/bytes/boundary_test.go +++ b/src/bytes/boundary_test.go @@ -98,3 +98,18 @@ func TestIndexNearPageBoundary(t *testing.T) { } q[len(q)-1] = 0 } + +func TestCountNearPageBoundary(t *testing.T) { + t.Parallel() + b := dangerousSlice(t) + for i := range b { + c := Count(b[i:], []byte{1}) + if c != 0 { + t.Fatalf("Count(b[%d:], {1})=%d, want 0\n", i, c) + } + c = Count(b[:i], []byte{0}) + if c != i { + t.Fatalf("Count(b[:%d], {0})=%d, want %d\n", i, c, i) + } + } +} diff --git a/src/bytes/bytes.go b/src/bytes/bytes.go index 0679b43a20..1871814c6e 100644 --- a/src/bytes/bytes.go +++ b/src/bytes/bytes.go @@ -525,7 +525,7 @@ func Join(s [][]byte, sep []byte) []byte { n += len(v) } - b := bytealg.MakeNoZero(n) + b := bytealg.MakeNoZero(n)[:n:n] bp := copy(b, s[0]) for _, v := range s[1:] { bp += copy(b[bp:], sep) @@ -610,7 +610,7 @@ func Repeat(b []byte, count int) []byte { chunkMax = len(b) } } - nb := bytealg.MakeNoZero(n) + nb := bytealg.MakeNoZero(n)[:n:n] bp := 
copy(nb, b) for bp < n { chunk := bp @@ -640,7 +640,7 @@ func ToUpper(s []byte) []byte { // Just return a copy. return append([]byte(""), s...) } - b := bytealg.MakeNoZero(len(s)) + b := bytealg.MakeNoZero(len(s))[:len(s):len(s)] for i := 0; i < len(s); i++ { c := s[i] if 'a' <= c && c <= 'z' { @@ -670,7 +670,7 @@ func ToLower(s []byte) []byte { if !hasUpper { return append([]byte(""), s...) } - b := bytealg.MakeNoZero(len(s)) + b := bytealg.MakeNoZero(len(s))[:len(s):len(s)] for i := 0; i < len(s); i++ { c := s[i] if 'A' <= c && c <= 'Z' { diff --git a/src/bytes/bytes_test.go b/src/bytes/bytes_test.go index f0733edd3f..5e8cf85fd9 100644 --- a/src/bytes/bytes_test.go +++ b/src/bytes/bytes_test.go @@ -629,6 +629,11 @@ func BenchmarkEqual(b *testing.B) { }) sizes := []int{1, 6, 9, 15, 16, 20, 32, 4 << 10, 4 << 20, 64 << 20} + + b.Run("same", func(b *testing.B) { + benchBytes(b, sizes, bmEqual(func(a, b []byte) bool { return Equal(a, a) })) + }) + benchBytes(b, sizes, bmEqual(Equal)) } diff --git a/src/bytes/example_test.go b/src/bytes/example_test.go index 5a66b1e436..54df5f74e5 100644 --- a/src/bytes/example_test.go +++ b/src/bytes/example_test.go @@ -102,7 +102,7 @@ func ExampleBuffer_Read() { fmt.Println(n) fmt.Println(b.String()) fmt.Println(string(rdbuf)) - // Output + // Output: // 1 // bcde // a @@ -118,7 +118,7 @@ func ExampleBuffer_ReadByte() { } fmt.Println(c) fmt.Println(b.String()) - // Output + // Output: // 97 // bcde } diff --git a/src/cmd/addr2line/addr2line_test.go b/src/cmd/addr2line/addr2line_test.go index 0ea8994b6a..e5b0a0fdae 100644 --- a/src/cmd/addr2line/addr2line_test.go +++ b/src/cmd/addr2line/addr2line_test.go @@ -109,32 +109,18 @@ func testAddr2Line(t *testing.T, dbgExePath, addr string) { srcPath = filepath.FromSlash(srcPath) fi2, err := os.Stat(srcPath) - // If GOROOT_FINAL is set and srcPath is not the file we expect, perhaps - // srcPath has had GOROOT_FINAL substituted for GOROOT and GOROOT hasn't been - // moved to its final location 
yet. If so, try the original location instead. - if gorootFinal := os.Getenv("GOROOT_FINAL"); gorootFinal != "" && - (os.IsNotExist(err) || (err == nil && !os.SameFile(fi1, fi2))) { - // srcPath is clean, but GOROOT_FINAL itself might not be. - // (See https://golang.org/issue/41447.) - gorootFinal = filepath.Clean(gorootFinal) - - if strings.HasPrefix(srcPath, gorootFinal) { - fi2, err = os.Stat(runtime.GOROOT() + strings.TrimPrefix(srcPath, gorootFinal)) - } - } - if err != nil { t.Fatalf("Stat failed: %v", err) } if !os.SameFile(fi1, fi2) { t.Fatalf("addr2line_test.go and %s are not same file", srcPath) } - if srcLineNo != "138" { - t.Fatalf("line number = %v; want 138", srcLineNo) + if want := "124"; srcLineNo != want { + t.Fatalf("line number = %v; want %s", srcLineNo, want) } } -// This is line 137. The test depends on that. +// This is line 123. The test depends on that. func TestAddr2Line(t *testing.T) { testenv.MustHaveGoBuild(t) diff --git a/src/cmd/api/api_test.go b/src/cmd/api/api_test.go index 910e046f12..ba358d364d 100644 --- a/src/cmd/api/api_test.go +++ b/src/cmd/api/api_test.go @@ -285,6 +285,25 @@ func TestIssue41358(t *testing.T) { } } +func TestIssue64958(t *testing.T) { + defer func() { + if x := recover(); x != nil { + t.Errorf("expected no panic; recovered %v", x) + } + }() + + testenv.MustHaveGoBuild(t) + + for _, context := range contexts { + w := NewWalker(context, "testdata/src/issue64958") + pkg, err := w.importFrom("p", "", 0) + if err != nil { + t.Errorf("expected no error importing; got %T", err) + } + w.export(pkg) + } +} + func TestCheck(t *testing.T) { if !*flagCheck { t.Skip("-check not specified") diff --git a/src/cmd/api/main_test.go b/src/cmd/api/main_test.go index 94e159e7d8..0a3d44ddd0 100644 --- a/src/cmd/api/main_test.go +++ b/src/cmd/api/main_test.go @@ -851,6 +851,16 @@ func (w *Walker) writeType(buf *bytes.Buffer, typ types.Type) { buf.WriteByte('.') } buf.WriteString(typ.Obj().Name()) + if targs := typ.TypeArgs(); 
targs.Len() > 0 { + buf.WriteByte('[') + for i := 0; i < targs.Len(); i++ { + if i > 0 { + buf.WriteString(", ") + } + w.writeType(buf, targs.At(i)) + } + buf.WriteByte(']') + } case *types.TypeParam: // Type parameter names may change, so use a placeholder instead. @@ -957,17 +967,17 @@ func (w *Walker) emitType(obj *types.TypeName) { if w.isDeprecated(obj) { w.emitf("type %s //deprecated", name) } + typ := obj.Type() + if obj.IsAlias() { + w.emitf("type %s = %s", name, w.typeString(typ)) + return + } if tparams := obj.Type().(*types.Named).TypeParams(); tparams != nil { var buf bytes.Buffer buf.WriteString(name) w.writeTypeParams(&buf, tparams, true) name = buf.String() } - typ := obj.Type() - if obj.IsAlias() { - w.emitf("type %s = %s", name, w.typeString(typ)) - return - } switch typ := typ.Underlying().(type) { case *types.Struct: w.emitStructType(name, typ) diff --git a/src/cmd/api/testdata/src/issue64958/p/p.go b/src/cmd/api/testdata/src/issue64958/p/p.go new file mode 100644 index 0000000000..feba86797f --- /dev/null +++ b/src/cmd/api/testdata/src/issue64958/p/p.go @@ -0,0 +1,3 @@ +package p + +type BasicAlias = uint8 diff --git a/src/cmd/api/testdata/src/pkg/p4/golden.txt b/src/cmd/api/testdata/src/pkg/p4/golden.txt index eec0598dcd..1ceae17386 100644 --- a/src/cmd/api/testdata/src/pkg/p4/golden.txt +++ b/src/cmd/api/testdata/src/pkg/p4/golden.txt @@ -1,4 +1,4 @@ -pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair +pkg p4, func NewPair[$0 interface{ M }, $1 interface{ ~int }]($0, $1) Pair[$0, $1] pkg p4, method (Pair[$0, $1]) Second() $1 pkg p4, method (Pair[$0, $1]) First() $0 pkg p4, type Pair[$0 interface{ M }, $1 interface{ ~int }] struct diff --git a/src/cmd/asm/internal/asm/asm.go b/src/cmd/asm/internal/asm/asm.go index 375ef803bb..949b688bbd 100644 --- a/src/cmd/asm/internal/asm/asm.go +++ b/src/cmd/asm/internal/asm/asm.go @@ -16,6 +16,7 @@ import ( "cmd/asm/internal/lex" "cmd/internal/obj" "cmd/internal/obj/ppc64" + 
"cmd/internal/obj/riscv" "cmd/internal/obj/x86" "cmd/internal/sys" ) @@ -46,7 +47,11 @@ func (p *Parser) append(prog *obj.Prog, cond string, doLabel bool) { p.errorf("%v", err) return } - + case sys.RISCV64: + if err := riscv.ParseSuffix(prog, cond); err != nil { + p.errorf("unrecognized suffix .%q", cond) + return + } default: p.errorf("unrecognized suffix .%q", cond) return diff --git a/src/cmd/asm/internal/asm/endtoend_test.go b/src/cmd/asm/internal/asm/endtoend_test.go index a2de63685c..6e1aa1cd95 100644 --- a/src/cmd/asm/internal/asm/endtoend_test.go +++ b/src/cmd/asm/internal/asm/endtoend_test.go @@ -141,11 +141,17 @@ Diff: // Turn relative (PC) into absolute (PC) automatically, // so that most branch instructions don't need comments // giving the absolute form. - if len(f) > 0 && strings.HasSuffix(printed, "(PC)") { - last := f[len(f)-1] - n, err := strconv.Atoi(last[:len(last)-len("(PC)")]) + if len(f) > 0 && strings.Contains(printed, "(PC)") { + index := len(f) - 1 + suf := "(PC)" + for !strings.HasSuffix(f[index], suf) { + index-- + suf = "(PC)," + } + str := f[index] + n, err := strconv.Atoi(str[:len(str)-len(suf)]) if err == nil { - f[len(f)-1] = fmt.Sprintf("%d(PC)", seq+n) + f[index] = fmt.Sprintf("%d%s", seq+n, suf) } } @@ -372,10 +378,10 @@ func Test386EndToEnd(t *testing.T) { } func TestARMEndToEnd(t *testing.T) { - defer func(old int) { buildcfg.GOARM = old }(buildcfg.GOARM) + defer func(old int) { buildcfg.GOARM.Version = old }(buildcfg.GOARM.Version) for _, goarm := range []int{5, 6, 7} { t.Logf("GOARM=%d", goarm) - buildcfg.GOARM = goarm + buildcfg.GOARM.Version = goarm testEndToEnd(t, "arm", "arm") if goarm == 6 { testEndToEnd(t, "arm", "armv6") diff --git a/src/cmd/asm/internal/asm/parse.go b/src/cmd/asm/internal/asm/parse.go index ef6c840dc2..7a52e54090 100644 --- a/src/cmd/asm/internal/asm/parse.go +++ b/src/cmd/asm/internal/asm/parse.go @@ -217,8 +217,8 @@ next: for { tok = p.nextToken() if len(operands) == 0 && len(items) == 0 { - if 
p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386) && tok == '.' { - // Suffixes: ARM conditionals or x86 modifiers. + if p.arch.InFamily(sys.ARM, sys.ARM64, sys.AMD64, sys.I386, sys.RISCV64) && tok == '.' { + // Suffixes: ARM conditionals, RISCV rounding mode or x86 modifiers. tok = p.nextToken() str := p.lex.Text() if tok != scanner.Ident { diff --git a/src/cmd/asm/internal/asm/testdata/arm.s b/src/cmd/asm/internal/asm/testdata/arm.s index 2ba22c71de..93edc8854e 100644 --- a/src/cmd/asm/internal/asm/testdata/arm.s +++ b/src/cmd/asm/internal/asm/testdata/arm.s @@ -870,10 +870,13 @@ jmp_label_3: BIC.S R0@>R1, R2 // 7021d2e1 // SRL + SRL $0, R5, R6 // 0560a0e1 + SRL $1, R5, R6 // a560a0e1 SRL $14, R5, R6 // 2567a0e1 SRL $15, R5, R6 // a567a0e1 SRL $30, R5, R6 // 256fa0e1 SRL $31, R5, R6 // a56fa0e1 + SRL $32, R5, R6 // 2560a0e1 SRL.S $14, R5, R6 // 2567b0e1 SRL.S $15, R5, R6 // a567b0e1 SRL.S $30, R5, R6 // 256fb0e1 @@ -892,10 +895,13 @@ jmp_label_3: SRL.S R5, R7 // 3775b0e1 // SRA + SRA $0, R5, R6 // 0560a0e1 + SRA $1, R5, R6 // c560a0e1 SRA $14, R5, R6 // 4567a0e1 SRA $15, R5, R6 // c567a0e1 SRA $30, R5, R6 // 456fa0e1 SRA $31, R5, R6 // c56fa0e1 + SRA $32, R5, R6 // 4560a0e1 SRA.S $14, R5, R6 // 4567b0e1 SRA.S $15, R5, R6 // c567b0e1 SRA.S $30, R5, R6 // 456fb0e1 @@ -914,6 +920,8 @@ jmp_label_3: SRA.S R5, R7 // 5775b0e1 // SLL + SLL $0, R5, R6 // 0560a0e1 + SLL $1, R5, R6 // 8560a0e1 SLL $14, R5, R6 // 0567a0e1 SLL $15, R5, R6 // 8567a0e1 SLL $30, R5, R6 // 056fa0e1 @@ -935,6 +943,20 @@ jmp_label_3: SLL R5, R7 // 1775a0e1 SLL.S R5, R7 // 1775b0e1 +// Ops with zero shifts should encode as left shifts + ADD R0<<0, R1, R2 // 002081e0 + ADD R0>>0, R1, R2 // 002081e0 + ADD R0->0, R1, R2 // 002081e0 + ADD R0@>0, R1, R2 // 002081e0 + MOVW R0<<0(R1), R2 // 002091e7 + MOVW R0>>0(R1), R2 // 002091e7 + MOVW R0->0(R1), R2 // 002091e7 + MOVW R0@>0(R1), R2 // 002091e7 + MOVW R0, R1<<0(R2) // 010082e7 + MOVW R0, R1>>0(R2) // 010082e7 + MOVW R0, R1->0(R2) // 010082e7 + 
MOVW R0, R1@>0(R2) // 010082e7 + // MULA / MULS MULAWT R1, R2, R3, R4 // c23124e1 MULAWB R1, R2, R3, R4 // 823124e1 diff --git a/src/cmd/asm/internal/asm/testdata/arm64.s b/src/cmd/asm/internal/asm/testdata/arm64.s index 46ea6645af..ecad08b37a 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64.s +++ b/src/cmd/asm/internal/asm/testdata/arm64.s @@ -981,6 +981,14 @@ again: ADR next, R11 // ADR R11 // 2b000010 next: NOP + ADR -2(PC), R10 // 0a000010 + ADR 2(PC), R16 // 10000010 + ADR -26(PC), R1 // 01000010 + ADR 12(PC), R2 // 02000010 + ADRP -2(PC), R10 // 0a000090 + ADRP 2(PC), R16 // 10000090 + ADRP -26(PC), R1 // 01000090 + ADRP 12(PC), R2 // 02000090 // LDP/STP LDP (R0), (R0, R1) // 000440a9 @@ -1003,6 +1011,7 @@ next: LDP -8(R0), (R1, R2) // 01887fa9 LDP x(SB), (R1, R2) LDP x+8(SB), (R1, R2) + LDP 8(R1), (ZR, R2) // 3f8840a9 LDPW -5(R0), (R1, R2) // 1b1400d1610b4029 LDPW (R0), (R1, R2) // 01084029 LDPW 4(R0), (R1, R2) // 01884029 @@ -1020,6 +1029,7 @@ next: LDPW 1024(RSP), (R1, R2) // fb031091610b4029 LDPW x(SB), (R1, R2) LDPW x+8(SB), (R1, R2) + LDPW 8(R1), (ZR, R2) // 3f084129 LDPSW (R0), (R1, R2) // 01084069 LDPSW 4(R0), (R1, R2) // 01884069 LDPSW -4(R0), (R1, R2) // 01887f69 @@ -1036,6 +1046,7 @@ next: LDPSW 1024(RSP), (R1, R2) // fb031091610b4069 LDPSW x(SB), (R1, R2) LDPSW x+8(SB), (R1, R2) + LDPSW 8(R1), (ZR, R2) // 3f084169 STP (R3, R4), (R5) // a31000a9 STP (R3, R4), 8(R5) // a39000a9 STP.W (R3, R4), 8(R5) // a39080a9 diff --git a/src/cmd/asm/internal/asm/testdata/arm64error.s b/src/cmd/asm/internal/asm/testdata/arm64error.s index e1eafa2b46..3ac8788424 100644 --- a/src/cmd/asm/internal/asm/testdata/arm64error.s +++ b/src/cmd/asm/internal/asm/testdata/arm64error.s @@ -66,7 +66,6 @@ TEXT errors(SB),$0 LDP.W 8(R3), (R2, R3) // ERROR "constrained unpredictable behavior" LDP (R1), (R2, R2) // ERROR "constrained unpredictable behavior" LDP (R0), (F0, F1) // ERROR "invalid register pair" - LDP (R0), (R3, ZR) // ERROR "invalid register pair" LDXPW (RSP), (R2, 
R2) // ERROR "constrained unpredictable behavior" LDAXPW (R5), (R2, R2) // ERROR "constrained unpredictable behavior" MOVD.P 300(R2), R3 // ERROR "offset out of range [-256,255]" diff --git a/src/cmd/asm/internal/asm/testdata/ppc64.s b/src/cmd/asm/internal/asm/testdata/ppc64.s index 01052b49e7..da0b25c1ac 100644 --- a/src/cmd/asm/internal/asm/testdata/ppc64.s +++ b/src/cmd/asm/internal/asm/testdata/ppc64.s @@ -230,10 +230,10 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 OR $-32767, R5, R6 // 3be080017fe62b78 OR $-32768, R6 // 3be080007fe63378 OR $-32768, R6, R7 // 3be080007fe73378 - OR $1234567, R5 // 641f001263ffd6877fe52b78 - OR $1234567, R5, R3 // 641f001263ffd6877fe32b78 + OR $1234567, R5 // 64a5001260a5d687 + OR $1234567, R5, R3 // 64a300126063d687 OR $2147483648, R5, R3 // 64a38000 - OR $2147483649, R5, R3 // 641f800063ff00017fe32b78 + OR $2147483649, R5, R3 // 64a3800060630001 ORIS $255, R3, R4 // 646400ff OR $16711680, R3, R4 // 646400ff @@ -249,8 +249,8 @@ TEXT asmtest(SB),DUPOK|NOSPLIT,$0 XOR $-32767, R5, R6 // 3be080017fe62a78 XOR $-32768, R6 // 3be080007fe63278 XOR $-32768, R6, R7 // 3be080007fe73278 - XOR $1234567, R5 // 641f001263ffd6877fe52a78 - XOR $1234567, R5, R3 // 641f001263ffd6877fe32a78 + XOR $1234567, R5 // 6ca5001268a5d687 + XOR $1234567, R5, R3 // 6ca300126863d687 XORIS $15, R3, R4 // 6c64000f XOR $983040, R3, R4 // 6c64000f diff --git a/src/cmd/asm/internal/asm/testdata/riscv64.s b/src/cmd/asm/internal/asm/testdata/riscv64.s index 072302b225..a5ab254eaa 100644 --- a/src/cmd/asm/internal/asm/testdata/riscv64.s +++ b/src/cmd/asm/internal/asm/testdata/riscv64.s @@ -233,11 +233,31 @@ start: // 11.7: Single-Precision Floating-Point Conversion and Move Instructions FCVTWS F0, X5 // d31200c0 + FCVTWS.RNE F0, X5 // d30200c0 + FCVTWS.RTZ F0, X5 // d31200c0 + FCVTWS.RDN F0, X5 // d32200c0 + FCVTWS.RUP F0, X5 // d33200c0 + FCVTWS.RMM F0, X5 // d34200c0 FCVTLS F0, X5 // d31220c0 + FCVTLS.RNE F0, X5 // d30220c0 + FCVTLS.RTZ F0, X5 // d31220c0 + FCVTLS.RDN F0, 
X5 // d32220c0 + FCVTLS.RUP F0, X5 // d33220c0 + FCVTLS.RMM F0, X5 // d34220c0 FCVTSW X5, F0 // 538002d0 FCVTSL X5, F0 // 538022d0 FCVTWUS F0, X5 // d31210c0 + FCVTWUS.RNE F0, X5 // d30210c0 + FCVTWUS.RTZ F0, X5 // d31210c0 + FCVTWUS.RDN F0, X5 // d32210c0 + FCVTWUS.RUP F0, X5 // d33210c0 + FCVTWUS.RMM F0, X5 // d34210c0 FCVTLUS F0, X5 // d31230c0 + FCVTLUS.RNE F0, X5 // d30230c0 + FCVTLUS.RTZ F0, X5 // d31230c0 + FCVTLUS.RDN F0, X5 // d32230c0 + FCVTLUS.RUP F0, X5 // d33230c0 + FCVTLUS.RMM F0, X5 // d34230c0 FCVTSWU X5, F0 // 538012d0 FCVTSLU X5, F0 // 538032d0 FSGNJS F1, F0, F2 // 53011020 @@ -277,11 +297,31 @@ start: // 12.5: Double-Precision Floating-Point Conversion and Move Instructions FCVTWD F0, X5 // d31200c2 + FCVTWD.RNE F0, X5 // d30200c2 + FCVTWD.RTZ F0, X5 // d31200c2 + FCVTWD.RDN F0, X5 // d32200c2 + FCVTWD.RUP F0, X5 // d33200c2 + FCVTWD.RMM F0, X5 // d34200c2 FCVTLD F0, X5 // d31220c2 + FCVTLD.RNE F0, X5 // d30220c2 + FCVTLD.RTZ F0, X5 // d31220c2 + FCVTLD.RDN F0, X5 // d32220c2 + FCVTLD.RUP F0, X5 // d33220c2 + FCVTLD.RMM F0, X5 // d34220c2 FCVTDW X5, F0 // 538002d2 FCVTDL X5, F0 // 538022d2 FCVTWUD F0, X5 // d31210c2 + FCVTWUD.RNE F0, X5 // d30210c2 + FCVTWUD.RTZ F0, X5 // d31210c2 + FCVTWUD.RDN F0, X5 // d32210c2 + FCVTWUD.RUP F0, X5 // d33210c2 + FCVTWUD.RMM F0, X5 // d34210c2 FCVTLUD F0, X5 // d31230c2 + FCVTLUD.RNE F0, X5 // d30230c2 + FCVTLUD.RTZ F0, X5 // d31230c2 + FCVTLUD.RDN F0, X5 // d32230c2 + FCVTLUD.RUP F0, X5 // d33230c2 + FCVTLUD.RMM F0, X5 // d34230c2 FCVTDWU X5, F0 // 538012d2 FCVTDLU X5, F0 // 538032d2 FCVTSD F0, F1 // d3001040 diff --git a/src/cmd/asm/internal/asm/testdata/s390x.s b/src/cmd/asm/internal/asm/testdata/s390x.s index 82aa445356..977190678f 100644 --- a/src/cmd/asm/internal/asm/testdata/s390x.s +++ b/src/cmd/asm/internal/asm/testdata/s390x.s @@ -419,9 +419,9 @@ TEXT main·foo(SB),DUPOK|NOSPLIT,$16-0 // TEXT main.foo(SB), DUPOK|NOSPLIT, $16- KMC R2, R6 // b92f0026 KLMD R2, R8 // b93f0028 KIMD R0, R4 // b93e0004 - KDSA 
R0, R8 // b93a0008 - KMA R6, R2, R4 // b9296024 - KMCTR R6, R2, R4 // b92d6024 + KDSA R0, R8 // b93a0008 + KMA R2, R6, R4 // b9296024 + KMCTR R2, R6, R4 // b92d6024 // vector add and sub instructions VAB V3, V4, V4 // e743400000f3 diff --git a/src/cmd/cgo/internal/test/callback_windows.go b/src/cmd/cgo/internal/test/callback_windows.go index 95e97c9af9..77bdfa4dd3 100644 --- a/src/cmd/cgo/internal/test/callback_windows.go +++ b/src/cmd/cgo/internal/test/callback_windows.go @@ -29,7 +29,7 @@ USHORT backtrace(ULONG FramesToCapture, PVOID *BackTrace) { } ControlPc = context.Rip; - // Check if we left the user range. + // Check if we left the user range. if (ControlPc < 0x10000) { break; } @@ -65,32 +65,17 @@ func testCallbackCallersSEH(t *testing.T) { // TODO: support SEH on other architectures. t.Skip("skipping on non-amd64") } - const cgoexpPrefix = "_cgoexp_" + // Only frames in the test package are checked. want := []string{ - "runtime.asmcgocall_landingpad", - "runtime.asmcgocall", - "runtime.cgocall", "test._Cfunc_backtrace", "test.testCallbackCallersSEH.func1.1", "test.testCallbackCallersSEH.func1", "test.goCallback", - cgoexpPrefix + "goCallback", - "runtime.cgocallbackg1", - "runtime.cgocallbackg", - "runtime.cgocallbackg", - "runtime.cgocallback", - "crosscall2", - "runtime.asmcgocall_landingpad", - "runtime.asmcgocall", - "runtime.cgocall", "test._Cfunc_callback", "test.nestedCall.func1", "test.nestedCall", "test.testCallbackCallersSEH", "test.TestCallbackCallersSEH", - "testing.tRunner", - "testing.(*T).Run.gowrap1", - "runtime.goexit", } pc := make([]uintptr, 100) n := 0 @@ -105,26 +90,17 @@ func testCallbackCallersSEH(t *testing.T) { } fname := f.Name() switch fname { - case "goCallback", "callback": - // TODO(qmuntal): investigate why these functions don't appear + case "goCallback": + // TODO(qmuntal): investigate why this function doesn't appear // when using the external linker. 
continue } - // Skip cgo-generated functions, the runtime might not know about them, - // depending on the link mode. - if strings.HasPrefix(fname, "_cgo_") { - continue - } - // Remove the cgo-generated random prefix. - if strings.HasPrefix(fname, cgoexpPrefix) { - idx := strings.Index(fname[len(cgoexpPrefix):], "_") - if idx >= 0 { - fname = cgoexpPrefix + fname[len(cgoexpPrefix)+idx+1:] - } - } // In module mode, this package has a fully-qualified import path. // Remove it if present. fname = strings.TrimPrefix(fname, "cmd/cgo/internal/") + if !strings.HasPrefix(fname, "test.") { + continue + } got = append(got, fname) } if !reflect.DeepEqual(want, got) { diff --git a/src/cmd/cgo/internal/test/cgo_stubs_ppc64x_internal_linking_test.go b/src/cmd/cgo/internal/test/cgo_stubs_ppc64x_internal_linking_test.go new file mode 100644 index 0000000000..015a62ccbe --- /dev/null +++ b/src/cmd/cgo/internal/test/cgo_stubs_ppc64x_internal_linking_test.go @@ -0,0 +1,15 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build (ppc64 || ppc64le) && internal + +package cgotest + +import "testing" + +// If gcc is used, and linking internally, __mulsc3 and __muldc3 +// will be linked in from libgcc which make several R_PPC64_TOC16_DS +// relocations which may not be resolvable with the internal linker. +func test8694(t *testing.T) { t.Skip("not supported on ppc64/ppc64le with internal linking") } +func test9510(t *testing.T) { t.Skip("not supported on ppc64/ppc64le with internal linking") } diff --git a/src/cmd/cgo/internal/test/issue4339.c b/src/cmd/cgo/internal/test/issue4339.c index 15d0004078..d0e64878d1 100644 --- a/src/cmd/cgo/internal/test/issue4339.c +++ b/src/cmd/cgo/internal/test/issue4339.c @@ -1,3 +1,7 @@ +// Copyright 2013 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + #include #include "issue4339.h" diff --git a/src/cmd/cgo/internal/test/issue4339.h b/src/cmd/cgo/internal/test/issue4339.h index 20f6cebb6b..99a09960e2 100644 --- a/src/cmd/cgo/internal/test/issue4339.h +++ b/src/cmd/cgo/internal/test/issue4339.h @@ -1,3 +1,7 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + typedef struct Issue4339 Issue4339; struct Issue4339 { diff --git a/src/cmd/cgo/internal/test/issue8694.go b/src/cmd/cgo/internal/test/issue8694.go index 3b8f065d27..117547cfa4 100644 --- a/src/cmd/cgo/internal/test/issue8694.go +++ b/src/cmd/cgo/internal/test/issue8694.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !android +//go:build !android && !((ppc64 || ppc64le) && internal) package cgotest diff --git a/src/cmd/cgo/internal/test/issue8756.go b/src/cmd/cgo/internal/test/issue8756.go index 817f449e96..d8eadfde6d 100644 --- a/src/cmd/cgo/internal/test/issue8756.go +++ b/src/cmd/cgo/internal/test/issue8756.go @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package cgotest /* diff --git a/src/cmd/cgo/internal/test/issue8756/issue8756.go b/src/cmd/cgo/internal/test/issue8756/issue8756.go index 223397f067..02a1424b9f 100644 --- a/src/cmd/cgo/internal/test/issue8756/issue8756.go +++ b/src/cmd/cgo/internal/test/issue8756/issue8756.go @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package issue8756 /* diff --git a/src/cmd/cgo/internal/test/issue8828/trivial.go b/src/cmd/cgo/internal/test/issue8828/trivial.go index e7b9a4e573..9f2619654f 100644 --- a/src/cmd/cgo/internal/test/issue8828/trivial.go +++ b/src/cmd/cgo/internal/test/issue8828/trivial.go @@ -1,3 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package issue8828 //void foo(); diff --git a/src/cmd/cgo/internal/test/issue9026/issue9026.go b/src/cmd/cgo/internal/test/issue9026/issue9026.go index ff269ca9eb..13bc180321 100644 --- a/src/cmd/cgo/internal/test/issue9026/issue9026.go +++ b/src/cmd/cgo/internal/test/issue9026/issue9026.go @@ -1,3 +1,7 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package issue9026 // This file appears in its own package since the assertion tests the diff --git a/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s b/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s index 1f492eafe9..3edba3dd82 100644 --- a/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s +++ b/src/cmd/cgo/internal/test/issue9400/asm_mips64x.s @@ -1,4 +1,4 @@ -// Copyright 2016 The Go Authors. All rights reserved. +// Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s b/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s index fa34f6bd37..0f10e3a326 100644 --- a/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s +++ b/src/cmd/cgo/internal/test/issue9400/asm_riscv64.s @@ -1,4 +1,4 @@ -// Copyright 2020 The Go Authors. All rights reserved. +// Copyright 2020 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/cgo/internal/test/issue9510.go b/src/cmd/cgo/internal/test/issue9510.go index 7f0aff4fe4..c000a047f6 100644 --- a/src/cmd/cgo/internal/test/issue9510.go +++ b/src/cmd/cgo/internal/test/issue9510.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build cgo +//go:build cgo && !((ppc64 || ppc64le) && internal) // Test that we can link together two different cgo packages that both // use the same libgcc function. diff --git a/src/cmd/cgo/internal/test/issue9510a/a.go b/src/cmd/cgo/internal/test/issue9510a/a.go index 1a5224b8c6..f0a0128d10 100644 --- a/src/cmd/cgo/internal/test/issue9510a/a.go +++ b/src/cmd/cgo/internal/test/issue9510a/a.go @@ -1,3 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package issue9510a /* diff --git a/src/cmd/cgo/internal/test/issue9510b/b.go b/src/cmd/cgo/internal/test/issue9510b/b.go index 5016b39597..6e22508c32 100644 --- a/src/cmd/cgo/internal/test/issue9510b/b.go +++ b/src/cmd/cgo/internal/test/issue9510b/b.go @@ -1,3 +1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package issue9510b /* diff --git a/src/cmd/cgo/internal/test/seh_internal_windows_test.go b/src/cmd/cgo/internal/test/seh_internal_windows_test.go new file mode 100644 index 0000000000..708ffdc6f6 --- /dev/null +++ b/src/cmd/cgo/internal/test/seh_internal_windows_test.go @@ -0,0 +1,16 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cgo && windows && internal + +package cgotest + +import ( + "internal/testenv" + "testing" +) + +func TestCallbackCallersSEH(t *testing.T) { + testenv.SkipFlaky(t, 65116) +} diff --git a/src/cmd/cgo/internal/test/cgo_windows_test.go b/src/cmd/cgo/internal/test/seh_windows_test.go similarity index 87% rename from src/cmd/cgo/internal/test/cgo_windows_test.go rename to src/cmd/cgo/internal/test/seh_windows_test.go index 7bbed5b04e..4a8d5bbd4d 100644 --- a/src/cmd/cgo/internal/test/cgo_windows_test.go +++ b/src/cmd/cgo/internal/test/seh_windows_test.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build cgo && windows +//go:build cgo && windows && !internal package cgotest diff --git a/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go b/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go index b0c507477f..d3ab1902c1 100644 --- a/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go +++ b/src/cmd/cgo/internal/testgodefs/testdata/fieldtypedef.go @@ -1,4 +1,4 @@ -// Copyright 2018 The Go Authors. All rights reserve d. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/cgo/internal/testsanitizers/cc_test.go b/src/cmd/cgo/internal/testsanitizers/cc_test.go index e212a4fd98..e650de835a 100644 --- a/src/cmd/cgo/internal/testsanitizers/cc_test.go +++ b/src/cmd/cgo/internal/testsanitizers/cc_test.go @@ -16,8 +16,10 @@ import ( "encoding/json" "errors" "fmt" + "internal/testenv" "os" "os/exec" + "os/user" "path/filepath" "regexp" "strconv" @@ -266,12 +268,28 @@ func compilerSupportsLocation() bool { case "gcc": return compiler.major >= 10 case "clang": + // TODO(65606): The clang toolchain on the LUCI builders is not built against + // zlib, the ASAN runtime can't actually symbolize its own stack trace. 
Once + // this is resolved, one way or another, switch this back to 'true'. We still + // have coverage from the 'gcc' case above. + if inLUCIBuild() { + return false + } return true default: return false } } +// inLUCIBuild returns true if we're currently executing in a LUCI build. +func inLUCIBuild() bool { + u, err := user.Current() + if err != nil { + return false + } + return testenv.Builder() != "" && u.Username == "swarming" +} + // compilerRequiredTsanVersion reports whether the compiler is the version required by Tsan. // Only restrictions for ppc64le are known; otherwise return true. func compilerRequiredTsanVersion(goos, goarch string) bool { diff --git a/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go b/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go index f84c9f37ae..3f5b1d91c7 100644 --- a/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go +++ b/src/cmd/cgo/internal/testsanitizers/libfuzzer_test.go @@ -7,11 +7,14 @@ package sanitizers_test import ( + "internal/testenv" "strings" "testing" ) func TestLibFuzzer(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) goos, err := goEnv("GOOS") if err != nil { t.Fatal(err) diff --git a/src/cmd/cgo/internal/testsanitizers/msan_test.go b/src/cmd/cgo/internal/testsanitizers/msan_test.go index 1a22b5246c..83d66f6660 100644 --- a/src/cmd/cgo/internal/testsanitizers/msan_test.go +++ b/src/cmd/cgo/internal/testsanitizers/msan_test.go @@ -8,11 +8,14 @@ package sanitizers_test import ( "internal/platform" + "internal/testenv" "strings" "testing" ) func TestMSAN(t *testing.T) { + testenv.MustHaveGoBuild(t) + testenv.MustHaveCGO(t) goos, err := goEnv("GOOS") if err != nil { t.Fatal(err) diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md index 43dc39689b..eae230dc07 100644 --- a/src/cmd/compile/abi-internal.md +++ b/src/cmd/compile/abi-internal.md @@ -633,6 +633,56 @@ modifying or saving the FPCR. 
Functions are allowed to modify it between calls (as long as they restore it), but as of this writing Go code never does. +### loong64 architecture + +The loong64 architecture uses R4 – R19 for integer arguments and integer results. + +It uses F0 – F15 for floating-point arguments and results. + +Registers R20 - R21, R23 – R28, R30 - R31, F16 – F31 are permanent scratch registers. + +Register R2 is reserved and never used. + +Register R20, R21 is Used by runtime.duffcopy, runtime.duffzero. + +Special-purpose registers used within Go generated code and Go assembly code +are as follows: + +| Register | Call meaning | Return meaning | Body meaning | +| --- | --- | --- | --- | +| R0 | Zero value | Same | Same | +| R1 | Link register | Link register | Scratch | +| R3 | Stack pointer | Same | Same | +| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero | +| R22 | Current goroutine | Same | Same | +| R29 | Closure context pointer | Same | Same | +| R30, R31 | used by the assembler | Same | Same | + +*Rationale*: These register meanings are compatible with Go’s stack-based +calling convention. + +#### Stack layout + +The stack pointer, R3, grows down and is aligned to 8 bytes. + +A function's stack frame, after the frame is created, is laid out as +follows: + + +------------------------------+ + | ... locals ... | + | ... outgoing arguments ... | + | return PC | ← R3 points to + +------------------------------+ ↓ lower addresses + +This stack layout is used by both register-based (ABIInternal) and +stack-based (ABI0) calling conventions. + +The "return PC" is loaded to the link register, R1, as part of the +loong64 `JAL` operation. + +#### Flags +All bits in CSR are system flags and are not modified by Go. 
+ ### ppc64 architecture The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments diff --git a/src/cmd/compile/default.pgo b/src/cmd/compile/default.pgo index 2ba79688d4..0f925ec69c 100644 Binary files a/src/cmd/compile/default.pgo and b/src/cmd/compile/default.pgo differ diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go index 0a60368afa..507899e222 100644 --- a/src/cmd/compile/doc.go +++ b/src/cmd/compile/doc.go @@ -295,5 +295,27 @@ The declaration of lower.f may also have a linkname directive with a single argument, f. This is optional, but helps alert the reader that the function is accessed from outside the package. + //go:wasmimport importmodule importname + +The //go:wasmimport directive is wasm-only and must be followed by a +function declaration. +It specifies that the function is provided by a wasm module identified +by ``importmodule`` and ``importname``. + + //go:wasmimport a_module f + func g() + +The types of parameters and return values to the Go function are translated to +Wasm according to the following table: + + Go types Wasm types + int32, uint32 i32 + int64, uint64 i64 + float32 f32 + float64 f64 + unsafe.Pointer i32 + +Any other parameter types are disallowed by the compiler. 
+ */ package main diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go index 23e52bacbf..43d811832e 100644 --- a/src/cmd/compile/internal/arm/galign.go +++ b/src/cmd/compile/internal/arm/galign.go @@ -15,7 +15,7 @@ func Init(arch *ssagen.ArchInfo) { arch.LinkArch = &arm.Linkarm arch.REGSP = arm.REGSP arch.MAXWIDTH = (1 << 32) - 1 - arch.SoftFloat = buildcfg.GOARM == 5 + arch.SoftFloat = buildcfg.GOARM.SoftFloat arch.ZeroRange = zerorange arch.Ginsnop = ginsnop diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go index 7fcbb4d024..638ed3ed4e 100644 --- a/src/cmd/compile/internal/arm/ssa.go +++ b/src/cmd/compile/internal/arm/ssa.go @@ -289,7 +289,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { case ssa.OpARMANDconst, ssa.OpARMBICconst: // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks // BFC is only available on ARMv7, and its result and source are in the same register - if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() { + if buildcfg.GOARM.Version == 7 && v.Reg() == v.Args[0].Reg() { var val uint32 if v.Op == ssa.OpARMANDconst { val = ^uint32(v.AuxInt) @@ -646,7 +646,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { default: } } - if buildcfg.GOARM >= 6 { + if buildcfg.GOARM.Version >= 6 { // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7 genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0) return diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go index a85f0139fc..420ad1305e 100644 --- a/src/cmd/compile/internal/base/debug.go +++ b/src/cmd/compile/internal/base/debug.go @@ -36,11 +36,11 @@ type DebugFlags struct { Gossahash string `help:"hash value for use in debugging the compiler"` InlFuncsWithClosures int `help:"allow functions with closures to be inlined" concurrent:"ok"` InlStaticInit int `help:"allow static initialization of inlined calls" 
concurrent:"ok"` - InterfaceCycles int `help:"allow anonymous interface cycles"` Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"` LoopVar int `help:"shared (0, default), 1 (private loop variables), 2, private + log"` LoopVarHash string `help:"for debugging changes in loop behavior. Overrides experiment and loopvar flag."` LocationLists int `help:"print information about DWARF location list creation"` + MaxShapeLen int `help:"hash shape names longer than this threshold (default 500)" concurrent:"ok"` Nil int `help:"print information about nil checks"` NoOpenDefer int `help:"disable open-coded defers" concurrent:"ok"` NoRefName int `help:"do not include referenced symbol names in object file" concurrent:"ok"` diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go index e2e15c3c9c..5b3c3ad8c6 100644 --- a/src/cmd/compile/internal/base/flag.go +++ b/src/cmd/compile/internal/base/flag.go @@ -124,7 +124,7 @@ type CmdFlags struct { TraceProfile string "help:\"write an execution trace to `file`\"" TrimPath string "help:\"remove `prefix` from recorded source file paths\"" WB bool "help:\"enable write barrier\"" // TODO: remove - PgoProfile string "help:\"read profile from `file`\"" + PgoProfile string "help:\"read profile or pre-process profile from `file`\"" ErrorURL bool "help:\"print explanatory URL with error message if applicable\"" // Configuration derived from flags; not a flag itself. 
@@ -176,6 +176,7 @@ func ParseFlags() { Flag.WB = true Debug.ConcurrentOk = true + Debug.MaxShapeLen = 500 Debug.InlFuncsWithClosures = 1 Debug.InlStaticInit = 1 Debug.PGOInline = 1 diff --git a/src/cmd/compile/internal/base/hashdebug.go b/src/cmd/compile/internal/base/hashdebug.go index de7f01f09e..4e36c8d549 100644 --- a/src/cmd/compile/internal/base/hashdebug.go +++ b/src/cmd/compile/internal/base/hashdebug.go @@ -197,14 +197,13 @@ func NewHashDebug(ev, s string, file io.Writer) *HashDebug { break } if i == 0 { - hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s", ev))) + hd.matches = append(hd.matches, toHashAndMask(s, ev)) } else { hd.matches = append(hd.matches, toHashAndMask(s, fmt.Sprintf("%s%d", ev, i-1))) } i++ } return hd - } // TODO: Delete when we switch to bisect-only. diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go index 7b3a869d8e..5d1b952627 100644 --- a/src/cmd/compile/internal/devirtualize/devirtualize.go +++ b/src/cmd/compile/internal/devirtualize/devirtualize.go @@ -18,39 +18,27 @@ import ( "cmd/compile/internal/types" ) -// Static devirtualizes calls within fn where possible when the concrete callee +// StaticCall devirtualizes the given call if possible when the concrete callee // is available statically. -func Static(fn *ir.Func) { - ir.CurFunc = fn +func StaticCall(call *ir.CallExpr) { + // For promoted methods (including value-receiver methods promoted + // to pointer-receivers), the interface method wrapper may contain + // expressions that can panic (e.g., ODEREF, ODOTPTR, + // ODOTINTER). Devirtualization involves inlining these expressions + // (and possible panics) to the call site. This normally isn't a + // problem, but for go/defer statements it can move the panic from + // when/where the call executes to the go/defer statement itself, + // which is a visible change in semantics (e.g., #52072). 
To prevent + // this, we skip devirtualizing calls within go/defer statements + // altogether. + if call.GoDefer { + return + } - // For promoted methods (including value-receiver methods promoted to pointer-receivers), - // the interface method wrapper may contain expressions that can panic (e.g., ODEREF, ODOTPTR, ODOTINTER). - // Devirtualization involves inlining these expressions (and possible panics) to the call site. - // This normally isn't a problem, but for go/defer statements it can move the panic from when/where - // the call executes to the go/defer statement itself, which is a visible change in semantics (e.g., #52072). - // To prevent this, we skip devirtualizing calls within go/defer statements altogether. - goDeferCall := make(map[*ir.CallExpr]bool) - ir.VisitList(fn.Body, func(n ir.Node) { - switch n := n.(type) { - case *ir.GoDeferStmt: - if call, ok := n.Call.(*ir.CallExpr); ok { - goDeferCall[call] = true - } - return - case *ir.CallExpr: - if !goDeferCall[n] { - staticCall(n) - } - } - }) -} - -// staticCall devirtualizes the given call if possible when the concrete callee -// is available statically. -func staticCall(call *ir.CallExpr) { if call.Op() != ir.OCALLINTER { return } + sel := call.Fun.(*ir.SelectorExpr) r := ir.StaticValue(sel.X) if r.Op() != ir.OCONVIFACE { @@ -70,7 +58,7 @@ func staticCall(call *ir.CallExpr) { return } - // If typ *has* a shape type, then it's an shaped, instantiated + // If typ *has* a shape type, then it's a shaped, instantiated // type like T[go.shape.int], and its methods (may) have an extra // dictionary parameter. We could devirtualize this call if we // could derive an appropriate dictionary argument. 
diff --git a/src/cmd/compile/internal/devirtualize/pgo.go b/src/cmd/compile/internal/devirtualize/pgo.go index 05b37d6be6..5cc9fab54c 100644 --- a/src/cmd/compile/internal/devirtualize/pgo.go +++ b/src/cmd/compile/internal/devirtualize/pgo.go @@ -107,9 +107,6 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) { name := ir.LinkFuncName(fn) - // Can't devirtualize go/defer calls. See comment in Static. - goDeferCall := make(map[*ir.CallExpr]bool) - var jsonW *json.Encoder if base.Debug.PGODebug >= 3 { jsonW = json.NewEncoder(os.Stdout) @@ -121,12 +118,6 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) { return n } - if gds, ok := n.(*ir.GoDeferStmt); ok { - if call, ok := gds.Call.(*ir.CallExpr); ok { - goDeferCall[call] = true - } - } - ir.EditChildren(n, edit) call, ok := n.(*ir.CallExpr) @@ -156,7 +147,7 @@ func ProfileGuided(fn *ir.Func, p *pgo.Profile) { fmt.Printf("%v: PGO devirtualize considering call %v\n", ir.Line(call), call) } - if goDeferCall[call] { + if call.GoDefer { if base.Debug.PGODebug >= 2 { fmt.Printf("%v: can't PGO devirtualize go/defer call %v\n", ir.Line(call), call) } @@ -749,7 +740,7 @@ func findHotConcreteCallee(p *pgo.Profile, caller *ir.Func, call *ir.CallExpr, e } if base.Debug.PGODebug >= 2 { - fmt.Printf("%v call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight) + fmt.Printf("%v: call %s:%d: hottest callee %s (weight %d)\n", ir.Line(call), callerName, callOffset, hottest.Dst.Name(), hottest.Weight) } return hottest.Dst.AST, hottest.Weight } diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go index e9553d1185..512d8d22e7 100644 --- a/src/cmd/compile/internal/dwarfgen/dwarf.go +++ b/src/cmd/compile/internal/dwarfgen/dwarf.go @@ -16,6 +16,7 @@ import ( "cmd/compile/internal/reflectdata" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" + "cmd/compile/internal/typecheck" "cmd/compile/internal/types" 
"cmd/internal/dwarf" "cmd/internal/obj" @@ -100,7 +101,23 @@ func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn obj.Func) (scopes []dwarf.Sc } } - decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls) + var closureVars map[*ir.Name]int64 + if fn.Needctxt() { + closureVars = make(map[*ir.Name]int64) + csiter := typecheck.NewClosureStructIter(fn.ClosureVars) + for { + n, _, offset := csiter.Next() + if n == nil { + break + } + closureVars[n] = offset + if n.Heapaddr != nil { + closureVars[n.Heapaddr] = offset + } + } + } + + decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls, closureVars) // For each type referenced by the functions auto vars but not // already referenced by a dwarf var, attach an R_USETYPE relocation to @@ -137,18 +154,18 @@ func declPos(decl *ir.Name) src.XPos { // createDwarfVars process fn, returning a list of DWARF variables and the // Nodes they represent. -func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) { +func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name, closureVars map[*ir.Name]int64) ([]*ir.Name, []*dwarf.Var) { // Collect a raw list of DWARF vars. 
var vars []*dwarf.Var var decls []*ir.Name var selected ir.NameSet if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK { - decls, vars, selected = createComplexVars(fnsym, fn) + decls, vars, selected = createComplexVars(fnsym, fn, closureVars) } else if fn.ABI == obj.ABIInternal && base.Flag.N != 0 && complexOK { - decls, vars, selected = createABIVars(fnsym, fn, apDecls) + decls, vars, selected = createABIVars(fnsym, fn, apDecls, closureVars) } else { - decls, vars, selected = createSimpleVars(fnsym, apDecls) + decls, vars, selected = createSimpleVars(fnsym, apDecls, closureVars) } if fn.DebugInfo != nil { // Recover zero sized variables eliminated by the stackframe pass @@ -159,7 +176,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir types.CalcSize(n.Type()) if n.Type().Size() == 0 { decls = append(decls, n) - vars = append(vars, createSimpleVar(fnsym, n)) + vars = append(vars, createSimpleVar(fnsym, n, closureVars)) vars[len(vars)-1].StackOffset = 0 fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) } @@ -212,16 +229,16 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir // Args not of SSA-able type are treated here; they // are homed on the stack in a single place for the // entire call. - vars = append(vars, createSimpleVar(fnsym, n)) + vars = append(vars, createSimpleVar(fnsym, n, closureVars)) decls = append(decls, n) continue } typename := dwarf.InfoPrefix + types.TypeSymName(n.Type()) decls = append(decls, n) - abbrev := dwarf.DW_ABRV_AUTO_LOCLIST + tag := dwarf.DW_TAG_variable isReturnValue := (n.Class == ir.PPARAMOUT) if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT { - abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + tag = dwarf.DW_TAG_formal_parameter } if n.Esc() == ir.EscHeap { // The variable in question has been promoted to the heap. 
@@ -233,7 +250,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 if n.InlFormal() { - abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + tag = dwarf.DW_TAG_formal_parameter } } } @@ -241,7 +258,8 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir vars = append(vars, &dwarf.Var{ Name: n.Sym().Name, IsReturnValue: isReturnValue, - Abbrev: abbrev, + Tag: tag, + WithLoclist: true, StackOffset: int32(n.FrameOffset()), Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), @@ -250,6 +268,7 @@ func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir InlIndex: int32(inlIndex), ChildIndex: -1, DictIndex: n.DictIndex, + ClosureOffset: closureOffset(n, closureVars), }) // Record go type of to insure that it gets emitted by the linker. fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type())) @@ -333,7 +352,7 @@ func preInliningDcls(fnsym *obj.LSym) []*ir.Name { // createSimpleVars creates a DWARF entry for every variable declared in the // function, claiming that they are permanently on the stack. 
-func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { +func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name, closureVars map[*ir.Name]int64) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { var vars []*dwarf.Var var decls []*ir.Name var selected ir.NameSet @@ -343,14 +362,14 @@ func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf } decls = append(decls, n) - vars = append(vars, createSimpleVar(fnsym, n)) + vars = append(vars, createSimpleVar(fnsym, n, closureVars)) selected.Add(n) } return decls, vars, selected } -func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { - var abbrev int +func createSimpleVar(fnsym *obj.LSym, n *ir.Name, closureVars map[*ir.Name]int64) *dwarf.Var { + var tag int var offs int64 localAutoOffset := func() int64 { @@ -367,9 +386,9 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { switch n.Class { case ir.PAUTO: offs = localAutoOffset() - abbrev = dwarf.DW_ABRV_AUTO + tag = dwarf.DW_TAG_variable case ir.PPARAM, ir.PPARAMOUT: - abbrev = dwarf.DW_ABRV_PARAM + tag = dwarf.DW_TAG_formal_parameter if n.IsOutputParamInRegisters() { offs = localAutoOffset() } else { @@ -387,7 +406,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 if n.InlFormal() { - abbrev = dwarf.DW_ABRV_PARAM + tag = dwarf.DW_TAG_formal_parameter } } } @@ -396,7 +415,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { Name: n.Sym().Name, IsReturnValue: n.Class == ir.PPARAMOUT, IsInlFormal: n.InlFormal(), - Abbrev: abbrev, + Tag: tag, StackOffset: int32(offs), Type: base.Ctxt.Lookup(typename), DeclFile: declpos.RelFilename(), @@ -405,6 +424,7 @@ func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var { InlIndex: int32(inlIndex), ChildIndex: -1, DictIndex: n.DictIndex, + ClosureOffset: closureOffset(n, closureVars), } } @@ -413,11 +433,11 @@ func createSimpleVar(fnsym 
*obj.LSym, n *ir.Name) *dwarf.Var { // hybrid approach in which register-resident input params are // captured with location lists, and all other vars use the "simple" // strategy. -func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { +func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name, closureVars map[*ir.Name]int64) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { // Invoke createComplexVars to generate dwarf vars for input parameters // that are register-allocated according to the ABI rules. - decls, vars, selected := createComplexVars(fnsym, fn) + decls, vars, selected := createComplexVars(fnsym, fn, closureVars) // Now fill in the remainder of the variables: input parameters // that are not register-resident, output parameters, and local @@ -432,7 +452,7 @@ func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name } decls = append(decls, n) - vars = append(vars, createSimpleVar(fnsym, n)) + vars = append(vars, createSimpleVar(fnsym, n, closureVars)) selected.Add(n) } @@ -441,7 +461,7 @@ func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name // createComplexVars creates recomposed DWARF vars with location lists, // suitable for describing optimized code. -func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { +func createComplexVars(fnsym *obj.LSym, fn *ir.Func, closureVars map[*ir.Name]int64) ([]*ir.Name, []*dwarf.Var, ir.NameSet) { debugInfo := fn.DebugInfo.(*ssa.FuncDebug) // Produce a DWARF variable entry for each user variable. 
@@ -456,7 +476,7 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ssaVars.Add(debugInfo.Slots[slot].N) } - if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil { + if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID), closureVars); dvar != nil { decls = append(decls, n) vars = append(vars, dvar) } @@ -466,16 +486,16 @@ func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, } // createComplexVar builds a single DWARF variable entry and location list. -func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var { +func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID, closureVars map[*ir.Name]int64) *dwarf.Var { debug := fn.DebugInfo.(*ssa.FuncDebug) n := debug.Vars[varID] - var abbrev int + var tag int switch n.Class { case ir.PAUTO: - abbrev = dwarf.DW_ABRV_AUTO_LOCLIST + tag = dwarf.DW_TAG_variable case ir.PPARAM, ir.PPARAMOUT: - abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + tag = dwarf.DW_TAG_formal_parameter default: return nil } @@ -488,7 +508,7 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var if n.InlFormal() || n.InlLocal() { inlIndex = posInlIndex(n.Pos()) + 1 if n.InlFormal() { - abbrev = dwarf.DW_ABRV_PARAM_LOCLIST + tag = dwarf.DW_TAG_formal_parameter } } } @@ -497,19 +517,21 @@ func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var Name: n.Sym().Name, IsReturnValue: n.Class == ir.PPARAMOUT, IsInlFormal: n.InlFormal(), - Abbrev: abbrev, + Tag: tag, + WithLoclist: true, Type: base.Ctxt.Lookup(typename), // The stack offset is used as a sorting key, so for decomposed // variables just give it the first one. It's not used otherwise. // This won't work well if the first slot hasn't been assigned a stack // location, but it's not obvious how to do better. 
- StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]), - DeclFile: declpos.RelFilename(), - DeclLine: declpos.RelLine(), - DeclCol: declpos.RelCol(), - InlIndex: int32(inlIndex), - ChildIndex: -1, - DictIndex: n.DictIndex, + StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]), + DeclFile: declpos.RelFilename(), + DeclLine: declpos.RelLine(), + DeclCol: declpos.RelCol(), + InlIndex: int32(inlIndex), + ChildIndex: -1, + DictIndex: n.DictIndex, + ClosureOffset: closureOffset(n, closureVars), } list := debug.LocationLists[varID] if len(list) != 0 { @@ -592,3 +614,7 @@ func RecordPackageName() { base.Ctxt.Data = append(base.Ctxt.Data, s) s.P = []byte(types.LocalPkg.Name) } + +func closureOffset(n *ir.Name, closureVars map[*ir.Name]int64) int64 { + return closureVars[n] +} diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go index 655e7c66ac..bb3ef84df8 100644 --- a/src/cmd/compile/internal/dwarfgen/dwinl.go +++ b/src/cmd/compile/internal/dwarfgen/dwinl.go @@ -358,7 +358,7 @@ func dumpInlCalls(inlcalls dwarf.InlCalls) { func dumpInlVars(dwvars []*dwarf.Var) { for i, dwv := range dwvars { typ := "local" - if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM { + if dwv.Tag == dwarf.DW_TAG_formal_parameter { typ = "param" } ia := 0 diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go index bf40de0544..4a3753ada9 100644 --- a/src/cmd/compile/internal/escape/call.go +++ b/src/cmd/compile/internal/escape/call.go @@ -155,10 +155,17 @@ func (e *escape) call(ks []hole, call ir.Node) { e.discard(call.X) e.discard(call.Y) - case ir.ODELETE, ir.OMAX, ir.OMIN, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: + case ir.ODELETE, ir.OPRINT, ir.OPRINTLN, ir.ORECOVERFP: call := call.(*ir.CallExpr) - for i := range call.Args { - e.discard(call.Args[i]) + for _, arg := range call.Args { + e.discard(arg) + } + e.discard(call.RType) + + case 
ir.OMIN, ir.OMAX: + call := call.(*ir.CallExpr) + for _, arg := range call.Args { + argument(ks[0], arg) } e.discard(call.RType) diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go index f3baa67223..75e2546a7b 100644 --- a/src/cmd/compile/internal/escape/graph.go +++ b/src/cmd/compile/internal/escape/graph.go @@ -38,7 +38,7 @@ import ( // e.value(k, n.Left) // } -// An location represents an abstract location that stores a Go +// A location represents an abstract location that stores a Go // variable. type location struct { n ir.Node // represented variable or expression, if any diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go index a19962dabb..7e5069fced 100644 --- a/src/cmd/compile/internal/gc/main.go +++ b/src/cmd/compile/internal/gc/main.go @@ -9,10 +9,10 @@ import ( "bytes" "cmd/compile/internal/base" "cmd/compile/internal/coverage" - "cmd/compile/internal/devirtualize" "cmd/compile/internal/dwarfgen" "cmd/compile/internal/escape" "cmd/compile/internal/inline" + "cmd/compile/internal/inline/interleaved" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" "cmd/compile/internal/loopvar" @@ -224,30 +224,15 @@ func Main(archInit func(*ssagen.ArchInfo)) { } } - base.Timer.Start("fe", "pgo-devirtualization") - if profile != nil && base.Debug.PGODevirtualize > 0 { - // TODO(prattmic): No need to use bottom-up visit order. This - // is mirroring the PGO IRGraph visit order, which also need - // not be bottom-up. - ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { - for _, fn := range list { - devirtualize.ProfileGuided(fn, profile) - } - }) - ir.CurFunc = nil - } + // Interleaved devirtualization and inlining. 
+ base.Timer.Start("fe", "devirtualize-and-inline") + interleaved.DevirtualizeAndInlinePackage(typecheck.Target, profile) - // Inlining - base.Timer.Start("fe", "inlining") - if base.Flag.LowerL != 0 { - inline.InlinePackage(profile) - } noder.MakeWrappers(typecheck.Target) // must happen after inlining - // Devirtualize and get variable capture right in for loops + // Get variable capture right in for loops. var transformed []loopvar.VarAndLoop for _, fn := range typecheck.Target.Funcs { - devirtualize.Static(fn) transformed = append(transformed, loopvar.ForCapture(fn)...) } ir.CurFunc = nil diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go index 2677ae3086..8d2de22473 100644 --- a/src/cmd/compile/internal/inline/inl.go +++ b/src/cmd/compile/internal/inline/inl.go @@ -29,6 +29,7 @@ package inline import ( "fmt" "go/constant" + "internal/buildcfg" "strconv" "cmd/compile/internal/base" @@ -76,8 +77,8 @@ var ( inlineHotMaxBudget int32 = 2000 ) -// pgoInlinePrologue records the hot callsites from ir-graph. -func pgoInlinePrologue(p *pgo.Profile, funcs []*ir.Func) { +// PGOInlinePrologue records the hot callsites from ir-graph. +func PGOInlinePrologue(p *pgo.Profile) { if base.Debug.PGOInlineCDFThreshold != "" { if s, err := strconv.ParseFloat(base.Debug.PGOInlineCDFThreshold, 64); err == nil && s >= 0 && s <= 100 { inlineCDFHotCallSiteThresholdPercent = s @@ -118,7 +119,7 @@ func pgoInlinePrologue(p *pgo.Profile, funcs []*ir.Func) { // a percent, is the lower bound of weight for nodes to be considered hot // (currently only used in debug prints) (in case of equal weights, // comparing with the threshold may not accurately reflect which nodes are -// considiered hot). +// considered hot). 
func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) { cum := int64(0) for i, n := range p.NamedEdgeMap.ByWeight { @@ -134,79 +135,43 @@ func hotNodesFromCDF(p *pgo.Profile) (float64, []pgo.NamedCallEdge) { return 0, p.NamedEdgeMap.ByWeight } -// InlinePackage finds functions that can be inlined and clones them before walk expands them. -func InlinePackage(p *pgo.Profile) { - if base.Debug.PGOInline == 0 { - p = nil +// CanInlineFuncs computes whether a batch of functions are inlinable. +func CanInlineFuncs(funcs []*ir.Func, profile *pgo.Profile) { + if profile != nil { + PGOInlinePrologue(profile) } - inlheur.SetupScoreAdjustments() - - InlineDecls(p, typecheck.Target.Funcs, true) - - // Perform a garbage collection of hidden closures functions that - // are no longer reachable from top-level functions following - // inlining. See #59404 and #59638 for more context. - garbageCollectUnreferencedHiddenClosures() - - if base.Debug.DumpInlFuncProps != "" { - inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps) - } - if inlheur.Enabled() { - postProcessCallSites(p) - inlheur.TearDown() - } -} - -// InlineDecls applies inlining to the given batch of declarations. -func InlineDecls(p *pgo.Profile, funcs []*ir.Func, doInline bool) { - if p != nil { - pgoInlinePrologue(p, funcs) + if base.Flag.LowerL == 0 { + return } - doCanInline := func(n *ir.Func, recursive bool, numfns int) { - if !recursive || numfns > 1 { - // We allow inlining if there is no - // recursion, or the recursion cycle is - // across more than one function. - CanInline(n, p) - } else { - if base.Flag.LowerM > 1 && n.OClosure == nil { - fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname) + ir.VisitFuncsBottomUp(funcs, func(funcs []*ir.Func, recursive bool) { + numfns := numNonClosures(funcs) + + for _, fn := range funcs { + if !recursive || numfns > 1 { + // We allow inlining if there is no + // recursion, or the recursion cycle is + // across more than one function. 
+ CanInline(fn, profile) + } else { + if base.Flag.LowerM > 1 && fn.OClosure == nil { + fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(fn), fn.Nname) + } } - } - if inlheur.Enabled() { - analyzeFuncProps(n, p) - } - } - - ir.VisitFuncsBottomUp(funcs, func(list []*ir.Func, recursive bool) { - numfns := numNonClosures(list) - // We visit functions within an SCC in fairly arbitrary order, - // so by computing inlinability for all functions in the SCC - // before performing any inlining, the results are less - // sensitive to the order within the SCC (see #58905 for an - // example). - - // First compute inlinability for all functions in the SCC ... - for _, n := range list { - doCanInline(n, recursive, numfns) - } - // ... then make a second pass to do inlining of calls. - if doInline { - for _, n := range list { - InlineCalls(n, p) + if inlheur.Enabled() { + analyzeFuncProps(fn, profile) } } }) } -// garbageCollectUnreferencedHiddenClosures makes a pass over all the +// GarbageCollectUnreferencedHiddenClosures makes a pass over all the // top-level (non-hidden-closure) functions looking for nested closure // functions that are reachable, then sweeps through the Target.Decls // list and marks any non-reachable hidden closure function as dead. // See issues #59404 and #59638 for more context. -func garbageCollectUnreferencedHiddenClosures() { +func GarbageCollectUnreferencedHiddenClosures() { liveFuncs := make(map[*ir.Func]bool) @@ -253,7 +218,7 @@ func garbageCollectUnreferencedHiddenClosures() { } // inlineBudget determines the max budget for function 'fn' prior to -// analyzing the hairyness of the body of 'fn'. We pass in the pgo +// analyzing the hairiness of the body of 'fn'. 
We pass in the pgo // profile if available (which can change the budget), also a // 'relaxed' flag, which expands the budget slightly to allow for the // possibility that a call to the function might have its score @@ -265,7 +230,7 @@ func inlineBudget(fn *ir.Func, profile *pgo.Profile, relaxed bool, verbose bool) if profile != nil { if n, ok := profile.WeightedCG.IRNodes[ir.LinkFuncName(fn)]; ok { if _, ok := candHotCalleeMap[n]; ok { - budget = int32(inlineHotMaxBudget) + budget = inlineHotMaxBudget if verbose { fmt.Printf("hot-node enabled increased budget=%v for func=%v\n", budget, ir.PkgFuncName(fn)) } @@ -336,7 +301,7 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) { visitor := hairyVisitor{ curFunc: fn, - isBigFunc: isBigFunc(fn), + isBigFunc: IsBigFunc(fn), budget: budget, maxBudget: budget, extraCallCost: cc, @@ -348,20 +313,27 @@ func CanInline(fn *ir.Func, profile *pgo.Profile) { } n.Func.Inl = &ir.Inline{ - Cost: budget - visitor.budget, - Dcl: pruneUnusedAutos(n.Func.Dcl, &visitor), - HaveDcl: true, - + Cost: budget - visitor.budget, + Dcl: pruneUnusedAutos(n.Func.Dcl, &visitor), + HaveDcl: true, CanDelayResults: canDelayResults(fn), } + if base.Flag.LowerM != 0 || logopt.Enabled() { + noteInlinableFunc(n, fn, budget-visitor.budget) + } +} +// noteInlinableFunc issues a message to the user that the specified +// function is inlinable. +func noteInlinableFunc(n *ir.Name, fn *ir.Func, cost int32) { if base.Flag.LowerM > 1 { - fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, budget-visitor.budget, fn.Type(), ir.Nodes(fn.Body)) + fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, cost, fn.Type(), ir.Nodes(fn.Body)) } else if base.Flag.LowerM != 0 { fmt.Printf("%v: can inline %v\n", ir.Line(fn), n) } + // JSON optimization log output. 
if logopt.Enabled() { - logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", budget-visitor.budget)) + logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", cost)) } } @@ -585,7 +557,7 @@ opSwitch: // Check whether we'd actually inline this call. Set // log == false since we aren't actually doing inlining // yet. - if canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false) { + if ok, _ := canInlineCallExpr(v.curFunc, n, callee, v.isBigFunc, false); ok { // mkinlcall would inline this call [1], so use // the cost of the inline body as the cost of // the call, as that is what will actually @@ -732,14 +704,16 @@ opSwitch: // particular, to avoid breaking the existing inlinability regress // tests), we need to compensate for this here. // - // See also identical logic in isBigFunc. - if init := n.Rhs[0].Init(); len(init) == 1 { - if _, ok := init[0].(*ir.AssignListStmt); ok { - // 4 for each value, because each temporary variable now - // appears 3 times (DCL, LHS, RHS), plus an extra DCL node. - // - // 1 for the extra "tmp1, tmp2 = f()" assignment statement. - v.budget += 4*int32(len(n.Lhs)) + 1 + // See also identical logic in IsBigFunc. + if len(n.Rhs) > 0 { + if init := n.Rhs[0].Init(); len(init) == 1 { + if _, ok := init[0].(*ir.AssignListStmt); ok { + // 4 for each value, because each temporary variable now + // appears 3 times (DCL, LHS, RHS), plus an extra DCL node. + // + // 1 for the extra "tmp1, tmp2 = f()" assignment statement. + v.budget += 4*int32(len(n.Lhs)) + 1 + } } } @@ -771,12 +745,15 @@ opSwitch: return ir.DoChildren(n, v.do) } -func isBigFunc(fn *ir.Func) bool { +// IsBigFunc reports whether fn is a "big" function. +// +// Note: The criteria for "big" is heuristic and subject to change. 
+func IsBigFunc(fn *ir.Func) bool { budget := inlineBigFunctionNodes return ir.Any(fn, func(n ir.Node) bool { // See logic in hairyVisitor.doNode, explaining unified IR's // handling of "a, b = f()" assignments. - if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 { + if n, ok := n.(*ir.AssignListStmt); ok && n.Op() == ir.OAS2 && len(n.Rhs) > 0 { if init := n.Rhs[0].Init(); len(init) == 1 { if _, ok := init[0].(*ir.AssignListStmt); ok { budget += 4*len(n.Lhs) + 1 @@ -789,128 +766,40 @@ func isBigFunc(fn *ir.Func) bool { }) } -// InlineCalls/inlnode walks fn's statements and expressions and substitutes any -// calls made to inlineable functions. This is the external entry point. -func InlineCalls(fn *ir.Func, profile *pgo.Profile) { - if inlheur.Enabled() && !fn.Wrapper() { - inlheur.ScoreCalls(fn) - defer inlheur.ScoreCallsCleanup() +// TryInlineCall returns an inlined call expression for call, or nil +// if inlining is not possible. +func TryInlineCall(callerfn *ir.Func, call *ir.CallExpr, bigCaller bool, profile *pgo.Profile) *ir.InlinedCallExpr { + if base.Flag.LowerL == 0 { + return nil } - if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() { - inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps) + if call.Op() != ir.OCALLFUNC { + return nil } - savefn := ir.CurFunc - ir.CurFunc = fn - bigCaller := isBigFunc(fn) - if bigCaller && base.Flag.LowerM > 1 { - fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn) - } - var inlCalls []*ir.InlinedCallExpr - var edit func(ir.Node) ir.Node - edit = func(n ir.Node) ir.Node { - return inlnode(fn, n, bigCaller, &inlCalls, edit, profile) - } - ir.EditChildren(fn, edit) - - // If we inlined any calls, we want to recursively visit their - // bodies for further inlining. However, we need to wait until - // *after* the original function body has been expanded, or else - // inlCallee can have false positives (e.g., #54632). 
- for len(inlCalls) > 0 { - call := inlCalls[0] - inlCalls = inlCalls[1:] - ir.EditChildren(call, edit) + if call.GoDefer || call.NoInline { + return nil } - ir.CurFunc = savefn -} - -// inlnode recurses over the tree to find inlineable calls, which will -// be turned into OINLCALLs by mkinlcall. When the recursion comes -// back up will examine left, right, list, rlist, ninit, ntest, nincr, -// nbody and nelse and use one of the 4 inlconv/glue functions above -// to turn the OINLCALL into an expression, a statement, or patch it -// in to this nodes list or rlist as appropriate. -// NOTE it makes no sense to pass the glue functions down the -// recursion to the level where the OINLCALL gets created because they -// have to edit /this/ n, so you'd have to push that one down as well, -// but then you may as well do it here. so this is cleaner and -// shorter and less complicated. -// The result of inlnode MUST be assigned back to n, e.g. -// -// n.Left = inlnode(n.Left) -func inlnode(callerfn *ir.Func, n ir.Node, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr, edit func(ir.Node) ir.Node, profile *pgo.Profile) ir.Node { - if n == nil { - return n - } - - switch n.Op() { - case ir.ODEFER, ir.OGO: - n := n.(*ir.GoDeferStmt) - switch call := n.Call; call.Op() { - case ir.OCALLMETH: - base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck") - case ir.OCALLFUNC: - call := call.(*ir.CallExpr) - call.NoInline = true - } - case ir.OTAILCALL: - n := n.(*ir.TailCallStmt) - n.Call.NoInline = true // Not inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)? - - // TODO do them here (or earlier), - // so escape analysis can avoid more heapmoves. 
- case ir.OCLOSURE: - return n - case ir.OCALLMETH: - base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") - case ir.OCALLFUNC: - n := n.(*ir.CallExpr) - if n.Fun.Op() == ir.OMETHEXPR { - // Prevent inlining some reflect.Value methods when using checkptr, - // even when package reflect was compiled without it (#35073). - if meth := ir.MethodExprName(n.Fun); meth != nil { - s := meth.Sym() - if base.Debug.Checkptr != 0 { - switch types.ReflectSymName(s) { - case "Value.UnsafeAddr", "Value.Pointer": - return n - } - } + // Prevent inlining some reflect.Value methods when using checkptr, + // even when package reflect was compiled without it (#35073). + if base.Debug.Checkptr != 0 && call.Fun.Op() == ir.OMETHEXPR { + if method := ir.MethodExprName(call.Fun); method != nil { + switch types.ReflectSymName(method.Sym()) { + case "Value.UnsafeAddr", "Value.Pointer": + return nil } } } - lno := ir.SetPos(n) - - ir.EditChildren(n, edit) - - // with all the branches out of the way, it is now time to - // transmogrify this node itself unless inhibited by the - // switch at the top of this function. 
- switch n.Op() { - case ir.OCALLMETH: - base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck") - - case ir.OCALLFUNC: - call := n.(*ir.CallExpr) - if call.NoInline { - break - } - if base.Flag.LowerM > 3 { - fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.Fun) - } - if ir.IsIntrinsicCall(call) { - break - } - if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) { - n = mkinlcall(callerfn, call, fn, bigCaller, inlCalls) - } + if base.Flag.LowerM > 3 { + fmt.Printf("%v:call to func %+v\n", ir.Line(call), call.Fun) } - - base.Pos = lno - - return n + if ir.IsIntrinsicCall(call) { + return nil + } + if fn := inlCallee(callerfn, call.Fun, profile); fn != nil && typecheck.HaveInlineBody(fn) { + return mkinlcall(callerfn, call, fn, bigCaller) + } + return nil } // inlCallee takes a function-typed expression and returns the underlying function ONAME @@ -961,9 +850,10 @@ var InlineCall = func(callerfn *ir.Func, call *ir.CallExpr, fn *ir.Func, inlInde // inlineCostOK returns true if call n from caller to callee is cheap enough to // inline. bigCaller indicates that caller is a big function. // -// If inlineCostOK returns false, it also returns the max cost that the callee -// exceeded. -func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32) { +// In addition to the "cost OK" boolean, it also returns the "max +// cost" limit used to make the decision (which may differ depending +// on func size), and the score assigned to this specific callsite. +func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool, int32, int32) { maxCost := int32(inlineMaxBudget) if bigCaller { // We use this to restrict inlining into very big functions. @@ -977,12 +867,11 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool if ok { metric = int32(score) } - } if metric <= maxCost { // Simple case. Function is already cheap enough. 
- return true, 0 + return true, 0, metric } // We'll also allow inlining of hot functions below inlineHotMaxBudget, @@ -992,7 +881,7 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool csi := pgo.CallSiteInfo{LineOffset: lineOffset, Caller: caller} if _, ok := candHotEdgeMap[csi]; !ok { // Cold - return false, maxCost + return false, maxCost, metric } // Hot @@ -1001,47 +890,49 @@ func inlineCostOK(n *ir.CallExpr, caller, callee *ir.Func, bigCaller bool) (bool if base.Debug.PGODebug > 0 { fmt.Printf("hot-big check disallows inlining for call %s (cost %d) at %v in big function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller)) } - return false, maxCost + return false, maxCost, metric } if metric > inlineHotMaxBudget { - return false, inlineHotMaxBudget + return false, inlineHotMaxBudget, metric } if !base.PGOHash.MatchPosWithInfo(n.Pos(), "inline", nil) { // De-selected by PGO Hash. - return false, maxCost + return false, maxCost, metric } if base.Debug.PGODebug > 0 { fmt.Printf("hot-budget check allows inlining for call %s (cost %d) at %v in function %s\n", ir.PkgFuncName(callee), callee.Inl.Cost, ir.Line(n), ir.PkgFuncName(caller)) } - return true, 0 + return true, 0, metric } -// canInlineCallsite returns true if the call n from caller to callee can be -// inlined. bigCaller indicates that caller is a big function. log indicates -// that the 'cannot inline' reason should be logged. +// canInlineCallExpr returns true if the call n from caller to callee +// can be inlined, plus the score computed for the call expr in +// question. bigCaller indicates that caller is a big function. log +// indicates that the 'cannot inline' reason should be logged. // // Preconditions: CanInline(callee) has already been called. 
-func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) bool { +func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCaller bool, log bool) (bool, int32) { if callee.Inl == nil { // callee is never inlinable. if log && logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(callee))) } - return false + return false, 0 } - if ok, maxCost := inlineCostOK(n, callerfn, callee, bigCaller); !ok { + ok, maxCost, callSiteScore := inlineCostOK(n, callerfn, callee, bigCaller) + if !ok { // callee cost too high for this call site. if log && logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), fmt.Sprintf("cost %d of %s exceeds max caller cost %d", callee.Inl.Cost, ir.PkgFuncName(callee), maxCost)) } - return false + return false, 0 } if callee == callerfn { @@ -1049,7 +940,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa if log && logopt.Enabled() { logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(callerfn))) } - return false + return false, 0 } if base.Flag.Cfg.Instrumenting && types.IsNoInstrumentPkg(callee.Sym().Pkg) { @@ -1063,7 +954,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), fmt.Sprintf("call to runtime function %s in instrumented build", ir.PkgFuncName(callee))) } - return false + return false, 0 } if base.Flag.Race && types.IsNoRacePkg(callee.Sym().Pkg) { @@ -1071,7 +962,7 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(callerfn), fmt.Sprintf(`call to into "no-race" package function %s in race build`, ir.PkgFuncName(callee))) } - return false + return false, 0 } // 
Check if we've already inlined this function at this particular @@ -1094,24 +985,24 @@ func canInlineCallExpr(callerfn *ir.Func, n *ir.CallExpr, callee *ir.Func, bigCa fmt.Sprintf("repeated recursive cycle to %s", ir.PkgFuncName(callee))) } } - return false + return false, 0 } } - return true + return true, callSiteScore } -// If n is a OCALLFUNC node, and fn is an ONAME node for a -// function with an inlinable body, return an OINLCALL node that can replace n. -// The returned node's Ninit has the parameter assignments, the Nbody is the -// inlined function body, and (List, Rlist) contain the (input, output) -// parameters. +// mkinlcall returns an OINLCALL node that can replace OCALLFUNC n, or +// nil if it cannot be inlined. callerfn is the function that contains +// n, and fn is the function being called. +// // The result of mkinlcall MUST be assigned back to n, e.g. // // n.Left = mkinlcall(n.Left, fn, isddd) -func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool, inlCalls *[]*ir.InlinedCallExpr) ir.Node { - if !canInlineCallExpr(callerfn, n, fn, bigCaller, true) { - return n +func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool) *ir.InlinedCallExpr { + ok, score := canInlineCallExpr(callerfn, n, fn, bigCaller, true) + if !ok { + return nil } typecheck.AssertFixedCall(n) @@ -1169,7 +1060,12 @@ func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool, i } if base.Flag.LowerM != 0 { - fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) + if buildcfg.Experiment.NewInliner { + fmt.Printf("%v: inlining call to %v with score %d\n", + ir.Line(n), fn, score) + } else { + fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn) + } } if base.Flag.LowerM > 2 { fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n) @@ -1189,8 +1085,6 @@ func mkinlcall(callerfn *ir.Func, n *ir.CallExpr, fn *ir.Func, bigCaller bool, i inlheur.UpdateCallsiteTable(callerfn, n, res) } - *inlCalls = append(*inlCalls, 
res) - return res } @@ -1294,7 +1188,7 @@ func isAtomicCoverageCounterUpdate(cn *ir.CallExpr) bool { return v } -func postProcessCallSites(profile *pgo.Profile) { +func PostProcessCallSites(profile *pgo.Profile) { if base.Debug.DumpInlCallSiteScores != 0 { budgetCallback := func(fn *ir.Func, prof *pgo.Profile) (int32, bool) { v := inlineBudget(fn, prof, false, false) diff --git a/src/cmd/compile/internal/inline/inlheur/analyze.go b/src/cmd/compile/internal/inline/inlheur/analyze.go index 6c3db92afe..1fb502ac2a 100644 --- a/src/cmd/compile/internal/inline/inlheur/analyze.go +++ b/src/cmd/compile/internal/inline/inlheur/analyze.go @@ -95,15 +95,16 @@ func AnalyzeFunc(fn *ir.Func, canInline func(*ir.Func), budgetForFunc func(*ir.F // only after the closures it contains have been processed, so // iterate through the list in reverse order. Once a function has // been analyzed, revisit the question of whether it should be - // inlinable; if it is over the default hairyness limit and it + // inlinable; if it is over the default hairiness limit and it // doesn't have any interesting properties, then we don't want // the overhead of writing out its inline body. 
+ nameFinder := newNameFinder(fn) for i := len(funcs) - 1; i >= 0; i-- { f := funcs[i] if f.OClosure != nil && !f.InlinabilityChecked() { canInline(f) } - funcProps := analyzeFunc(f, inlineMaxBudget) + funcProps := analyzeFunc(f, inlineMaxBudget, nameFinder) revisitInlinability(f, funcProps, budgetForFunc) if f.Inl != nil { f.Inl.Properties = funcProps.SerializeToString() @@ -122,11 +123,11 @@ func TearDown() { scoreCallsCache.csl = nil } -func analyzeFunc(fn *ir.Func, inlineMaxBudget int) *FuncProps { +func analyzeFunc(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) *FuncProps { if funcInlHeur, ok := fpmap[fn]; ok { return funcInlHeur.props } - funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget) + funcProps, fcstab := computeFuncProps(fn, inlineMaxBudget, nf) file, line := fnFileLine(fn) entry := fnInlHeur{ fname: fn.Sym().Name, @@ -153,7 +154,7 @@ func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(* if fn.Inl == nil { return } - maxAdj := int32(largestScoreAdjustment(fn, funcProps)) + maxAdj := int32(LargestNegativeScoreAdjustment(fn, funcProps)) budget := budgetForFunc(fn) if fn.Inl.Cost+maxAdj > budget { fn.Inl = nil @@ -163,7 +164,7 @@ func revisitInlinability(fn *ir.Func, funcProps *FuncProps, budgetForFunc func(* // computeFuncProps examines the Go function 'fn' and computes for it // a function "properties" object, to be used to drive inlining // heuristics. See comments on the FuncProps type for more info. 
-func computeFuncProps(fn *ir.Func, inlineMaxBudget int) (*FuncProps, CallSiteTab) { +func computeFuncProps(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*FuncProps, CallSiteTab) { if debugTrace&debugTraceFuncs != 0 { fmt.Fprintf(os.Stderr, "=-= starting analysis of func %v:\n%+v\n", fn, fn) @@ -171,13 +172,13 @@ func computeFuncProps(fn *ir.Func, inlineMaxBudget int) (*FuncProps, CallSiteTab funcProps := new(FuncProps) ffa := makeFuncFlagsAnalyzer(fn) analyzers := []propAnalyzer{ffa} - analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget) - analyzers = addParamsAnalyzer(fn, analyzers, funcProps) + analyzers = addResultsAnalyzer(fn, analyzers, funcProps, inlineMaxBudget, nf) + analyzers = addParamsAnalyzer(fn, analyzers, funcProps, nf) runAnalyzersOnFunction(fn, analyzers) for _, a := range analyzers { a.setResults(funcProps) } - cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0) + cstab := computeCallSiteTable(fn, fn.Body, nil, ffa.panicPathTable(), 0, nf) return funcProps, cstab } diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go index 3e285d5181..36ebe18b82 100644 --- a/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go +++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_callsites.go @@ -14,23 +14,37 @@ import ( ) type callSiteAnalyzer struct { + fn *ir.Func + *nameFinder +} + +type callSiteTableBuilder struct { + fn *ir.Func + *nameFinder cstab CallSiteTab - fn *ir.Func ptab map[ir.Node]pstate nstack []ir.Node loopNest int isInit bool } -func makeCallSiteAnalyzer(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int) *callSiteAnalyzer { - isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.") +func makeCallSiteAnalyzer(fn *ir.Func) *callSiteAnalyzer { return &callSiteAnalyzer{ - fn: fn, - cstab: cstab, - ptab: ptab, - isInit: isInit, - loopNest: 
loopNestingLevel, - nstack: []ir.Node{fn}, + fn: fn, + nameFinder: newNameFinder(fn), + } +} + +func makeCallSiteTableBuilder(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) *callSiteTableBuilder { + isInit := fn.IsPackageInit() || strings.HasPrefix(fn.Sym().Name, "init.") + return &callSiteTableBuilder{ + fn: fn, + cstab: cstab, + ptab: ptab, + isInit: isInit, + loopNest: loopNestingLevel, + nstack: []ir.Node{fn}, + nameFinder: nf, } } @@ -39,22 +53,22 @@ func makeCallSiteAnalyzer(fn *ir.Func, cstab CallSiteTab, ptab map[ir.Node]pstat // specific subtree within the AST for a function. The main intended // use cases are for 'region' to be either A) an entire function body, // or B) an inlined call expression. -func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int) CallSiteTab { - csa := makeCallSiteAnalyzer(fn, cstab, ptab, loopNestingLevel) +func computeCallSiteTable(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, ptab map[ir.Node]pstate, loopNestingLevel int, nf *nameFinder) CallSiteTab { + cstb := makeCallSiteTableBuilder(fn, cstab, ptab, loopNestingLevel, nf) var doNode func(ir.Node) bool doNode = func(n ir.Node) bool { - csa.nodeVisitPre(n) + cstb.nodeVisitPre(n) ir.DoChildren(n, doNode) - csa.nodeVisitPost(n) + cstb.nodeVisitPost(n) return false } for _, n := range region { doNode(n) } - return csa.cstab + return cstb.cstab } -func (csa *callSiteAnalyzer) flagsForNode(call *ir.CallExpr) CSPropBits { +func (cstb *callSiteTableBuilder) flagsForNode(call *ir.CallExpr) CSPropBits { var r CSPropBits if debugTrace&debugTraceCalls != 0 { @@ -63,21 +77,21 @@ func (csa *callSiteAnalyzer) flagsForNode(call *ir.CallExpr) CSPropBits { } // Set a bit if this call is within a loop. - if csa.loopNest > 0 { + if cstb.loopNest > 0 { r |= CallSiteInLoop } // Set a bit if the call is within an init function (either // compiler-generated or user-written). 
- if csa.isInit { + if cstb.isInit { r |= CallSiteInInitFunc } // Decide whether to apply the panic path heuristic. Hack: don't // apply this heuristic in the function "main.main" (mostly just // to avoid annoying users). - if !isMainMain(csa.fn) { - r = csa.determinePanicPathBits(call, r) + if !isMainMain(cstb.fn) { + r = cstb.determinePanicPathBits(call, r) } return r @@ -88,15 +102,15 @@ func (csa *callSiteAnalyzer) flagsForNode(call *ir.CallExpr) CSPropBits { // panic/exit. Do this by walking back up the node stack to see if we // can find either A) an enclosing panic, or B) a statement node that // we've determined leads to a panic/exit. -func (csa *callSiteAnalyzer) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits { - csa.nstack = append(csa.nstack, call) +func (cstb *callSiteTableBuilder) determinePanicPathBits(call ir.Node, r CSPropBits) CSPropBits { + cstb.nstack = append(cstb.nstack, call) defer func() { - csa.nstack = csa.nstack[:len(csa.nstack)-1] + cstb.nstack = cstb.nstack[:len(cstb.nstack)-1] }() - for ri := range csa.nstack[:len(csa.nstack)-1] { - i := len(csa.nstack) - ri - 1 - n := csa.nstack[i] + for ri := range cstb.nstack[:len(cstb.nstack)-1] { + i := len(cstb.nstack) - ri - 1 + n := cstb.nstack[i] _, isCallExpr := n.(*ir.CallExpr) _, isStmt := n.(ir.Stmt) if isCallExpr { @@ -104,7 +118,7 @@ func (csa *callSiteAnalyzer) determinePanicPathBits(call ir.Node, r CSPropBits) } if debugTrace&debugTraceCalls != 0 { - ps, inps := csa.ptab[n] + ps, inps := cstb.ptab[n] fmt.Fprintf(os.Stderr, "=-= callpar %d op=%s ps=%s inptab=%v stmt=%v\n", i, n.Op().String(), ps.String(), inps, isStmt) } @@ -112,7 +126,7 @@ func (csa *callSiteAnalyzer) determinePanicPathBits(call ir.Node, r CSPropBits) r |= CallSiteOnPanicPath break } - if v, ok := csa.ptab[n]; ok { + if v, ok := cstb.ptab[n]; ok { if v == psCallsPanic { r |= CallSiteOnPanicPath break @@ -126,16 +140,15 @@ func (csa *callSiteAnalyzer) determinePanicPathBits(call ir.Node, r CSPropBits) } // 
propsForArg returns property bits for a given call argument expression arg. -func (csa *callSiteAnalyzer) propsForArg(arg ir.Node) ActualExprPropBits { - _, islit := isLiteral(arg) - if islit { +func (cstb *callSiteTableBuilder) propsForArg(arg ir.Node) ActualExprPropBits { + if cval := cstb.constValue(arg); cval != nil { return ActualExprConstant } - if isConcreteConvIface(arg) { + if cstb.isConcreteConvIface(arg) { return ActualExprIsConcreteConvIface } - fname, isfunc, _ := isFuncName(arg) - if isfunc { + fname := cstb.funcName(arg) + if fname != nil { if fn := fname.Func; fn != nil && typecheck.HaveInlineBody(fn) { return ActualExprIsInlinableFunc } @@ -149,11 +162,11 @@ func (csa *callSiteAnalyzer) propsForArg(arg ir.Node) ActualExprPropBits { // expression; these will be stored in the CallSite object for a given // call and then consulted when scoring. If no arg has any interesting // properties we try to save some space and return a nil slice. -func (csa *callSiteAnalyzer) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits { +func (cstb *callSiteTableBuilder) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBits { rv := make([]ActualExprPropBits, len(ce.Args)) somethingInteresting := false for idx := range ce.Args { - argProp := csa.propsForArg(ce.Args[idx]) + argProp := cstb.propsForArg(ce.Args[idx]) somethingInteresting = somethingInteresting || (argProp != 0) rv[idx] = argProp } @@ -163,9 +176,9 @@ func (csa *callSiteAnalyzer) argPropsForCall(ce *ir.CallExpr) []ActualExprPropBi return rv } -func (csa *callSiteAnalyzer) addCallSite(callee *ir.Func, call *ir.CallExpr) { - flags := csa.flagsForNode(call) - argProps := csa.argPropsForCall(call) +func (cstb *callSiteTableBuilder) addCallSite(callee *ir.Func, call *ir.CallExpr) { + flags := cstb.flagsForNode(call) + argProps := cstb.argPropsForCall(call) if debugTrace&debugTraceCalls != 0 { fmt.Fprintf(os.Stderr, "=-= props %+v for call %v\n", argProps, call) } @@ -173,12 +186,12 @@ func (csa 
*callSiteAnalyzer) addCallSite(callee *ir.Func, call *ir.CallExpr) { cs := &CallSite{ Call: call, Callee: callee, - Assign: csa.containingAssignment(call), + Assign: cstb.containingAssignment(call), ArgProps: argProps, Flags: flags, - ID: uint(len(csa.cstab)), + ID: uint(len(cstb.cstab)), } - if _, ok := csa.cstab[call]; ok { + if _, ok := cstb.cstab[call]; ok { fmt.Fprintf(os.Stderr, "*** cstab duplicate entry at: %s\n", fmtFullPos(call.Pos())) fmt.Fprintf(os.Stderr, "*** call: %+v\n", call) @@ -189,38 +202,38 @@ func (csa *callSiteAnalyzer) addCallSite(callee *ir.Func, call *ir.CallExpr) { // on heuristics. cs.Score = int(callee.Inl.Cost) - if csa.cstab == nil { - csa.cstab = make(CallSiteTab) + if cstb.cstab == nil { + cstb.cstab = make(CallSiteTab) } - csa.cstab[call] = cs + cstb.cstab[call] = cs if debugTrace&debugTraceCalls != 0 { fmt.Fprintf(os.Stderr, "=-= added callsite: caller=%v callee=%v n=%s\n", - csa.fn, callee, fmtFullPos(call.Pos())) + cstb.fn, callee, fmtFullPos(call.Pos())) } } -func (csa *callSiteAnalyzer) nodeVisitPre(n ir.Node) { +func (cstb *callSiteTableBuilder) nodeVisitPre(n ir.Node) { switch n.Op() { case ir.ORANGE, ir.OFOR: if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) { - csa.loopNest++ + cstb.loopNest++ } case ir.OCALLFUNC: ce := n.(*ir.CallExpr) callee := pgo.DirectCallee(ce.Fun) if callee != nil && callee.Inl != nil { - csa.addCallSite(callee, ce) + cstb.addCallSite(callee, ce) } } - csa.nstack = append(csa.nstack, n) + cstb.nstack = append(cstb.nstack, n) } -func (csa *callSiteAnalyzer) nodeVisitPost(n ir.Node) { - csa.nstack = csa.nstack[:len(csa.nstack)-1] +func (cstb *callSiteTableBuilder) nodeVisitPost(n ir.Node) { + cstb.nstack = cstb.nstack[:len(cstb.nstack)-1] switch n.Op() { case ir.ORANGE, ir.OFOR: if !hasTopLevelLoopBodyReturnOrBreak(loopBody(n)) { - csa.loopNest-- + cstb.loopNest-- } } } @@ -281,8 +294,8 @@ func hasTopLevelLoopBodyReturnOrBreak(loopBody ir.Nodes) bool { // call to a pair of auto-temps, then the second 
one assigning the // auto-temps to the user-visible vars. This helper will return the // second (outer) of these two. -func (csa *callSiteAnalyzer) containingAssignment(n ir.Node) ir.Node { - parent := csa.nstack[len(csa.nstack)-1] +func (cstb *callSiteTableBuilder) containingAssignment(n ir.Node) ir.Node { + parent := cstb.nstack[len(cstb.nstack)-1] // assignsOnlyAutoTemps returns TRUE of the specified OAS2FUNC // node assigns only auto-temps. @@ -315,12 +328,12 @@ func (csa *callSiteAnalyzer) containingAssignment(n ir.Node) ir.Node { // OAS1({x,y},OCONVNOP(OAS2FUNC({auto1,auto2},OCALLFUNC(bar)))) // if assignsOnlyAutoTemps(parent) { - par2 := csa.nstack[len(csa.nstack)-2] + par2 := cstb.nstack[len(cstb.nstack)-2] if par2.Op() == ir.OAS2 { return par2 } if par2.Op() == ir.OCONVNOP { - par3 := csa.nstack[len(csa.nstack)-3] + par3 := cstb.nstack[len(cstb.nstack)-3] if par3.Op() == ir.OAS2 { return par3 } @@ -378,18 +391,23 @@ func UpdateCallsiteTable(callerfn *ir.Func, n *ir.CallExpr, ic *ir.InlinedCallEx loopNestLevel = 1 } ptab := map[ir.Node]pstate{ic: icp} - icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel) + nf := newNameFinder(nil) + icstab := computeCallSiteTable(callerfn, ic.Body, nil, ptab, loopNestLevel, nf) // Record parent callsite. This is primarily for debug output. for _, cs := range icstab { cs.parent = oldcs } - // Score the calls in the inlined body. Note the setting of "doCallResults" - // to false here: at the moment there isn't any easy way to localize - // or region-ize the work done by "rescoreBasedOnCallResultUses", which - // currently does a walk over the entire function to look for uses - // of a given set of results. + // Score the calls in the inlined body. 
Note the setting of + // "doCallResults" to false here: at the moment there isn't any + // easy way to localize or region-ize the work done by + // "rescoreBasedOnCallResultUses", which currently does a walk + // over the entire function to look for uses of a given set of + // results. Similarly we're passing nil to makeCallSiteAnalyzer, + // so as to run name finding without the use of static value & + // friends. + csa := makeCallSiteAnalyzer(nil) const doCallResults = false - scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic) + csa.scoreCallsRegion(callerfn, ic.Body, icstab, doCallResults, ic) } diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go index 588d2f4f59..d86fd7d71b 100644 --- a/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go +++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_flags.go @@ -66,34 +66,24 @@ func (ffa *funcFlagsAnalyzer) setResults(funcProps *FuncProps) { funcProps.Flags = rv } -func (ffa *funcFlagsAnalyzer) getstate(n ir.Node) pstate { - val, ok := ffa.nstate[n] - if !ok { - base.Fatalf("funcFlagsAnalyzer: fn %q node %s line %s: internal error, no setting for node:\n%+v\n", ffa.fn.Sym().Name, n.Op().String(), ir.Line(n), n) - } - return val +func (ffa *funcFlagsAnalyzer) getState(n ir.Node) pstate { + return ffa.nstate[n] } -func (ffa *funcFlagsAnalyzer) setstate(n ir.Node, st pstate) { - if _, ok := ffa.nstate[n]; ok { - base.Fatalf("funcFlagsAnalyzer: fn %q internal error, existing setting for node:\n%+v\n", ffa.fn.Sym().Name, n) - } else { +func (ffa *funcFlagsAnalyzer) setState(n ir.Node, st pstate) { + if st != psNoInfo { ffa.nstate[n] = st } } -func (ffa *funcFlagsAnalyzer) updatestate(n ir.Node, st pstate) { - if _, ok := ffa.nstate[n]; !ok { - base.Fatalf("funcFlagsAnalyzer: fn %q internal error, expected existing setting for node:\n%+v\n", ffa.fn.Sym().Name, n) +func (ffa *funcFlagsAnalyzer) updateState(n 
ir.Node, st pstate) { + if st == psNoInfo { + delete(ffa.nstate, n) } else { ffa.nstate[n] = st } } -func (ffa *funcFlagsAnalyzer) setstateSoft(n ir.Node, st pstate) { - ffa.nstate[n] = st -} - func (ffa *funcFlagsAnalyzer) panicPathTable() map[ir.Node]pstate { return ffa.nstate } @@ -144,7 +134,7 @@ func branchCombine(p1, p2 pstate) pstate { } // stateForList walks through a list of statements and computes the -// state/diposition for the entire list as a whole, as well +// state/disposition for the entire list as a whole, as well // as updating disposition of intermediate nodes. func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate { st := psTop @@ -164,13 +154,13 @@ func (ffa *funcFlagsAnalyzer) stateForList(list ir.Nodes) pstate { // line 10 will be on a panic path). for i := len(list) - 1; i >= 0; i-- { n := list[i] - psi := ffa.getstate(n) + psi := ffa.getState(n) if debugTrace&debugTraceFuncFlags != 0 { fmt.Fprintf(os.Stderr, "=-= %v: stateForList n=%s ps=%s\n", ir.Line(n), n.Op().String(), psi.String()) } st = blockCombine(psi, st) - ffa.updatestate(n, st) + ffa.updateState(n, st) } if st == psTop { st = psNoInfo @@ -237,8 +227,6 @@ func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) { ir.Line(n), n.Op().String(), shouldVisit(n)) } if !shouldVisit(n) { - // invoke soft set, since node may be shared (e.g. 
ONAME) - ffa.setstateSoft(n, psNoInfo) return } var st pstate @@ -361,7 +349,7 @@ func (ffa *funcFlagsAnalyzer) nodeVisitPost(n ir.Node) { fmt.Fprintf(os.Stderr, "=-= %v: visit n=%s returns %s\n", ir.Line(n), n.Op().String(), st.String()) } - ffa.setstate(n, st) + ffa.setState(n, st) } func (ffa *funcFlagsAnalyzer) nodeVisitPre(n ir.Node) { diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go index 0ce0af43a2..f6bd84c3f5 100644 --- a/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go +++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_params.go @@ -19,6 +19,7 @@ type paramsAnalyzer struct { params []*ir.Name top []bool *condLevelTracker + *nameFinder } // getParams returns an *ir.Name slice containing all params for the @@ -34,8 +35,8 @@ func getParams(fn *ir.Func) []*ir.Name { // new list. If the function in question doesn't have any interesting // parameters then the analyzer list is returned unchanged, and the // params flags in "fp" are updated accordingly. -func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps) []propAnalyzer { - pa, props := makeParamsAnalyzer(fn) +func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, nf *nameFinder) []propAnalyzer { + pa, props := makeParamsAnalyzer(fn, nf) if pa != nil { analyzers = append(analyzers, pa) } else { @@ -44,11 +45,11 @@ func addParamsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps) []p return analyzers } -// makeParamAnalyzer creates a new helper object to analyze parameters +// makeParamsAnalyzer creates a new helper object to analyze parameters // of function fn. If the function doesn't have any interesting // params, a nil helper is returned along with a set of default param // flags for the func. 
-func makeParamsAnalyzer(fn *ir.Func) (*paramsAnalyzer, []ParamPropBits) { +func makeParamsAnalyzer(fn *ir.Func, nf *nameFinder) (*paramsAnalyzer, []ParamPropBits) { params := getParams(fn) // includes receiver if applicable if len(params) == 0 { return nil, nil @@ -98,6 +99,7 @@ func makeParamsAnalyzer(fn *ir.Func) (*paramsAnalyzer, []ParamPropBits) { params: params, top: top, condLevelTracker: new(condLevelTracker), + nameFinder: nf, } return pa, nil } @@ -162,7 +164,7 @@ func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) { return } sel := ce.Fun.(*ir.SelectorExpr) - r := ir.StaticValue(sel.X) + r := pa.staticValue(sel.X) if r.Op() != ir.ONAME { return } @@ -193,8 +195,8 @@ func (pa *paramsAnalyzer) callCheckParams(ce *ir.CallExpr) { return name == p, false }) } else { - cname, isFunc, _ := isFuncName(called) - if isFunc { + cname := pa.funcName(called) + if cname != nil { pa.deriveFlagsFromCallee(ce, cname.Func) } } @@ -238,7 +240,7 @@ func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func } // See if one of the caller's parameters is flowing unmodified // into this actual expression. - r := ir.StaticValue(arg) + r := pa.staticValue(arg) if r.Op() != ir.ONAME { return } @@ -247,7 +249,13 @@ func (pa *paramsAnalyzer) deriveFlagsFromCallee(ce *ir.CallExpr, callee *ir.Func return } callerParamIdx := pa.findParamIdx(name) - if callerParamIdx == -1 || pa.params[callerParamIdx] == nil { + // note that callerParamIdx may return -1 in the case where + // the param belongs not to the current closure func we're + // analyzing but to an outer enclosing func. 
+ if callerParamIdx == -1 { + return + } + if pa.params[callerParamIdx] == nil { panic("something went wrong") } if !pa.top[callerParamIdx] && diff --git a/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go index 58b0f54697..2aaa68d1b7 100644 --- a/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go +++ b/src/cmd/compile/internal/inline/inlheur/analyze_func_returns.go @@ -20,6 +20,7 @@ type resultsAnalyzer struct { props []ResultPropBits values []resultVal inlineMaxBudget int + *nameFinder } // resultVal captures information about a specific result returned from @@ -28,7 +29,7 @@ type resultsAnalyzer struct { // the same function, etc. This container stores info on a the specific // scenarios we're looking for. type resultVal struct { - lit constant.Value + cval constant.Value fn *ir.Name fnClo bool top bool @@ -40,8 +41,8 @@ type resultVal struct { // new list. If the function in question doesn't have any returns (or // any interesting returns) then the analyzer list is left as is, and // the result flags in "fp" are updated accordingly. -func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int) []propAnalyzer { - ra, props := makeResultsAnalyzer(fn, inlineMaxBudget) +func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, inlineMaxBudget int, nf *nameFinder) []propAnalyzer { + ra, props := makeResultsAnalyzer(fn, inlineMaxBudget, nf) if ra != nil { analyzers = append(analyzers, ra) } else { @@ -54,7 +55,7 @@ func addResultsAnalyzer(fn *ir.Func, analyzers []propAnalyzer, fp *FuncProps, in // in function fn. If the function doesn't have any interesting // results, a nil helper is returned along with a set of default // result flags for the func. 
-func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int) (*resultsAnalyzer, []ResultPropBits) { +func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int, nf *nameFinder) (*resultsAnalyzer, []ResultPropBits) { results := fn.Type().Results() if len(results) == 0 { return nil, nil @@ -84,6 +85,7 @@ func makeResultsAnalyzer(fn *ir.Func, inlineMaxBudget int) (*resultsAnalyzer, [] props: props, values: vals, inlineMaxBudget: inlineMaxBudget, + nameFinder: nf, } return ra, nil } @@ -143,29 +145,6 @@ func (ra *resultsAnalyzer) nodeVisitPost(n ir.Node) { } } -// isFuncName returns the *ir.Name for the func or method -// corresponding to node 'n', along with a boolean indicating success, -// and another boolean indicating whether the func is closure. -func isFuncName(n ir.Node) (*ir.Name, bool, bool) { - sv := ir.StaticValue(n) - if sv.Op() == ir.ONAME { - name := sv.(*ir.Name) - if name.Sym() != nil && name.Class == ir.PFUNC { - return name, true, false - } - } - if sv.Op() == ir.OCLOSURE { - cloex := sv.(*ir.ClosureExpr) - return cloex.Func.Nname, true, true - } - if sv.Op() == ir.OMETHEXPR { - if mn := ir.MethodExprName(sv); mn != nil { - return mn, true, false - } - } - return nil, false, false -} - // analyzeResult examines the expression 'n' being returned as the // 'ii'th argument in some return statement to see whether has // interesting characteristics (for example, returns a constant), then @@ -173,18 +152,22 @@ func isFuncName(n ir.Node) (*ir.Name, bool, bool) { // previous result (for the given return slot) that we've already // processed. 
func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { - isAllocMem := isAllocatedMem(n) - isConcConvItf := isConcreteConvIface(n) - lit, isConst := isLiteral(n) - rfunc, isFunc, isClo := isFuncName(n) + isAllocMem := ra.isAllocatedMem(n) + isConcConvItf := ra.isConcreteConvIface(n) + constVal := ra.constValue(n) + isConst := (constVal != nil) + isNil := ra.isNil(n) + rfunc := ra.funcName(n) + isFunc := (rfunc != nil) + isClo := (rfunc != nil && rfunc.Func.OClosure != nil) curp := ra.props[ii] - dprops, isDerivedFromCall := deriveReturnFlagsFromCallee(n) + dprops, isDerivedFromCall := ra.deriveReturnFlagsFromCallee(n) newp := ResultNoInfo - var newlit constant.Value + var newcval constant.Value var newfunc *ir.Name if debugTrace&debugTraceResults != 0 { - fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isFunc, isClo) + fmt.Fprintf(os.Stderr, "=-= %v: analyzeResult n=%s ismem=%v isconcconv=%v isconst=%v isnil=%v isfunc=%v isclo=%v\n", ir.Line(n), n.Op().String(), isAllocMem, isConcConvItf, isConst, isNil, isFunc, isClo) } if ra.values[ii].top { @@ -201,7 +184,10 @@ func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { newfunc = rfunc case isConst: newp = ResultAlwaysSameConstant - newlit = lit + newcval = constVal + case isNil: + newp = ResultAlwaysSameConstant + newcval = nil case isDerivedFromCall: newp = dprops ra.values[ii].derived = true @@ -214,17 +200,20 @@ func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { // the previous returns. 
switch curp { case ResultIsAllocatedMem: - if isAllocatedMem(n) { + if isAllocMem { newp = ResultIsAllocatedMem } case ResultIsConcreteTypeConvertedToInterface: - if isConcreteConvIface(n) { + if isConcConvItf { newp = ResultIsConcreteTypeConvertedToInterface } case ResultAlwaysSameConstant: - if isConst && isSameLiteral(lit, ra.values[ii].lit) { + if isNil && ra.values[ii].cval == nil { newp = ResultAlwaysSameConstant - newlit = lit + newcval = nil + } else if isConst && constant.Compare(constVal, token.EQL, ra.values[ii].cval) { + newp = ResultAlwaysSameConstant + newcval = constVal } case ResultAlwaysSameFunc: if isFunc && isSameFuncName(rfunc, ra.values[ii].fn) { @@ -236,7 +225,7 @@ func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { } ra.values[ii].fn = newfunc ra.values[ii].fnClo = isClo - ra.values[ii].lit = newlit + ra.values[ii].cval = newcval ra.props[ii] = newp if debugTrace&debugTraceResults != 0 { @@ -245,15 +234,6 @@ func (ra *resultsAnalyzer) analyzeResult(ii int, n ir.Node) { } } -func isAllocatedMem(n ir.Node) bool { - sv := ir.StaticValue(n) - switch sv.Op() { - case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT: - return true - } - return false -} - // deriveReturnFlagsFromCallee tries to set properties for a given // return result where we're returning call expression; return value // is a return property value and a boolean indicating whether the @@ -270,7 +250,7 @@ func isAllocatedMem(n ir.Node) bool { // set foo's return property to that of bar. In the case of "two", however, // even though each return path returns a constant, we don't know // whether the constants are identical, hence we need to be conservative. 
-func deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) { +func (ra *resultsAnalyzer) deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) { if n.Op() != ir.OCALLFUNC { return 0, false } @@ -282,8 +262,8 @@ func deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) { if called.Op() != ir.ONAME { return 0, false } - cname, isFunc, _ := isFuncName(called) - if !isFunc { + cname := ra.funcName(called) + if cname == nil { return 0, false } calleeProps := propsForFunc(cname.Func) @@ -295,41 +275,3 @@ func deriveReturnFlagsFromCallee(n ir.Node) (ResultPropBits, bool) { } return calleeProps.ResultFlags[0], true } - -func isLiteral(n ir.Node) (constant.Value, bool) { - sv := ir.StaticValue(n) - switch sv.Op() { - case ir.ONIL: - return nil, true - case ir.OLITERAL: - return sv.Val(), true - } - return nil, false -} - -// isSameLiteral checks to see if 'v1' and 'v2' correspond to the same -// literal value, or if they are both nil. -func isSameLiteral(v1, v2 constant.Value) bool { - if v1 == nil && v2 == nil { - return true - } - if v1 == nil || v2 == nil { - return false - } - return constant.Compare(v1, token.EQL, v2) -} - -func isConcreteConvIface(n ir.Node) bool { - sv := ir.StaticValue(n) - if sv.Op() != ir.OCONVIFACE { - return false - } - return !sv.(*ir.ConvExpr).X.Type().IsInterface() -} - -func isSameFuncName(v1, v2 *ir.Name) bool { - // NB: there are a few corner cases where pointer equality - // doesn't work here, but this should be good enough for - // our purposes here. - return v1 == v2 -} diff --git a/src/cmd/compile/internal/inline/inlheur/names.go b/src/cmd/compile/internal/inline/inlheur/names.go new file mode 100644 index 0000000000..022385087b --- /dev/null +++ b/src/cmd/compile/internal/inline/inlheur/names.go @@ -0,0 +1,129 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package inlheur + +import ( + "cmd/compile/internal/ir" + "go/constant" +) + +// nameFinder provides a set of "isXXX" query methods for clients to +// ask whether a given AST node corresponds to a function, a constant +// value, and so on. These methods use an underlying ir.ReassignOracle +// to return more precise results in cases where an "interesting" +// value is assigned to a singly-defined local temp. Example: +// +// const q = 101 +// fq := func() int { return q } +// copyOfConstant := q +// copyOfFunc := f +// interestingCall(copyOfConstant, copyOfFunc) +// +// A name finder query method invoked on the arguments being passed to +// "interestingCall" will be able to detect that 'copyOfConstant' always +// evaluates to a constant (even though it is in fact a PAUTO local +// variable). A given nameFinder can also operate without using +// ir.ReassignOracle (in cases where it is not practical to look +// at the entire function); in such cases queries will still work +// for explicit constant values and functions. +type nameFinder struct { + ro *ir.ReassignOracle +} + +// newNameFinder returns a new nameFinder object with a reassignment +// oracle initialized based on the function fn, or if fn is nil, +// without an underlying ReassignOracle. +func newNameFinder(fn *ir.Func) *nameFinder { + var ro *ir.ReassignOracle + if fn != nil { + ro = &ir.ReassignOracle{} + ro.Init(fn) + } + return &nameFinder{ro: ro} +} + +// funcName returns the *ir.Name for the func or method +// corresponding to node 'n', or nil if n can't be proven +// to contain a function value. +func (nf *nameFinder) funcName(n ir.Node) *ir.Name { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if name := ir.StaticCalleeName(sv); name != nil { + return name + } + return nil +} + +// isAllocatedMem returns true if node n corresponds to a memory +// allocation expression (make, new, or equivalent). 
+func (nf *nameFinder) isAllocatedMem(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + switch sv.Op() { + case ir.OMAKESLICE, ir.ONEW, ir.OPTRLIT, ir.OSLICELIT: + return true + } + return false +} + +// constValue returns the underlying constant.Value for an AST node n +// if n is itself a constant value/expr, or if n is a singly assigned +// local containing constant expr/value (or nil if not constant). +func (nf *nameFinder) constValue(n ir.Node) constant.Value { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if sv.Op() == ir.OLITERAL { + return sv.Val() + } + return nil +} + +// isNil returns whether n is nil (or singly +// assigned local containing nil). +func (nf *nameFinder) isNil(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + return sv.Op() == ir.ONIL +} + +func (nf *nameFinder) staticValue(n ir.Node) ir.Node { + if nf.ro == nil { + return n + } + return nf.ro.StaticValue(n) +} + +func (nf *nameFinder) reassigned(n *ir.Name) bool { + if nf.ro == nil { + return true + } + return nf.ro.Reassigned(n) +} + +func (nf *nameFinder) isConcreteConvIface(n ir.Node) bool { + sv := n + if nf.ro != nil { + sv = nf.ro.StaticValue(n) + } + if sv.Op() != ir.OCONVIFACE { + return false + } + return !sv.(*ir.ConvExpr).X.Type().IsInterface() +} + +func isSameFuncName(v1, v2 *ir.Name) bool { + // NB: there are a few corner cases where pointer equality + // doesn't work here, but this should be good enough for + // our purposes here. 
+ return v1 == v2 +} diff --git a/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go index 1d31f09ac0..b95ea37d59 100644 --- a/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go +++ b/src/cmd/compile/internal/inline/inlheur/score_callresult_uses.go @@ -46,7 +46,7 @@ type resultUseAnalyzer struct { // rescoreBasedOnCallResultUses examines how call results are used, // and tries to update the scores of calls based on how their results // are used in the function. -func rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) { +func (csa *callSiteAnalyzer) rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]resultPropAndCS, cstab CallSiteTab) { enableDebugTraceIfEnv() rua := &resultUseAnalyzer{ resultNameTab: resultNameTab, @@ -65,7 +65,7 @@ func rescoreBasedOnCallResultUses(fn *ir.Func, resultNameTab map[*ir.Name]result disableDebugTrace() } -func examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS { +func (csa *callSiteAnalyzer) examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS) map[*ir.Name]resultPropAndCS { if debugTrace&debugTraceScoring != 0 { fmt.Fprintf(os.Stderr, "=-= examining call results for %q\n", EncodeCallSiteKey(cs)) @@ -103,7 +103,7 @@ func examineCallResults(cs *CallSite, resultNameTab map[*ir.Name]resultPropAndCS if rprop&interesting == 0 { continue } - if ir.Reassigned(n) { + if csa.nameFinder.reassigned(n) { continue } if resultNameTab == nil { diff --git a/src/cmd/compile/internal/inline/inlheur/scoring.go b/src/cmd/compile/internal/inline/inlheur/scoring.go index 2b210fce8e..3de95d46b4 100644 --- a/src/cmd/compile/internal/inline/inlheur/scoring.go +++ b/src/cmd/compile/internal/inline/inlheur/scoring.go @@ -182,13 +182,14 @@ func mustToMay(x scoreAdjustTyp) scoreAdjustTyp { return 0 } -// 
computeCallSiteScore takes a given call site whose ir node is 'call' and -// callee function is 'callee' and with previously computed call site -// properties 'csflags', then computes a score for the callsite that -// combines the size cost of the callee with heuristics based on -// previously parameter and function properties, then stores the score -// and the adjustment mask in the appropriate fields in 'cs' -func (cs *CallSite) computeCallSiteScore(calleeProps *FuncProps) { +// computeCallSiteScore takes a given call site whose ir node is +// 'call' and callee function is 'callee' and with previously computed +// call site properties 'csflags', then computes a score for the +// callsite that combines the size cost of the callee with heuristics +// based on previously computed argument and function properties, +// then stores the score and the adjustment mask in the appropriate +// fields in 'cs' +func (cs *CallSite) computeCallSiteScore(csa *callSiteAnalyzer, calleeProps *FuncProps) { callee := cs.Callee csflags := cs.Flags call := cs.Call @@ -353,7 +354,7 @@ func setupFlagToAdjMaps() { } } -// largestScoreAdjustment tries to estimate the largest possible +// LargestNegativeScoreAdjustment tries to estimate the largest possible // negative score adjustment that could be applied to a call of the // function with the specified props. Example: // @@ -372,7 +373,7 @@ func setupFlagToAdjMaps() { // given call _could_ be rescored down as much as -35 points-- thus if // the size of "bar" is 100 (for example) then there is at least a // chance that scoring will enable inlining. 
-func largestScoreAdjustment(fn *ir.Func, props *FuncProps) int { +func LargestNegativeScoreAdjustment(fn *ir.Func, props *FuncProps) int { if resultFlagToPositiveAdj == nil { setupFlagToAdjMaps() } @@ -397,6 +398,14 @@ func largestScoreAdjustment(fn *ir.Func, props *FuncProps) int { return score } +// LargestPositiveScoreAdjustment tries to estimate the largest possible +// positive score adjustment that could be applied to a given callsite. +// At the moment we don't have very many positive score adjustments, so +// this is just hard-coded, not table-driven. +func LargestPositiveScoreAdjustment(fn *ir.Func) int { + return adjValues[panicPathAdj] + adjValues[initFuncAdj] +} + // callSiteTab contains entries for each call in the function // currently being processed by InlineCalls; this variable will either // be set to 'cstabCache' below (for non-inlinable routines) or to the @@ -438,8 +447,13 @@ type scoreCallsCacheType struct { // after foo has been analyzed, but it's conceivable that CanInline // might visit bar before foo for this SCC. func ScoreCalls(fn *ir.Func) { + if len(fn.Body) == 0 { + return + } enableDebugTraceIfEnv() + nameFinder := newNameFinder(fn) + if debugTrace&debugTraceScoring != 0 { fmt.Fprintf(os.Stderr, "=-= ScoreCalls(%v)\n", ir.FuncName(fn)) } @@ -461,21 +475,25 @@ func ScoreCalls(fn *ir.Func) { fmt.Fprintf(os.Stderr, "=-= building cstab for non-inl func %s\n", ir.FuncName(fn)) } - cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0) + cstab = computeCallSiteTable(fn, fn.Body, scoreCallsCache.tab, nil, 0, + nameFinder) } + csa := makeCallSiteAnalyzer(fn) const doCallResults = true - scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil) + csa.scoreCallsRegion(fn, fn.Body, cstab, doCallResults, nil) + + disableDebugTrace() } // scoreCallsRegion assigns numeric scores to each of the callsites in // region 'region' within function 'fn'. 
This can be called on // an entire function, or with 'region' set to a chunk of // code corresponding to an inlined call. -func scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) { +func (csa *callSiteAnalyzer) scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallResults bool, ic *ir.InlinedCallExpr) { if debugTrace&debugTraceScoring != 0 { - fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s)\n", - ir.FuncName(fn), region[0].Op().String()) + fmt.Fprintf(os.Stderr, "=-= scoreCallsRegion(%v, %s) len(cstab)=%d\n", + ir.FuncName(fn), region[0].Op().String(), len(cstab)) } // Sort callsites to avoid any surprises with non deterministic @@ -510,13 +528,13 @@ func scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallRes continue } } - cs.computeCallSiteScore(cprops) + cs.computeCallSiteScore(csa, cprops) if doCallResults { if debugTrace&debugTraceScoring != 0 { fmt.Fprintf(os.Stderr, "=-= examineCallResults at %s: flags=%d score=%d funcInlHeur=%v deser=%v\n", fmtFullPos(cs.Call.Pos()), cs.Flags, cs.Score, fihcprops, desercprops) } - resultNameTab = examineCallResults(cs, resultNameTab) + resultNameTab = csa.examineCallResults(cs, resultNameTab) } if debugTrace&debugTraceScoring != 0 { @@ -525,7 +543,7 @@ func scoreCallsRegion(fn *ir.Func, region ir.Nodes, cstab CallSiteTab, doCallRes } if resultNameTab != nil { - rescoreBasedOnCallResultUses(fn, resultNameTab, cstab) + csa.rescoreBasedOnCallResultUses(fn, resultNameTab, cstab) } disableDebugTrace() @@ -572,7 +590,7 @@ func GetCallSiteScore(fn *ir.Func, call *ir.CallExpr) (int, bool) { // BudgetExpansion returns the amount to relax/expand the base // inlining budget when the new inliner is turned on; the inliner -// will add the returned value to the hairyness budget. +// will add the returned value to the hairiness budget. 
// // Background: with the new inliner, the score for a given callsite // can be adjusted down by some amount due to heuristics, however we @@ -599,7 +617,7 @@ var allCallSites CallSiteTab // along with info on call site scoring and the adjustments made to a // given score. Here profile is the PGO profile in use (may be // nil), budgetCallback is a callback that can be invoked to find out -// the original pre-adjustment hairyness limit for the function, and +// the original pre-adjustment hairiness limit for the function, and // inlineHotMaxBudget is the constant of the same name used in the // inliner. Sample output lines: // @@ -611,7 +629,7 @@ var allCallSites CallSiteTab // // In the dump above, "Score" is the final score calculated for the // callsite, "Adjustment" is the amount added to or subtracted from -// the original hairyness estimate to form the score. "Status" shows +// the original hairiness estimate to form the score. "Status" shows // whether anything changed with the site -- did the adjustment bump // it down just below the threshold ("PROMOTED") or instead bump it // above the threshold ("DEMOTED"); this will be blank ("---") if no diff --git a/src/cmd/compile/internal/inline/interleaved/interleaved.go b/src/cmd/compile/internal/inline/interleaved/interleaved.go new file mode 100644 index 0000000000..e55b0f1aee --- /dev/null +++ b/src/cmd/compile/internal/inline/interleaved/interleaved.go @@ -0,0 +1,190 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package interleaved implements the interleaved devirtualization and +// inlining pass. 
+package interleaved + +import ( + "cmd/compile/internal/base" + "cmd/compile/internal/devirtualize" + "cmd/compile/internal/inline" + "cmd/compile/internal/inline/inlheur" + "cmd/compile/internal/ir" + "cmd/compile/internal/pgo" + "cmd/compile/internal/typecheck" + "fmt" +) + +// DevirtualizeAndInlinePackage interleaves devirtualization and inlining on +// all functions within pkg. +func DevirtualizeAndInlinePackage(pkg *ir.Package, profile *pgo.Profile) { + if profile != nil && base.Debug.PGODevirtualize > 0 { + // TODO(mdempsky): Integrate into DevirtualizeAndInlineFunc below. + ir.VisitFuncsBottomUp(typecheck.Target.Funcs, func(list []*ir.Func, recursive bool) { + for _, fn := range list { + devirtualize.ProfileGuided(fn, profile) + } + }) + ir.CurFunc = nil + } + + if base.Flag.LowerL != 0 { + inlheur.SetupScoreAdjustments() + } + + var inlProfile *pgo.Profile // copy of profile for inlining + if base.Debug.PGOInline != 0 { + inlProfile = profile + } + + // First compute inlinability of all functions in the package. + inline.CanInlineFuncs(pkg.Funcs, inlProfile) + + // Now we make a second pass to do devirtualization and inlining of + // calls. Order here should not matter. + for _, fn := range pkg.Funcs { + DevirtualizeAndInlineFunc(fn, inlProfile) + } + + if base.Flag.LowerL != 0 { + // Perform a garbage collection of hidden closures functions that + // are no longer reachable from top-level functions following + // inlining. See #59404 and #59638 for more context. + inline.GarbageCollectUnreferencedHiddenClosures() + + if base.Debug.DumpInlFuncProps != "" { + inlheur.DumpFuncProps(nil, base.Debug.DumpInlFuncProps) + } + if inlheur.Enabled() { + inline.PostProcessCallSites(inlProfile) + inlheur.TearDown() + } + } +} + +// DevirtualizeAndInlineFunc interleaves devirtualization and inlining +// on a single function. 
+func DevirtualizeAndInlineFunc(fn *ir.Func, profile *pgo.Profile) { + ir.WithFunc(fn, func() { + if base.Flag.LowerL != 0 { + if inlheur.Enabled() && !fn.Wrapper() { + inlheur.ScoreCalls(fn) + defer inlheur.ScoreCallsCleanup() + } + if base.Debug.DumpInlFuncProps != "" && !fn.Wrapper() { + inlheur.DumpFuncProps(fn, base.Debug.DumpInlFuncProps) + } + } + + bigCaller := base.Flag.LowerL != 0 && inline.IsBigFunc(fn) + if bigCaller && base.Flag.LowerM > 1 { + fmt.Printf("%v: function %v considered 'big'; reducing max cost of inlinees\n", ir.Line(fn), fn) + } + + match := func(n ir.Node) bool { + switch n := n.(type) { + case *ir.CallExpr: + return true + case *ir.TailCallStmt: + n.Call.NoInline = true // can't inline yet + } + return false + } + + edit := func(n ir.Node) ir.Node { + call, ok := n.(*ir.CallExpr) + if !ok { // previously inlined + return nil + } + + devirtualize.StaticCall(call) + if inlCall := inline.TryInlineCall(fn, call, bigCaller, profile); inlCall != nil { + return inlCall + } + return nil + } + + fixpoint(fn, match, edit) + }) +} + +// fixpoint repeatedly edits a function until it stabilizes. +// +// First, fixpoint applies match to every node n within fn. Then it +// iteratively applies edit to each node satisfying match(n). +// +// If edit(n) returns nil, no change is made. Otherwise, the result +// replaces n in fn's body, and fixpoint iterates at least once more. +// +// After an iteration where all edit calls return nil, fixpoint +// returns. +func fixpoint(fn *ir.Func, match func(ir.Node) bool, edit func(ir.Node) ir.Node) { + // Consider the expression "f(g())". We want to be able to replace + // "g()" in-place with its inlined representation. But if we first + // replace "f(...)" with its inlined representation, then "g()" will + // instead appear somewhere within this new AST. + // + // To mitigate this, each matched node n is wrapped in a ParenExpr, + // so we can reliably replace n in-place by assigning ParenExpr.X. 
+ // It's safe to use ParenExpr here, because typecheck already + // removed them all. + + var parens []*ir.ParenExpr + var mark func(ir.Node) ir.Node + mark = func(n ir.Node) ir.Node { + if _, ok := n.(*ir.ParenExpr); ok { + return n // already visited n.X before wrapping + } + + ok := match(n) + + ir.EditChildren(n, mark) + + if ok { + paren := ir.NewParenExpr(n.Pos(), n) + paren.SetType(n.Type()) + paren.SetTypecheck(n.Typecheck()) + + parens = append(parens, paren) + n = paren + } + + return n + } + ir.EditChildren(fn, mark) + + // Edit until stable. + for { + done := true + + for i := 0; i < len(parens); i++ { // can't use "range parens" here + paren := parens[i] + if new := edit(paren.X); new != nil { + // Update AST and recursively mark nodes. + paren.X = new + ir.EditChildren(new, mark) // mark may append to parens + done = false + } + } + + if done { + break + } + } + + // Finally, remove any parens we inserted. + if len(parens) == 0 { + return // short circuit + } + var unparen func(ir.Node) ir.Node + unparen = func(n ir.Node) ir.Node { + if paren, ok := n.(*ir.ParenExpr); ok { + n = paren.X + } + ir.EditChildren(n, unparen) + return n + } + ir.EditChildren(fn, unparen) +} diff --git a/src/cmd/compile/internal/ir/check_reassign_no.go b/src/cmd/compile/internal/ir/check_reassign_no.go new file mode 100644 index 0000000000..8290a7da7e --- /dev/null +++ b/src/cmd/compile/internal/ir/check_reassign_no.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !checknewoldreassignment + +package ir + +const consistencyCheckEnabled = false diff --git a/src/cmd/compile/internal/ir/check_reassign_yes.go b/src/cmd/compile/internal/ir/check_reassign_yes.go new file mode 100644 index 0000000000..30876cca20 --- /dev/null +++ b/src/cmd/compile/internal/ir/check_reassign_yes.go @@ -0,0 +1,9 @@ +// Copyright 2023 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build checknewoldreassignment + +package ir + +const consistencyCheckEnabled = true diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go index ca2a2d5008..345828c163 100644 --- a/src/cmd/compile/internal/ir/expr.go +++ b/src/cmd/compile/internal/ir/expr.go @@ -190,7 +190,8 @@ type CallExpr struct { RType Node `mknode:"-"` // see reflectdata/helpers.go KeepAlive []*Name // vars to be kept alive until call returns IsDDD bool - NoInline bool + GoDefer bool // whether this call is part of a go or defer statement + NoInline bool // whether this call must not be inlined } func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr { @@ -349,7 +350,7 @@ func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr { return n } -// A StructKeyExpr is an Field: Value composite literal key. +// A StructKeyExpr is a Field: Value composite literal key. type StructKeyExpr struct { miniExpr Field *types.Field @@ -855,13 +856,19 @@ func IsAddressable(n Node) bool { // "g()" expression. func StaticValue(n Node) Node { for { - if n.Op() == OCONVNOP { - n = n.(*ConvExpr).X - continue - } - - if n.Op() == OINLCALL { - n = n.(*InlinedCallExpr).SingleResult() + switch n1 := n.(type) { + case *ConvExpr: + if n1.Op() == OCONVNOP { + n = n1.X + continue + } + case *InlinedCallExpr: + if n1.Op() == OINLCALL { + n = n1.SingleResult() + continue + } + case *ParenExpr: + n = n1.X continue } @@ -922,6 +929,8 @@ FindRHS: // NB: global variables are always considered to be re-assigned. // TODO: handle initial declaration not including an assignment and // followed by a single assignment? +// NOTE: any changes made here should also be made in the corresponding +// code in the ReassignOracle.Init method. 
func Reassigned(name *Name) bool { if name.Op() != ONAME { base.Fatalf("reassigned %v", name) diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go index 303c5e4fd0..a74bb6ebda 100644 --- a/src/cmd/compile/internal/ir/func.go +++ b/src/cmd/compile/internal/ir/func.go @@ -539,7 +539,7 @@ func FuncPC(pos src.XPos, n Node, wantABI obj.ABI) Node { if abi != wantABI { base.ErrorfAt(pos, 0, "internal/abi.FuncPC%s expects an %v function, %s is defined as %v", wantABI, wantABI, name.Sym().Name, abi) } - var e Node = NewLinksymExpr(pos, name.Sym().LinksymABI(abi), types.Types[types.TUINTPTR]) + var e Node = NewLinksymExpr(pos, name.LinksymABI(abi), types.Types[types.TUINTPTR]) e = NewAddrExpr(pos, e) e.SetType(types.Types[types.TUINTPTR].PtrTo()) e = NewConvExpr(pos, OCONVNOP, types.Types[types.TUINTPTR], e) diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go index 6513386f03..21d181dba6 100644 --- a/src/cmd/compile/internal/ir/node.go +++ b/src/cmd/compile/internal/ir/node.go @@ -152,7 +152,7 @@ const ( // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure. // Prior to walk, they are: X(Args), where Args is all regular arguments. // After walk, if any argument whose evaluation might requires temporary variable, - // that temporary variable will be pushed to Init, Args will contains an updated + // that temporary variable will be pushed to Init, Args will contain an updated // set of arguments. OCALLFUNC // X(Args) (function call f(args)) OCALLMETH // X(Args) (direct method call x.Method(args)) diff --git a/src/cmd/compile/internal/ir/reassign_consistency_check.go b/src/cmd/compile/internal/ir/reassign_consistency_check.go new file mode 100644 index 0000000000..06a6c88962 --- /dev/null +++ b/src/cmd/compile/internal/ir/reassign_consistency_check.go @@ -0,0 +1,46 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ir + +import ( + "cmd/compile/internal/base" + "cmd/internal/src" + "fmt" + "path/filepath" + "strings" +) + +// checkStaticValueResult compares the result from ReassignOracle.StaticValue +// with the corresponding result from ir.StaticValue to make sure they agree. +// This method is called only when turned on via build tag. +func checkStaticValueResult(n Node, newres Node) { + oldres := StaticValue(n) + if oldres != newres { + base.Fatalf("%s: new/old static value disagreement on %v:\nnew=%v\nold=%v", fmtFullPos(n.Pos()), n, newres, oldres) + } +} + +// checkReassignedResult compares the result from ReassignOracle.Reassigned +// with the corresponding result from ir.Reassigned to make sure they agree. +// This method is called only when turned on via build tag. +func checkReassignedResult(n *Name, newres bool) { + origres := Reassigned(n) + if newres != origres { + base.Fatalf("%s: new/old reassigned disagreement on %v (class %s) newres=%v oldres=%v", fmtFullPos(n.Pos()), n, n.Class.String(), newres, origres) + } +} + +// fmtFullPos returns a verbose dump for pos p, including inlines. +func fmtFullPos(p src.XPos) string { + var sb strings.Builder + sep := "" + base.Ctxt.AllPos(p, func(pos src.Pos) { + fmt.Fprintf(&sb, sep) + sep = "|" + file := filepath.Base(pos.Filename()) + fmt.Fprintf(&sb, "%s:%d:%d", file, pos.Line(), pos.Col()) + }) + return sb.String() +} diff --git a/src/cmd/compile/internal/ir/reassignment.go b/src/cmd/compile/internal/ir/reassignment.go new file mode 100644 index 0000000000..9974292471 --- /dev/null +++ b/src/cmd/compile/internal/ir/reassignment.go @@ -0,0 +1,205 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
package ir

import (
	"cmd/compile/internal/base"
)

// A ReassignOracle efficiently answers queries about whether local
// variables are reassigned. This helper works by looking for function
// params and short variable declarations (e.g.
// https://go.dev/ref/spec#Short_variable_declarations) that are
// neither address taken nor subsequently re-assigned. It is intended
// to operate much like "ir.StaticValue" and "ir.Reassigned", but in a
// way that does just a single walk of the containing function (as
// opposed to a new walk on every call).
type ReassignOracle struct {
	fn *Func
	// singleDef maps a candidate name to its defining assignment (or,
	// for params, the defining func).
	singleDef map[*Name]Node
}

// Init initializes the oracle based on the IR in function fn, laying
// the groundwork for future calls to the StaticValue and Reassigned
// methods. If the fn's IR is subsequently modified, Init must be
// called again.
func (ro *ReassignOracle) Init(fn *Func) {
	ro.fn = fn

	// Collect candidate map. Start by adding function parameters
	// explicitly.
	ro.singleDef = make(map[*Name]Node)
	sig := fn.Type()
	numParams := sig.NumRecvs() + sig.NumParams()
	for _, param := range fn.Dcl[:numParams] {
		if IsBlank(param) {
			continue
		}
		// For params, use func itself as defining node.
		ro.singleDef[param] = fn
	}

	// Walk the function body to discover any locals assigned
	// via ":=" syntax (e.g. "a := <expr>"), recursing into any
	// closures encountered along the way.
	var findLocals func(n Node) bool
	findLocals = func(n Node) bool {
		if nn, ok := n.(*Name); ok {
			if nn.Defn != nil && !nn.Addrtaken() && nn.Class == PAUTO {
				ro.singleDef[nn] = nn.Defn
			}
		} else if nn, ok := n.(*ClosureExpr); ok {
			Any(nn.Func, findLocals)
		}
		return false
	}
	Any(fn, findLocals)

	// outerName unwraps x (e.g. an index or field expression) to the
	// canonical *Name it ultimately refers to, or nil.
	outerName := func(x Node) *Name {
		if x == nil {
			return nil
		}
		n, ok := OuterValue(x).(*Name)
		if ok {
			return n.Canonical()
		}
		return nil
	}

	// pruneIfNeeded examines node nn appearing on the left hand side
	// of assignment statement asn to see if it contains a reassignment
	// to any nodes in our candidate map ro.singleDef; if a reassignment
	// is found, the corresponding name is deleted from singleDef.
	pruneIfNeeded := func(nn Node, asn Node) {
		oname := outerName(nn)
		if oname == nil {
			return
		}
		defn, ok := ro.singleDef[oname]
		if !ok {
			return
		}
		// Any assignment to a param invalidates the entry.
		paramAssigned := oname.Class == PPARAM
		// Assignment to a local is ok iff the assignment is its orig def.
		localAssigned := (oname.Class == PAUTO && asn != defn)
		if paramAssigned || localAssigned {
			// We found an assignment to name N that doesn't
			// correspond to its original definition; remove
			// from candidates.
			delete(ro.singleDef, oname)
		}
	}

	// Prune away anything that looks assigned. This code modeled after
	// similar code in ir.Reassigned; any changes there should be made
	// here as well.
	var do func(n Node) bool
	do = func(n Node) bool {
		switch n.Op() {
		case OAS:
			asn := n.(*AssignStmt)
			pruneIfNeeded(asn.X, n)
		case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
			asn := n.(*AssignListStmt)
			for _, p := range asn.Lhs {
				pruneIfNeeded(p, n)
			}
		case OASOP:
			asn := n.(*AssignOpStmt)
			pruneIfNeeded(asn.X, n)
		case ORANGE:
			rs := n.(*RangeStmt)
			pruneIfNeeded(rs.Key, n)
			pruneIfNeeded(rs.Value, n)
		case OCLOSURE:
			n := n.(*ClosureExpr)
			Any(n.Func, do)
		}
		return false
	}
	Any(fn, do)
}

// StaticValue method has the same semantics as the ir package function
// of the same name; see comments on [StaticValue].
func (ro *ReassignOracle) StaticValue(n Node) Node {
	arg := n
	for {
		// Step through value-preserving wrappers, as ir.StaticValue does.
		if n.Op() == OCONVNOP {
			n = n.(*ConvExpr).X
			continue
		}

		if n.Op() == OINLCALL {
			n = n.(*InlinedCallExpr).SingleResult()
			continue
		}

		n1 := ro.staticValue1(n)
		if n1 == nil {
			if consistencyCheckEnabled {
				// Cross-check against ir.StaticValue (build-tag gated).
				checkStaticValueResult(arg, n)
			}
			return n
		}
		n = n1
	}
}

// staticValue1 returns the RHS of n's single defining assignment, or
// nil if n is not a single-def local tracked by the oracle.
func (ro *ReassignOracle) staticValue1(nn Node) Node {
	if nn.Op() != ONAME {
		return nil
	}
	n := nn.(*Name).Canonical()
	if n.Class != PAUTO {
		return nil
	}

	defn := n.Defn
	if defn == nil {
		return nil
	}

	var rhs Node
FindRHS:
	switch defn.Op() {
	case OAS:
		defn := defn.(*AssignStmt)
		rhs = defn.Y
	case OAS2:
		defn := defn.(*AssignListStmt)
		for i, lhs := range defn.Lhs {
			if lhs == n {
				rhs = defn.Rhs[i]
				break FindRHS
			}
		}
		base.Fatalf("%v missing from LHS of %v", n, defn)
	default:
		return nil
	}
	if rhs == nil {
		base.Fatalf("RHS is nil: %v", defn)
	}

	// Only names that survived Init's pruning are single-def.
	if _, ok := ro.singleDef[n]; !ok {
		return nil
	}

	return rhs
}

// Reassigned method has the same semantics as the ir package function
// of the same name; see comments on [Reassigned] for more info.
+func (ro *ReassignOracle) Reassigned(n *Name) bool { + _, ok := ro.singleDef[n] + result := !ok + if consistencyCheckEnabled { + checkReassignedResult(n, result) + } + return result +} diff --git a/src/cmd/compile/internal/loong64/galign.go b/src/cmd/compile/internal/loong64/galign.go index 99ab7bdfb5..a613165054 100644 --- a/src/cmd/compile/internal/loong64/galign.go +++ b/src/cmd/compile/internal/loong64/galign.go @@ -20,4 +20,6 @@ func Init(arch *ssagen.ArchInfo) { arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {} arch.SSAGenValue = ssaGenValue arch.SSAGenBlock = ssaGenBlock + arch.LoadRegResult = loadRegResult + arch.SpillArgReg = spillArgReg } diff --git a/src/cmd/compile/internal/loong64/ssa.go b/src/cmd/compile/internal/loong64/ssa.go index 6e81da3ef8..e7298bdb9f 100644 --- a/src/cmd/compile/internal/loong64/ssa.go +++ b/src/cmd/compile/internal/loong64/ssa.go @@ -10,6 +10,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/logopt" + "cmd/compile/internal/objw" "cmd/compile/internal/ssa" "cmd/compile/internal/ssagen" "cmd/compile/internal/types" @@ -144,6 +145,18 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.From.Type = obj.TYPE_REG p.From.Reg = r ssagen.AddrAuto(&p.To, v) + case ssa.OpArgIntReg, ssa.OpArgFloatReg: + // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill + // The loop only runs once. + for _, a := range v.Block.Func.RegArgs { + // Pass the spill/unspill information along to the assembler, offset by size of + // the saved LR slot. 
+ addr := ssagen.SpillSlotAddr(a, loong64.REGSP, base.Ctxt.Arch.FixedFrameSize) + s.FuncInfo().AddSpill( + obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type, a.Reg), Spill: storeByType(a.Type, a.Reg)}) + } + v.Block.Func.RegArgs = nil + ssagen.CheckArgReg(v) case ssa.OpLOONG64ADDV, ssa.OpLOONG64SUBV, ssa.OpLOONG64AND, @@ -362,13 +375,12 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpLOONG64DUFFZERO: - // runtime.duffzero expects start address in R19 + // runtime.duffzero expects start address in R20 p := s.Prog(obj.ADUFFZERO) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Duffzero p.To.Offset = v.AuxInt - case ssa.OpLOONG64LoweredZero: // MOVx R0, (Rarg0) // ADDV $sz, Rarg0 @@ -797,3 +809,22 @@ func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) { b.Fatalf("branch not implemented: %s", b.LongString()) } } + +func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p := s.Prog(loadByType(t, reg)) + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_AUTO + p.From.Sym = n.Linksym() + p.From.Offset = n.FrameOffset() + off + p.To.Type = obj.TYPE_REG + p.To.Reg = reg + return p +} + +func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog { + p = pp.Append(p, storeByType(t, reg), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off) + p.To.Name = obj.NAME_PARAM + p.To.Sym = n.Linksym() + p.Pos = p.Pos.WithNotStmt() + return p +} diff --git a/src/cmd/compile/internal/loopvar/loopvar_test.go b/src/cmd/compile/internal/loopvar/loopvar_test.go index c8e11dbd07..64cfdb77d9 100644 --- a/src/cmd/compile/internal/loopvar/loopvar_test.go +++ b/src/cmd/compile/internal/loopvar/loopvar_test.go @@ -251,7 +251,7 @@ func TestLoopVarVersionEnableFlag(t *testing.T) { t.Logf(m) - yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, 
heap-allocated (loop inlined into ./opt.go:30)") + yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)") nCount := strings.Count(m, "shared") if yCount != 1 { @@ -290,7 +290,7 @@ func TestLoopVarVersionEnableGoBuild(t *testing.T) { t.Logf(m) - yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:32)") + yCount := strings.Count(m, "opt-122.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-122.go:31)") nCount := strings.Count(m, "shared") if yCount != 1 { @@ -329,7 +329,7 @@ func TestLoopVarVersionDisableFlag(t *testing.T) { t.Logf(m) // expect error - yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:30)") + yCount := strings.Count(m, "opt.go:16:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt.go:29)") nCount := strings.Count(m, "shared") if yCount != 0 { @@ -368,7 +368,7 @@ func TestLoopVarVersionDisableGoBuild(t *testing.T) { t.Logf(m) // expect error - yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:32)") + yCount := strings.Count(m, "opt-121.go:18:6: loop variable private now per-iteration, heap-allocated (loop inlined into ./opt-121.go:31)") nCount := strings.Count(m, "shared") if yCount != 0 { diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-121.go b/src/cmd/compile/internal/loopvar/testdata/opt-121.go index 131033b13c..4afb658fc8 100644 --- a/src/cmd/compile/internal/loopvar/testdata/opt-121.go +++ b/src/cmd/compile/internal/loopvar/testdata/opt-121.go @@ -19,7 +19,6 @@ func inline(j, k int) []*int { a = append(a, &private) } return a - } //go:noinline diff --git a/src/cmd/compile/internal/loopvar/testdata/opt-122.go 
b/src/cmd/compile/internal/loopvar/testdata/opt-122.go index 0ed6feee04..9dceab9175 100644 --- a/src/cmd/compile/internal/loopvar/testdata/opt-122.go +++ b/src/cmd/compile/internal/loopvar/testdata/opt-122.go @@ -19,7 +19,6 @@ func inline(j, k int) []*int { a = append(a, &private) } return a - } //go:noinline diff --git a/src/cmd/compile/internal/loopvar/testdata/opt.go b/src/cmd/compile/internal/loopvar/testdata/opt.go index 1bcd73614d..82c8616bcd 100644 --- a/src/cmd/compile/internal/loopvar/testdata/opt.go +++ b/src/cmd/compile/internal/loopvar/testdata/opt.go @@ -17,7 +17,6 @@ func inline(j, k int) []*int { a = append(a, &private) } return a - } //go:noinline diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go index 1f7b497599..0bff71e658 100644 --- a/src/cmd/compile/internal/noder/helpers.go +++ b/src/cmd/compile/internal/noder/helpers.go @@ -80,7 +80,7 @@ func idealType(tv syntax.TypeAndValue) types2.Type { // types2 mostly satisfies this expectation already. But there are a few // cases where the Go spec doesn't require converting to concrete type, // and so types2 leaves them untyped. So we need to fix those up here. 
- typ := tv.Type + typ := types2.Unalias(tv.Type) if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 { switch basic.Kind() { case types2.UntypedNil: @@ -99,6 +99,8 @@ func idealType(tv syntax.TypeAndValue) types2.Type { typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition case types2.UntypedString: typ = types2.Typ[types2.String] // argument to "append" or "copy" calls + case types2.UntypedRune: + typ = types2.Typ[types2.Int32] // range over rune default: return nil } @@ -107,13 +109,14 @@ func idealType(tv syntax.TypeAndValue) types2.Type { } func isTypeParam(t types2.Type) bool { - _, ok := t.(*types2.TypeParam) + _, ok := types2.Unalias(t).(*types2.TypeParam) return ok } // isNotInHeap reports whether typ is or contains an element of type // runtime/internal/sys.NotInHeap. func isNotInHeap(typ types2.Type) bool { + typ = types2.Unalias(typ) if named, ok := typ.(*types2.Named); ok { if obj := named.Obj(); obj.Name() == "nih" && obj.Pkg().Path() == "runtime/internal/sys" { return true diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go index 46511d1f97..e0b7bb946d 100644 --- a/src/cmd/compile/internal/noder/irgen.go +++ b/src/cmd/compile/internal/noder/irgen.go @@ -92,23 +92,22 @@ func checkFiles(m posMap, noders []*noder) (*types2.Package, *types2.Info) { } // Check for anonymous interface cycles (#56103). 
- if base.Debug.InterfaceCycles == 0 { - var f cycleFinder - for _, file := range files { - syntax.Inspect(file, func(n syntax.Node) bool { - if n, ok := n.(*syntax.InterfaceType); ok { - if f.hasCycle(n.GetTypeInfo().Type.(*types2.Interface)) { - base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)") + // TODO(gri) move this code into the type checkers (types2 and go/types) + var f cycleFinder + for _, file := range files { + syntax.Inspect(file, func(n syntax.Node) bool { + if n, ok := n.(*syntax.InterfaceType); ok { + if f.hasCycle(types2.Unalias(n.GetTypeInfo().Type).(*types2.Interface)) { + base.ErrorfAt(m.makeXPos(n.Pos()), errors.InvalidTypeCycle, "invalid recursive type: anonymous interface refers to itself (see https://go.dev/issue/56103)") - for typ := range f.cyclic { - f.cyclic[typ] = false // suppress duplicate errors - } + for typ := range f.cyclic { + f.cyclic[typ] = false // suppress duplicate errors } - return false } - return true - }) - } + return false + } + return true + }) } base.ExitIfErrors() @@ -172,7 +171,7 @@ func (f *cycleFinder) hasCycle(typ *types2.Interface) bool { // visit recursively walks typ0 to check any referenced interface types. 
func (f *cycleFinder) visit(typ0 types2.Type) bool { for { // loop for tail recursion - switch typ := typ0.(type) { + switch typ := types2.Unalias(typ0).(type) { default: base.Fatalf("unexpected type: %T", typ) diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go index c1145f980e..25d6fb53e3 100644 --- a/src/cmd/compile/internal/noder/reader.go +++ b/src/cmd/compile/internal/noder/reader.go @@ -5,6 +5,7 @@ package noder import ( + "encoding/hex" "fmt" "go/constant" "internal/buildcfg" @@ -15,12 +16,14 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/dwarfgen" "cmd/compile/internal/inline" + "cmd/compile/internal/inline/interleaved" "cmd/compile/internal/ir" "cmd/compile/internal/objw" "cmd/compile/internal/reflectdata" "cmd/compile/internal/staticinit" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "cmd/internal/notsha256" "cmd/internal/obj" "cmd/internal/objabi" "cmd/internal/src" @@ -660,9 +663,24 @@ func (pr *pkgReader) objInstIdx(info objInfo, dict *readerDict, shaped bool) ir. } // objIdx returns the specified object, instantiated with the given -// type arguments, if any. If shaped is true, then the shaped variant -// of the object is returned instead. +// type arguments, if any. +// If shaped is true, then the shaped variant of the object is returned +// instead. func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) ir.Node { + n, err := pr.objIdxMayFail(idx, implicits, explicits, shaped) + if err != nil { + base.Fatalf("%v", err) + } + return n +} + +// objIdxMayFail is equivalent to objIdx, but returns an error rather than +// failing the build if this object requires type arguments and the incorrect +// number of type arguments were passed. +// +// Other sources of internal failure (such as duplicate definitions) still fail +// the build. 
+func (pr *pkgReader) objIdxMayFail(idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (ir.Node, error) { rname := pr.newReader(pkgbits.RelocName, idx, pkgbits.SyncObject1) _, sym := rname.qualifiedIdent() tag := pkgbits.CodeObj(rname.Code(pkgbits.SyncCodeObj)) @@ -671,22 +689,25 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ assert(!sym.IsBlank()) switch sym.Pkg { case types.BuiltinPkg, types.UnsafePkg: - return sym.Def.(ir.Node) + return sym.Def.(ir.Node), nil } if pri, ok := objReader[sym]; ok { - return pri.pr.objIdx(pri.idx, nil, explicits, shaped) + return pri.pr.objIdxMayFail(pri.idx, nil, explicits, shaped) } if sym.Pkg.Path == "runtime" { - return typecheck.LookupRuntime(sym.Name) + return typecheck.LookupRuntime(sym.Name), nil } base.Fatalf("unresolved stub: %v", sym) } - dict := pr.objDictIdx(sym, idx, implicits, explicits, shaped) + dict, err := pr.objDictIdx(sym, idx, implicits, explicits, shaped) + if err != nil { + return nil, err + } sym = dict.baseSym if !sym.IsBlank() && sym.Def != nil { - return sym.Def.(*ir.Name) + return sym.Def.(*ir.Name), nil } r := pr.newReader(pkgbits.RelocObj, idx, pkgbits.SyncObject1) @@ -722,7 +743,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ name := do(ir.OTYPE, false) setType(name, r.typ()) name.SetAlias(true) - return name + return name, nil case pkgbits.ObjConst: name := do(ir.OLITERAL, false) @@ -730,7 +751,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ val := FixValue(typ, r.Value()) setType(name, typ) setValue(name, val) - return name + return name, nil case pkgbits.ObjFunc: if sym.Name == "init" { @@ -765,7 +786,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, explicits []*types.Typ } rext.funcExt(name, nil) - return name + return name, nil case pkgbits.ObjType: name := do(ir.OTYPE, true) @@ -802,13 +823,13 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index, implicits, 
explicits []*types.Typ r.needWrapper(typ) } - return name + return name, nil case pkgbits.ObjVar: name := do(ir.ONAME, false) setType(name, r.typ()) rext.varExt(name) - return name + return name, nil } } @@ -882,7 +903,16 @@ func shapify(targ *types.Type, basic bool) *types.Type { under = types.NewPtr(types.Types[types.TUINT8]) } - sym := types.ShapePkg.Lookup(under.LinkString()) + // Hash long type names to bound symbol name length seen by users, + // particularly for large protobuf structs (#65030). + uls := under.LinkString() + if base.Debug.MaxShapeLen != 0 && + len(uls) > base.Debug.MaxShapeLen { + h := notsha256.Sum256([]byte(uls)) + uls = hex.EncodeToString(h[:]) + } + + sym := types.ShapePkg.Lookup(uls) if sym.Def == nil { name := ir.NewDeclNameAt(under.Pos(), ir.OTYPE, sym) typ := types.NewNamed(name) @@ -896,7 +926,7 @@ func shapify(targ *types.Type, basic bool) *types.Type { } // objDictIdx reads and returns the specified object dictionary. -func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) *readerDict { +func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, explicits []*types.Type, shaped bool) (*readerDict, error) { r := pr.newReader(pkgbits.RelocObjDict, idx, pkgbits.SyncObject1) dict := readerDict{ @@ -907,7 +937,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex nexplicits := r.Len() if nimplicits > len(implicits) || nexplicits != len(explicits) { - base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits)) + return nil, fmt.Errorf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits)) } dict.targs = append(implicits[:nimplicits:nimplicits], explicits...) 
@@ -972,7 +1002,7 @@ func (pr *pkgReader) objDictIdx(sym *types.Sym, idx pkgbits.Index, implicits, ex dict.itabs[i] = itabInfo{typ: r.typInfo(), iface: r.typInfo()} } - return &dict + return &dict, nil } func (r *reader) typeParamNames() { @@ -2517,7 +2547,10 @@ func (pr *pkgReader) objDictName(idx pkgbits.Index, implicits, explicits []*type base.Fatalf("unresolved stub: %v", sym) } - dict := pr.objDictIdx(sym, idx, implicits, explicits, false) + dict, err := pr.objDictIdx(sym, idx, implicits, explicits, false) + if err != nil { + base.Fatalf("%v", err) + } return pr.dictNameOf(dict) } @@ -3587,6 +3620,57 @@ func usedLocals(body []ir.Node) ir.NameSet { } // @@@ Method wrappers +// +// Here we handle constructing "method wrappers," alternative entry +// points that adapt methods to different calling conventions. Given a +// user-declared method "func (T) M(i int) bool { ... }", there are a +// few wrappers we may need to construct: +// +// - Implicit dereferencing. Methods declared with a value receiver T +// are also included in the method set of the pointer type *T, so +// we need to construct a wrapper like "func (recv *T) M(i int) +// bool { return (*recv).M(i) }". +// +// - Promoted methods. If struct type U contains an embedded field of +// type T or *T, we need to construct a wrapper like "func (recv U) +// M(i int) bool { return recv.T.M(i) }". +// +// - Method values. If x is an expression of type T, then "x.M" is +// roughly "tmp := x; func(i int) bool { return tmp.M(i) }". +// +// At call sites, we always prefer to call the user-declared method +// directly, if known, so wrappers are only needed for indirect calls +// (for example, interface method calls that can't be devirtualized). +// Consequently, we can save some compile time by skipping +// construction of wrappers that are never needed. +// +// Alternatively, because the linker doesn't care which compilation +// unit constructed a particular wrapper, we can instead construct +// them as needed. 
However, if a wrapper is needed in multiple +// downstream packages, we may end up needing to compile it multiple +// times, costing us more compile time and object file size. (We mark +// the wrappers as DUPOK, so the linker doesn't complain about the +// duplicate symbols.) +// +// The current heuristics we use to balance these trade offs are: +// +// - For a (non-parameterized) defined type T, we construct wrappers +// for *T and any promoted methods on T (and *T) in the same +// compilation unit as the type declaration. +// +// - For a parameterized defined type, we construct wrappers in the +// compilation units in which the type is instantiated. We +// similarly handle wrappers for anonymous types with methods and +// compilation units where their type literals appear in source. +// +// - Method value expressions are relatively uncommon, so we +// construct their wrappers in the compilation units that they +// appear in. +// +// Finally, as an opportunistic compile-time optimization, if we know +// a wrapper was constructed in any imported package's compilation +// unit, then we skip constructing a duplicate one. However, currently +// this is only done on a best-effort basis. // needWrapperTypes lists types for which we may need to generate // method wrappers. @@ -3610,6 +3694,8 @@ type methodValueWrapper struct { method *types.Field } +// needWrapper records that wrapper methods may be needed at link +// time. func (r *reader) needWrapper(typ *types.Type) { if typ.IsPtr() { return @@ -3643,6 +3729,8 @@ func (r *reader) importedDef() bool { return r.p != localPkgReader && !r.hasTypeParams() } +// MakeWrappers constructs all wrapper methods needed for the target +// compilation unit. 
func MakeWrappers(target *ir.Package) { // always generate a wrapper for error.Error (#29304) needWrapperTypes = append(needWrapperTypes, types.ErrorType) @@ -3778,7 +3866,6 @@ func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Packa func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func { sig := newWrapperType(wrapper, method) - fn := ir.NewFunc(pos, pos, sym, sig) fn.DeclareParams(true) fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers? @@ -3794,7 +3881,7 @@ func finishWrapperFunc(fn *ir.Func, target *ir.Package) { // We generate wrappers after the global inlining pass, // so we're responsible for applying inlining ourselves here. // TODO(prattmic): plumb PGO. - inline.InlineCalls(fn, nil) + interleaved.DevirtualizeAndInlineFunc(fn, nil) // The body of wrapper function after inlining may reveal new ir.OMETHVALUE node, // we don't know whether wrapper function has been generated for it or not, so diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go index a803e53502..492b00d256 100644 --- a/src/cmd/compile/internal/noder/unified.go +++ b/src/cmd/compile/internal/noder/unified.go @@ -80,7 +80,11 @@ func lookupFunction(pkg *types.Pkg, symName string) (*ir.Func, error) { return nil, fmt.Errorf("func sym %v missing objReader", sym) } - name := pri.pr.objIdx(pri.idx, nil, nil, false).(*ir.Name) + node, err := pri.pr.objIdxMayFail(pri.idx, nil, nil, false) + if err != nil { + return nil, fmt.Errorf("func sym %v lookup error: %w", sym, err) + } + name := node.(*ir.Name) if name.Op() != ir.ONAME || name.Class != ir.PFUNC { return nil, fmt.Errorf("func sym %v refers to non-function name: %v", sym, name) } @@ -105,7 +109,11 @@ func lookupMethod(pkg *types.Pkg, symName string) (*ir.Func, error) { return nil, fmt.Errorf("type sym %v missing objReader", typ) } - name := pri.pr.objIdx(pri.idx, nil, nil, false).(*ir.Name) + node, err 
:= pri.pr.objIdxMayFail(pri.idx, nil, nil, false) + if err != nil { + return nil, fmt.Errorf("func sym %v lookup error: %w", typ, err) + } + name := node.(*ir.Name) if name.Op() != ir.OTYPE { return nil, fmt.Errorf("type sym %v refers to non-type name: %v", typ, name) } @@ -280,7 +288,7 @@ func readBodies(target *ir.Package, duringInlining bool) { oldLowerM := base.Flag.LowerM base.Flag.LowerM = 0 - inline.InlineDecls(nil, inlDecls, false) + inline.CanInlineFuncs(inlDecls, nil) base.Flag.LowerM = oldLowerM for _, fn := range inlDecls { diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go index 46d5213694..c57ccdf36d 100644 --- a/src/cmd/compile/internal/noder/writer.go +++ b/src/cmd/compile/internal/noder/writer.go @@ -189,7 +189,9 @@ type writer struct { // A writerDict tracks types and objects that are used by a declaration. type writerDict struct { - implicits []*types2.TypeName + // implicits is a slice of type parameters from the enclosing + // declarations. + implicits []*types2.TypeParam // derived is a slice of type indices for computing derived types // (i.e., types that depend on the declaration's type parameters). @@ -217,7 +219,7 @@ type itabInfo struct { // generic function or method. func (dict *writerDict) typeParamIndex(typ *types2.TypeParam) int { for idx, implicit := range dict.implicits { - if implicit.Type().(*types2.TypeParam) == typ { + if implicit == typ { return idx } } @@ -513,24 +515,20 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo { default: // Handle "byte" and "rune" as references to their TypeNames. 
- obj := types2.Universe.Lookup(typ.Name()) + obj := types2.Universe.Lookup(typ.Name()).(*types2.TypeName) assert(obj.Type() == typ) w.Code(pkgbits.TypeNamed) - w.obj(obj, nil) + w.namedType(obj, nil) } case *types2.Named: - obj, targs := splitNamed(typ) - - // Defined types that are declared within a generic function (and - // thus have implicit type parameters) are always derived types. - if w.p.hasImplicitTypeParams(obj) { - w.derived = true - } - w.Code(pkgbits.TypeNamed) - w.obj(obj, targs) + w.namedType(splitNamed(typ)) + + case *types2.Alias: + w.Code(pkgbits.TypeNamed) + w.namedType(typ.Obj(), nil) case *types2.TypeParam: w.derived = true @@ -596,6 +594,17 @@ func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo { return typeInfo{idx: w.Flush(), derived: false} } +// namedType writes a use of the given named type into the bitstream. +func (w *writer) namedType(obj *types2.TypeName, targs *types2.TypeList) { + // Named types that are declared within a generic function (and + // thus have implicit type parameters) are always derived types. + if w.p.hasImplicitTypeParams(obj) { + w.derived = true + } + + w.obj(obj, targs) +} + func (w *writer) structType(typ *types2.Struct) { w.Len(typ.NumFields()) for i := 0; i < typ.NumFields(); i++ { @@ -822,7 +831,7 @@ func (w *writer) doObj(wext *writer, obj types2.Object) pkgbits.CodeObj { case *types2.TypeName: if obj.IsAlias() { w.pos(obj) - w.typ(obj.Type()) + w.typ(types2.Unalias(obj.Type())) return pkgbits.ObjAlias } @@ -889,8 +898,7 @@ func (w *writer) objDict(obj types2.Object, dict *writerDict) { // parameter is constrained to `int | uint` but then never used in // arithmetic/conversions/etc, we could shape those together. 
for _, implicit := range dict.implicits { - tparam := implicit.Type().(*types2.TypeParam) - w.Bool(tparam.Underlying().(*types2.Interface).IsMethodSet()) + w.Bool(implicit.Underlying().(*types2.Interface).IsMethodSet()) } for i := 0; i < ntparams; i++ { tparam := tparams.At(i) @@ -1209,10 +1217,17 @@ func (w *writer) stmt(stmt syntax.Stmt) { func (w *writer) stmts(stmts []syntax.Stmt) { dead := false w.Sync(pkgbits.SyncStmts) - for _, stmt := range stmts { - if dead { - // Any statements after a terminating statement are safe to - // omit, at least until the next labeled statement. + var lastLabel = -1 + for i, stmt := range stmts { + if _, ok := stmt.(*syntax.LabeledStmt); ok { + lastLabel = i + } + } + for i, stmt := range stmts { + if dead && i > lastLabel { + // Any statements after a terminating and last label statement are safe to omit. + // Otherwise, code after label statement may refer to dead stmts between terminating + // and label statement, see issue #65593. if _, ok := stmt.(*syntax.LabeledStmt); !ok { continue } @@ -2124,7 +2139,7 @@ func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *ty // Method on a type parameter. These require an indirect call // through the current function's runtime dictionary. - if typeParam, ok := recv.(*types2.TypeParam); w.Bool(ok) { + if typeParam, ok := types2.Unalias(recv).(*types2.TypeParam); w.Bool(ok) { typeParamIdx := w.dict.typeParamIndex(typeParam) methodInfo := w.p.selectorIdx(fun) @@ -2137,7 +2152,7 @@ func (w *writer) methodExpr(expr *syntax.SelectorExpr, recv types2.Type, sel *ty } if !isInterface(recv) { - if named, ok := deref2(recv).(*types2.Named); ok { + if named, ok := types2.Unalias(deref2(recv)).(*types2.Named); ok { obj, targs := splitNamed(named) info := w.p.objInstIdx(obj, targs, w.dict) @@ -2362,12 +2377,16 @@ func (w *writer) varDictIndex(obj *types2.Var) { } } +// isUntyped reports whether typ is an untyped type. 
func isUntyped(typ types2.Type) bool { + // Note: types2.Unalias is unnecessary here, since untyped types can't be aliased. basic, ok := typ.(*types2.Basic) return ok && basic.Info()&types2.IsUntyped != 0 } +// isTuple reports whether typ is a tuple type. func isTuple(typ types2.Type) bool { + // Note: types2.Unalias is unnecessary here, since tuple types can't be aliased. _, ok := typ.(*types2.Tuple) return ok } @@ -2416,7 +2435,7 @@ func (w *writer) exprType(iface types2.Type, typ syntax.Expr) { // If typ is a type parameter, then isInterface reports an internal // compiler error instead. func isInterface(typ types2.Type) bool { - if _, ok := typ.(*types2.TypeParam); ok { + if _, ok := types2.Unalias(typ).(*types2.TypeParam); ok { // typ is a type parameter and may be instantiated as either a // concrete or interface type, so the writer can't depend on // knowing this. @@ -2447,7 +2466,7 @@ type typeDeclGen struct { gen int // Implicit type parameters in scope at this type declaration. - implicits []*types2.TypeName + implicits []*types2.TypeParam } type fileImports struct { @@ -2465,7 +2484,7 @@ type declCollector struct { typegen *int file *fileImports withinFunc bool - implicits []*types2.TypeName + implicits []*types2.TypeParam } func (c *declCollector) withTParams(obj types2.Object) *declCollector { @@ -2478,7 +2497,7 @@ func (c *declCollector) withTParams(obj types2.Object) *declCollector { copy := *c copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)] for i := 0; i < n; i++ { - copy.implicits = append(copy.implicits, tparams.At(i).Obj()) + copy.implicits = append(copy.implicits, tparams.At(i)) } return © } @@ -2867,9 +2886,9 @@ func (pw *pkgWriter) isBuiltin(expr syntax.Expr, builtin string) bool { // recvBase returns the base type for the given receiver parameter. 
func recvBase(recv *types2.Var) *types2.Named { - typ := recv.Type() + typ := types2.Unalias(recv.Type()) if ptr, ok := typ.(*types2.Pointer); ok { - typ = ptr.Elem() + typ = types2.Unalias(ptr.Elem()) } return typ.(*types2.Named) } @@ -2945,7 +2964,7 @@ func asWasmImport(p syntax.Pragma) *WasmImport { // isPtrTo reports whether from is the type *to. func isPtrTo(from, to types2.Type) bool { - ptr, ok := from.(*types2.Pointer) + ptr, ok := types2.Unalias(from).(*types2.Pointer) return ok && types2.Identical(ptr.Elem(), to) } diff --git a/src/cmd/compile/internal/pgo/irgraph.go b/src/cmd/compile/internal/pgo/irgraph.go index 7a7cd20f2b..9ed16d224b 100644 --- a/src/cmd/compile/internal/pgo/irgraph.go +++ b/src/cmd/compile/internal/pgo/irgraph.go @@ -41,15 +41,19 @@ package pgo import ( + "bufio" "cmd/compile/internal/base" "cmd/compile/internal/ir" - "cmd/compile/internal/pgo/internal/graph" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" + "errors" "fmt" "internal/profile" + "io" "os" "sort" + "strconv" + "strings" ) // IRGraph is a call graph with nodes pointing to IRs of functions and edges @@ -129,7 +133,7 @@ type Profile struct { // the percentage threshold for hot/cold partitioning. TotalWeight int64 - // EdgeMap contains all unique call edges in the profile and their + // NamedEdgeMap contains all unique call edges in the profile and their // edge weight. NamedEdgeMap NamedEdgeMap @@ -138,25 +142,69 @@ type Profile struct { WeightedCG *IRGraph } -// New generates a profile-graph from the profile. +var wantHdr = "GO PREPROFILE V1\n" + +func isPreProfileFile(r *bufio.Reader) (bool, error) { + hdr, err := r.Peek(len(wantHdr)) + if err == io.EOF { + // Empty file. + return false, nil + } else if err != nil { + return false, fmt.Errorf("error reading profile header: %w", err) + } + + return string(hdr) == wantHdr, nil +} + +// New generates a profile-graph from the profile or pre-processed profile. 
func New(profileFile string) (*Profile, error) { f, err := os.Open(profileFile) if err != nil { return nil, fmt.Errorf("error opening profile: %w", err) } defer f.Close() - profile, err := profile.Parse(f) + + r := bufio.NewReader(f) + + isPreProf, err := isPreProfileFile(r) if err != nil { + return nil, fmt.Errorf("error processing profile header: %w", err) + } + + if isPreProf { + profile, err := processPreprof(r) + if err != nil { + return nil, fmt.Errorf("error processing preprocessed PGO profile: %w", err) + } + return profile, nil + } + + profile, err := processProto(r) + if err != nil { + return nil, fmt.Errorf("error processing pprof PGO profile: %w", err) + } + return profile, nil + +} + +// processProto generates a profile-graph from the profile. +func processProto(r io.Reader) (*Profile, error) { + p, err := profile.Parse(r) + if errors.Is(err, profile.ErrNoData) { + // Treat a completely empty file the same as a profile with no + // samples: nothing to do. + return nil, nil + } else if err != nil { return nil, fmt.Errorf("error parsing profile: %w", err) } - if len(profile.Sample) == 0 { + if len(p.Sample) == 0 { // We accept empty profiles, but there is nothing to do. return nil, nil } valueIndex := -1 - for i, s := range profile.SampleType { + for i, s := range p.SampleType { // Samples count is the raw data collected, and CPU nanoseconds is just // a scaled version of it, so either one we can find is fine. 
if (s.Type == "samples" && s.Unit == "count") || @@ -170,7 +218,7 @@ func New(profileFile string) (*Profile, error) { return nil, fmt.Errorf(`profile does not contain a sample index with value/type "samples/count" or cpu/nanoseconds"`) } - g := graph.NewGraph(profile, &graph.Options{ + g := profile.NewGraph(p, &profile.Options{ SampleValue: func(v []int64) int64 { return v[valueIndex] }, }) @@ -193,45 +241,31 @@ func New(profileFile string) (*Profile, error) { -// createNamedEdgeMap builds a map of callsite-callee edge weights from the -// profile-graph. -// -// Caller should ignore the profile if totalWeight == 0. -func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) { - seenStartLine := false - - // Process graph and build various node and edge maps which will - // be consumed by AST walk. - weight := make(map[NamedCallEdge]int64) - for _, n := range g.Nodes { - seenStartLine = seenStartLine || n.Info.StartLine != 0 - - canonicalName := n.Info.Name - // Create the key to the nodeMapKey. - namedEdge := NamedCallEdge{ - CallerName: canonicalName, - CallSiteOffset: n.Info.Lineno - n.Info.StartLine, - } - - for _, e := range n.Out { - totalWeight += e.WeightValue() - namedEdge.CalleeName = e.Dest.Info.Name - // Create new entry or increment existing entry. - weight[namedEdge] += e.WeightValue() - } +// processPreprof generates a profile-graph from the pre-processed profile. +func processPreprof(r io.Reader) (*Profile, error) { + namedEdgeMap, totalWeight, err := createNamedEdgeMapFromPreprocess(r) + if err != nil { + return nil, err } if totalWeight == 0 { + return nil, nil // accept but ignore profile with no samples. + } + + // Create package-level call graph with weights from profile and IR. 
+ wg := createIRGraph(namedEdgeMap) + + return &Profile{ + TotalWeight: totalWeight, + NamedEdgeMap: namedEdgeMap, + WeightedCG: wg, + }, nil +} + +func postProcessNamedEdgeMap(weight map[NamedCallEdge]int64, weightVal int64) (edgeMap NamedEdgeMap, totalWeight int64, err error) { + if weightVal == 0 { return NamedEdgeMap{}, 0, nil // accept but ignore profile with no samples. } - - if !seenStartLine { - // TODO(prattmic): If Function.start_line is missing we could - // fall back to using absolute line numbers, which is better - // than nothing. - return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)") - } - byWeight := make([]NamedCallEdge, 0, len(weight)) for namedEdge := range weight { byWeight = append(byWeight, namedEdge) @@ -256,9 +290,110 @@ func createNamedEdgeMap(g *graph.Graph) (edgeMap NamedEdgeMap, totalWeight int64 ByWeight: byWeight, } + totalWeight = weightVal + return edgeMap, totalWeight, nil } +// createNamedEdgeMapFromPreprocess restores NamedEdgeMap information from a preprocessed profile. +// The reader can refer to the format of preprocessed profile in cmd/preprofile/main.go.
+func createNamedEdgeMapFromPreprocess(r io.Reader) (edgeMap NamedEdgeMap, totalWeight int64, err error) { + fileScanner := bufio.NewScanner(r) + fileScanner.Split(bufio.ScanLines) + weight := make(map[NamedCallEdge]int64) + + if !fileScanner.Scan() { + if err := fileScanner.Err(); err != nil { + return NamedEdgeMap{}, 0, fmt.Errorf("error reading preprocessed profile: %w", err) + } + return NamedEdgeMap{}, 0, fmt.Errorf("preprocessed profile missing header") + } + if gotHdr := fileScanner.Text() + "\n"; gotHdr != wantHdr { + return NamedEdgeMap{}, 0, fmt.Errorf("preprocessed profile malformed header; got %q want %q", gotHdr, wantHdr) + } + + for fileScanner.Scan() { + readStr := fileScanner.Text() + + callerName := readStr + + if !fileScanner.Scan() { + if err := fileScanner.Err(); err != nil { + return NamedEdgeMap{}, 0, fmt.Errorf("error reading preprocessed profile: %w", err) + } + return NamedEdgeMap{}, 0, fmt.Errorf("preprocessed profile entry missing callee") + } + calleeName := fileScanner.Text() + + if !fileScanner.Scan() { + if err := fileScanner.Err(); err != nil { + return NamedEdgeMap{}, 0, fmt.Errorf("error reading preprocessed profile: %w", err) + } + return NamedEdgeMap{}, 0, fmt.Errorf("preprocessed profile entry missing weight") + } + readStr = fileScanner.Text() + + split := strings.Split(readStr, " ") + + if len(split) != 2 { + return NamedEdgeMap{}, 0, fmt.Errorf("preprocessed profile entry got %v want 2 fields", split) + } + + co, _ := strconv.Atoi(split[0]) + + namedEdge := NamedCallEdge{ + CallerName: callerName, + CalleeName: calleeName, + CallSiteOffset: co, + } + + EWeight, _ := strconv.ParseInt(split[1], 10, 64) + + weight[namedEdge] += EWeight + totalWeight += EWeight + } + + return postProcessNamedEdgeMap(weight, totalWeight) + +} + +// createNamedEdgeMap builds a map of callsite-callee edge weights from the +// profile-graph. +// +// Caller should ignore the profile if totalWeight == 0. 
+func createNamedEdgeMap(g *profile.Graph) (edgeMap NamedEdgeMap, totalWeight int64, err error) { + seenStartLine := false + + // Process graph and build various node and edge maps which will + // be consumed by AST walk. + weight := make(map[NamedCallEdge]int64) + for _, n := range g.Nodes { + seenStartLine = seenStartLine || n.Info.StartLine != 0 + + canonicalName := n.Info.Name + // Create the key to the nodeMapKey. + namedEdge := NamedCallEdge{ + CallerName: canonicalName, + CallSiteOffset: n.Info.Lineno - n.Info.StartLine, + } + + for _, e := range n.Out { + totalWeight += e.WeightValue() + namedEdge.CalleeName = e.Dest.Info.Name + // Create new entry or increment existing entry. + weight[namedEdge] += e.WeightValue() + } + } + + if !seenStartLine { + // TODO(prattmic): If Function.start_line is missing we could + // fall back to using absolute line numbers, which is better + // than nothing. + return NamedEdgeMap{}, 0, fmt.Errorf("profile missing Function.start_line data (Go version of profiled application too old? Go 1.20+ automatically adds this to profiles)") + } + return postProcessNamedEdgeMap(weight, totalWeight) +} + // initializeIRGraph builds the IRGraph by visiting all the ir.Func in decl list // of a package. func createIRGraph(namedEdgeMap NamedEdgeMap) *IRGraph { diff --git a/src/cmd/compile/internal/rangefunc/rewrite.go b/src/cmd/compile/internal/rangefunc/rewrite.go index 7475c570aa..d439412ea8 100644 --- a/src/cmd/compile/internal/rangefunc/rewrite.go +++ b/src/cmd/compile/internal/rangefunc/rewrite.go @@ -934,7 +934,7 @@ func (r *rewriter) endLoop(loop *forLoop) { if rfunc.Params().Len() != 1 { base.Fatalf("invalid typecheck of range func") } - ftyp := rfunc.Params().At(0).Type().(*types2.Signature) // func(...) bool + ftyp := types2.CoreType(rfunc.Params().At(0).Type()).(*types2.Signature) // func(...) 
bool if ftyp.Results().Len() != 1 { base.Fatalf("invalid typecheck of range func") } diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go index c2407af017..185be4dd51 100644 --- a/src/cmd/compile/internal/reflectdata/reflect.go +++ b/src/cmd/compile/internal/reflectdata/reflect.go @@ -55,24 +55,6 @@ type typeSig struct { mtype *types.Type } -// Builds a type representing a Bucket structure for -// the given map type. This type is not visible to users - -// we include only enough information to generate a correct GC -// program for it. -// Make sure this stays in sync with runtime/map.go. -// -// A "bucket" is a "struct" { -// tophash [BUCKETSIZE]uint8 -// keys [BUCKETSIZE]keyType -// elems [BUCKETSIZE]elemType -// overflow *bucket -// } -const ( - BUCKETSIZE = abi.MapBucketCount - MAXKEYSIZE = abi.MapMaxKeyBytes - MAXELEMSIZE = abi.MapMaxElemBytes -) - func commonSize() int { return int(rttype.Type.Size()) } // Sizeof(runtime._type{}) func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{}) @@ -89,6 +71,18 @@ func makefield(name string, t *types.Type) *types.Field { // MapBucketType makes the map bucket type given the type of the map. func MapBucketType(t *types.Type) *types.Type { + // Builds a type representing a Bucket structure for + // the given map type. This type is not visible to users - + // we include only enough information to generate a correct GC + // program for it. + // Make sure this stays in sync with runtime/map.go. 
+ // + // A "bucket" is a "struct" { + // tophash [abi.MapBucketCount]uint8 + // keys [abi.MapBucketCount]keyType + // elems [abi.MapBucketCount]elemType + // overflow *bucket + // } if t.MapType().Bucket != nil { return t.MapType().Bucket } @@ -97,25 +91,25 @@ func MapBucketType(t *types.Type) *types.Type { elemtype := t.Elem() types.CalcSize(keytype) types.CalcSize(elemtype) - if keytype.Size() > MAXKEYSIZE { + if keytype.Size() > abi.MapMaxKeyBytes { keytype = types.NewPtr(keytype) } - if elemtype.Size() > MAXELEMSIZE { + if elemtype.Size() > abi.MapMaxElemBytes { elemtype = types.NewPtr(elemtype) } field := make([]*types.Field, 0, 5) // The first field is: uint8 topbits[BUCKETSIZE]. - arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE) + arr := types.NewArray(types.Types[types.TUINT8], abi.MapBucketCount) field = append(field, makefield("topbits", arr)) - arr = types.NewArray(keytype, BUCKETSIZE) + arr = types.NewArray(keytype, abi.MapBucketCount) arr.SetNoalg(true) keys := makefield("keys", arr) field = append(field, keys) - arr = types.NewArray(elemtype, BUCKETSIZE) + arr = types.NewArray(elemtype, abi.MapBucketCount) arr.SetNoalg(true) elems := makefield("elems", arr) field = append(field, elems) @@ -142,25 +136,25 @@ func MapBucketType(t *types.Type) *types.Type { if !types.IsComparable(t.Key()) { base.Fatalf("unsupported map key type for %v", t) } - if BUCKETSIZE < 8 { - base.Fatalf("bucket size %d too small for proper alignment %d", BUCKETSIZE, 8) + if abi.MapBucketCount < 8 { + base.Fatalf("bucket size %d too small for proper alignment %d", abi.MapBucketCount, 8) } - if uint8(keytype.Alignment()) > BUCKETSIZE { + if uint8(keytype.Alignment()) > abi.MapBucketCount { base.Fatalf("key align too big for %v", t) } - if uint8(elemtype.Alignment()) > BUCKETSIZE { - base.Fatalf("elem align %d too big for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, BUCKETSIZE) + if uint8(elemtype.Alignment()) > abi.MapBucketCount { + base.Fatalf("elem align %d too big 
for %v, BUCKETSIZE=%d", elemtype.Alignment(), t, abi.MapBucketCount) } - if keytype.Size() > MAXKEYSIZE { + if keytype.Size() > abi.MapMaxKeyBytes { base.Fatalf("key size too large for %v", t) } - if elemtype.Size() > MAXELEMSIZE { + if elemtype.Size() > abi.MapMaxElemBytes { base.Fatalf("elem size too large for %v", t) } - if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() { + if t.Key().Size() > abi.MapMaxKeyBytes && !keytype.IsPtr() { base.Fatalf("key indirect incorrect for %v", t) } - if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() { + if t.Elem().Size() > abi.MapMaxElemBytes && !elemtype.IsPtr() { base.Fatalf("elem indirect incorrect for %v", t) } if keytype.Size()%keytype.Alignment() != 0 { @@ -1124,14 +1118,14 @@ func writeType(t *types.Type) *obj.LSym { var flags uint32 // Note: flags must match maptype accessors in ../../../../runtime/type.go // and maptype builder in ../../../../reflect/type.go:MapOf. - if t.Key().Size() > MAXKEYSIZE { + if t.Key().Size() > abi.MapMaxKeyBytes { c.Field("KeySize").WriteUint8(uint8(types.PtrSize)) flags |= 1 // indirect key } else { c.Field("KeySize").WriteUint8(uint8(t.Key().Size())) } - if t.Elem().Size() > MAXELEMSIZE { + if t.Elem().Size() > abi.MapMaxElemBytes { c.Field("ValueSize").WriteUint8(uint8(types.PtrSize)) flags |= 2 // indirect value } else { @@ -1337,20 +1331,25 @@ func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) { // _ [4]byte // fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter. 
// } - o := objw.SymPtr(lsym, 0, writeType(iface), 0) - o = objw.SymPtr(lsym, o, writeType(typ), 0) - o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash - o += 4 // skip unused field + c := rttype.NewCursor(lsym, 0, rttype.ITab) + c.Field("Inter").WritePtr(writeType(iface)) + c.Field("Type").WritePtr(writeType(typ)) + c.Field("Hash").WriteUint32(types.TypeHash(typ)) // copy of type hash + + var delta int64 + c = c.Field("Fun") if !completeItab { // If typ doesn't implement iface, make method entries be zero. - o = objw.Uintptr(lsym, o, 0) - entries = entries[:0] - } - for _, fn := range entries { - o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method + c.Elem(0).WriteUintptr(0) + } else { + var a rttype.ArrayCursor + a, delta = c.ModifyArray(len(entries)) + for i, fn := range entries { + a.Elem(i).WritePtrWeak(fn) // method pointer for each method + } } // Nothing writes static itabs, so they are read only. - objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA)) + objw.Global(lsym, int32(rttype.ITab.Size()+delta), int16(obj.DUPOK|obj.RODATA)) lsym.Set(obj.AttrContentAddressable, true) } @@ -1499,39 +1498,6 @@ func (a typesByString) Less(i, j int) bool { } func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, -// which holds 1-bit entries describing where pointers are in a given type. -// Above this length, the GC information is recorded as a GC program, -// which can express repetition compactly. In either form, the -// information is used by the runtime to initialize the heap bitmap, -// and for large types (like 128 or more words), they are roughly the -// same speed. GC programs are never much larger and often more -// compact. (If large arrays are involved, they can be arbitrarily -// more compact.) 
-// -// The cutoff must be large enough that any allocation large enough to -// use a GC program is large enough that it does not share heap bitmap -// bytes with any other objects, allowing the GC program execution to -// assume an aligned start and not use atomic operations. In the current -// runtime, this means all malloc size classes larger than the cutoff must -// be multiples of four words. On 32-bit systems that's 16 bytes, and -// all size classes >= 16 bytes are 16-byte aligned, so no real constraint. -// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed -// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated -// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes -// must be >= 4. -// -// We used to use 16 because the GC programs do have some constant overhead -// to get started, and processing 128 pointers seems to be enough to -// amortize that overhead well. -// -// To make sure that the runtime's chansend can call typeBitsBulkBarrier, -// we raised the limit to 2048, so that even 32-bit systems are guaranteed to -// use bitmaps for objects up to 64 kB in size. -// -// Also known to reflect/type.go. -const maxPtrmaskBytes = 2048 - // GCSym returns a data symbol containing GC information for type t, along // with a boolean reporting whether the UseGCProg bit should be set in the // type kind, and the ptrdata field to record in the reflect type information. @@ -1554,7 +1520,7 @@ func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { // When write is true, it writes the symbol data. 
func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) { ptrdata = types.PtrDataSize(t) - if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 { + if ptrdata/int64(types.PtrSize) <= abi.MaxPtrmaskBytes*8 { lsym = dgcptrmask(t, write) return } diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go index 22338188e5..17f0d98532 100644 --- a/src/cmd/compile/internal/riscv64/ssa.go +++ b/src/cmd/compile/internal/riscv64/ssa.go @@ -278,7 +278,7 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = rd case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND, - ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW, + ssa.OpRISCV64SLL, ssa.OpRISCV64SLLW, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW, ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH, ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW, ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW, @@ -297,6 +297,72 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.Reg = r1 p.To.Type = obj.TYPE_REG p.To.Reg = r + + case ssa.OpRISCV64LoweredFMAXD, ssa.OpRISCV64LoweredFMIND, ssa.OpRISCV64LoweredFMAXS, ssa.OpRISCV64LoweredFMINS: + // Most of FMIN/FMAX result match Go's required behaviour, unless one of the + // inputs is a NaN. As such, we need to explicitly test for NaN + // before using FMIN/FMAX. + + // FADD Rarg0, Rarg1, Rout // FADD is used to propagate a NaN to the result in these cases. 
+ // FEQ Rarg0, Rarg0, Rtmp + // BEQZ Rtmp, end + // FEQ Rarg1, Rarg1, Rtmp + // BEQZ Rtmp, end + // F(MIN | MAX) + + r0 := v.Args[0].Reg() + r1 := v.Args[1].Reg() + out := v.Reg() + add, feq := riscv.AFADDD, riscv.AFEQD + if v.Op == ssa.OpRISCV64LoweredFMAXS || v.Op == ssa.OpRISCV64LoweredFMINS { + add = riscv.AFADDS + feq = riscv.AFEQS + } + + p1 := s.Prog(add) + p1.From.Type = obj.TYPE_REG + p1.From.Reg = r0 + p1.Reg = r1 + p1.To.Type = obj.TYPE_REG + p1.To.Reg = out + + p2 := s.Prog(feq) + p2.From.Type = obj.TYPE_REG + p2.From.Reg = r0 + p2.Reg = r0 + p2.To.Type = obj.TYPE_REG + p2.To.Reg = riscv.REG_TMP + + p3 := s.Prog(riscv.ABEQ) + p3.From.Type = obj.TYPE_REG + p3.From.Reg = riscv.REG_ZERO + p3.Reg = riscv.REG_TMP + p3.To.Type = obj.TYPE_BRANCH + + p4 := s.Prog(feq) + p4.From.Type = obj.TYPE_REG + p4.From.Reg = r1 + p4.Reg = r1 + p4.To.Type = obj.TYPE_REG + p4.To.Reg = riscv.REG_TMP + + p5 := s.Prog(riscv.ABEQ) + p5.From.Type = obj.TYPE_REG + p5.From.Reg = riscv.REG_ZERO + p5.Reg = riscv.REG_TMP + p5.To.Type = obj.TYPE_BRANCH + + p6 := s.Prog(v.Op.Asm()) + p6.From.Type = obj.TYPE_REG + p6.From.Reg = r1 + p6.Reg = r0 + p6.To.Type = obj.TYPE_REG + p6.To.Reg = out + + nop := s.Prog(obj.ANOP) + p3.To.SetTarget(nop) + p5.To.SetTarget(nop) + case ssa.OpRISCV64LoweredMuluhilo: r0 := v.Args[0].Reg() r1 := v.Args[1].Reg() @@ -356,8 +422,8 @@ func ssaGenValue(s *ssagen.State, v *ssa.Value) { p.To.Type = obj.TYPE_REG p.To.Reg = v.Reg() case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI, - ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI, - ssa.OpRISCV64SLTIU: + ssa.OpRISCV64SLLI, ssa.OpRISCV64SLLIW, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, + ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI, ssa.OpRISCV64SLTIU: p := s.Prog(v.Op.Asm()) p.From.Type = obj.TYPE_CONST p.From.Offset = v.AuxInt diff --git a/src/cmd/compile/internal/rttype/rttype.go 
b/src/cmd/compile/internal/rttype/rttype.go index cdc399d9cf..b90e23dc5b 100644 --- a/src/cmd/compile/internal/rttype/rttype.go +++ b/src/cmd/compile/internal/rttype/rttype.go @@ -42,6 +42,9 @@ var UncommonType *types.Type var InterfaceSwitch *types.Type var TypeAssert *types.Type +// Interface tables (itabs) +var ITab *types.Type + func Init() { // Note: this has to be called explicitly instead of being // an init function so it runs after the types package has @@ -64,6 +67,8 @@ func Init() { InterfaceSwitch = fromReflect(reflect.TypeOf(abi.InterfaceSwitch{})) TypeAssert = fromReflect(reflect.TypeOf(abi.TypeAssert{})) + ITab = fromReflect(reflect.TypeOf(abi.ITab{})) + // Make sure abi functions are correct. These functions are used // by the linker which doesn't have the ability to do type layout, // so we check the functions it uses here. @@ -80,6 +85,9 @@ func Init() { if got, want := int64(abi.TFlagOff(ptrSize)), Type.OffsetOf("TFlag"); got != want { base.Fatalf("abi.TFlagOff() == %d, want %d", got, want) } + if got, want := int64(abi.ITabTypeOff(ptrSize)), ITab.OffsetOf("Type"); got != want { + base.Fatalf("abi.ITabTypeOff() == %d, want %d", got, want) + } } // fromReflect translates from a host type to the equivalent target type. 
@@ -154,6 +162,12 @@ func (c Cursor) WritePtr(target *obj.LSym) { objw.SymPtr(c.lsym, int(c.offset), target, 0) } } +func (c Cursor) WritePtrWeak(target *obj.LSym) { + if c.typ.Kind() != types.TUINTPTR { + base.Fatalf("can't write ptr, it has kind %s", c.typ.Kind()) + } + objw.SymPtrWeak(c.lsym, int(c.offset), target, 0) +} func (c Cursor) WriteUintptr(val uint64) { if c.typ.Kind() != types.TUINTPTR { base.Fatalf("can't write uintptr, it has kind %s", c.typ.Kind()) @@ -250,6 +264,17 @@ func (c Cursor) Field(name string) Cursor { return Cursor{} } +func (c Cursor) Elem(i int64) Cursor { + if c.typ.Kind() != types.TARRAY { + base.Fatalf("can't call Elem on non-array %v", c.typ) + } + if i < 0 || i >= c.typ.NumElem() { + base.Fatalf("element access out of bounds [%d] in [0:%d]", i, c.typ.NumElem()) + } + elem := c.typ.Elem() + return Cursor{lsym: c.lsym, offset: c.offset + i*elem.Size(), typ: elem} +} + type ArrayCursor struct { c Cursor // cursor pointing at first element n int // number of elements diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules index a60afb000a..ed0ed80afa 100644 --- a/src/cmd/compile/internal/ssa/_gen/ARM.rules +++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules @@ -66,17 +66,17 @@ // count trailing zero for ARMv5 and ARMv6 // 32 - CLZ(x&-x - 1) -(Ctz32 x) && buildcfg.GOARM<=6 => +(Ctz32 x) && buildcfg.GOARM.Version<=6 => (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) -(Ctz16 x) && buildcfg.GOARM<=6 => +(Ctz16 x) && buildcfg.GOARM.Version<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) -(Ctz8 x) && buildcfg.GOARM<=6 => +(Ctz8 x) && buildcfg.GOARM.Version<=6 => (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) // count trailing zero for ARMv7 -(Ctz32 x) && buildcfg.GOARM==7 => (CLZ (RBIT x)) -(Ctz16 x) && buildcfg.GOARM==7 => (CLZ (RBIT (ORconst [0x10000] x))) -(Ctz8 x) && 
buildcfg.GOARM==7 => (CLZ (RBIT (ORconst [0x100] x))) +(Ctz32 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT x)) +(Ctz16 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT (ORconst [0x10000] x))) +(Ctz8 x) && buildcfg.GOARM.Version==7 => (CLZ (RBIT (ORconst [0x100] x))) // bit length (BitLen32 x) => (RSBconst [32] (CLZ x)) @@ -90,13 +90,13 @@ // t5 = x right rotate 8 bits -- (d, a, b, c ) // result = t4 ^ t5 -- (d, c, b, a ) // using shifted ops this can be done in 4 instructions. -(Bswap32 x) && buildcfg.GOARM==5 => +(Bswap32 x) && buildcfg.GOARM.Version==5 => (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) // byte swap for ARMv6 and above -(Bswap32 x) && buildcfg.GOARM>=6 => (REV x) +(Bswap32 x) && buildcfg.GOARM.Version>=6 => (REV x) // boolean ops -- booleans are represented with 0=false, 1=true (AndB ...) => (AND ...) @@ -741,10 +741,10 @@ (SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x) (ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x) (BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x) -(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) -(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x) -(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) -(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) +(ADDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x) +(SUBconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => 
(ADDconst [-c] x) +(ANDconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x) +(BICconst [c] x) && buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x) (ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d]) (ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x) (ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x) @@ -1139,7 +1139,7 @@ // UBFX instruction is supported by ARMv6T2, ARMv7 and above versions, REV16 is supported by // ARMv6 and above versions. So for ARMv6, we need to match SLLconst, SRLconst and ORshiftLL. ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (BFXU [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x) -((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x) +((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [24] (SLLconst [16] x)) x) && buildcfg.GOARM.Version>=6 => (REV16 x) // use indexed loads and stores (MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem) @@ -1209,25 +1209,25 @@ (BIC x x) => (MOVWconst [0]) (ADD (MUL x y) a) => (MULA x y a) -(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a) -(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a) +(SUB a (MUL x y)) && buildcfg.GOARM.Version == 7 => (MULS x y a) +(RSB (MUL x y) a) && buildcfg.GOARM.Version == 7 => (MULS x y a) -(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y) -(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y) -(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y) -(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y) +(NEGF (MULF x y)) && buildcfg.GOARM.Version >= 6 => (NMULF x y) +(NEGD (MULD x y)) && buildcfg.GOARM.Version >= 6 => (NMULD x y) +(MULF (NEGF x) y) && buildcfg.GOARM.Version >= 6 => (NMULF x y) +(MULD (NEGD x) y) && buildcfg.GOARM.Version >= 6 => (NMULD x y) (NMULF (NEGF x) y) => (MULF x y) (NMULD (NEGD x) 
y) => (MULD x y) // the result will overwrite the addend, since they are in the same register -(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y) -(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y) -(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y) -(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y) -(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y) -(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y) -(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y) -(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y) +(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y) +(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y) +(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y) +(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y) +(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSF a x y) +(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAF a x y) +(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULSD a x y) +(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM.Version >= 6 => (MULAD a x y) (AND x (MVN y)) => (BIC x y) @@ -1259,8 +1259,8 @@ (CMPD x (MOVDconst [0])) => (CMPD0 x) // bit extraction -(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) -(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x) +(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x) +(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] 
x) // comparison simplification ((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854 diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules index 4a47c4cd47..2af9519113 100644 --- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules +++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules @@ -416,7 +416,7 @@ (GetCallerSP ...) => (LoweredGetCallerSP ...) (GetCallerPC ...) => (LoweredGetCallerPC ...) -(If cond yes no) => (NE cond yes no) +(If cond yes no) => (NE (MOVBUreg cond) yes no) // Write barrier. (WB ...) => (LoweredWB ...) @@ -450,71 +450,37 @@ (EQ (SGTconst [0] x) yes no) => (GEZ x yes no) (NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no) (EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no) +(MOVBUreg x:((SGT|SGTU) _ _)) => x // fold offset into address (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr) // fold address into load/store -(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem) -(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem) -(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem) -(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem) -(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem) -(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem) -(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem) -(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && 
is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem) -(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem) +// Do not fold global variable access in -dynlink mode, where it will be rewritten +// to use the GOT via REGTMP, which currently cannot handle large offset. +(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem) -(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem) -(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem) -(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem) -(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem) -(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem) -(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem) -(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) -(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) -(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) -(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) +(MOV(B|H|W|V|F|D)store [off1] {sym} 
(ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem) -(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) + && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem) -(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) -(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) -(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) => - (MOVVstorezero 
[off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) +(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) + +(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) + +(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) + && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) => + (MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) (LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem) (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem) diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go index 3442fc8d7c..3fbf5be499 100644 --- a/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/LOONG64Ops.go @@ -123,17 +123,17 @@ func init() { // Common individual register masks var ( - gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R21-unused, R22 is g, R30 is REGTMP + gp = buildReg("R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31") // R1 is LR, R2 is thread pointer, R3 is stack pointer, R22 is g, R30 is REGTMP gpg = gp | buildReg("g") gpsp = gp | buildReg("SP") gpspg = gpg | buildReg("SP") gpspsbg = gpspg | buildReg("SB") fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 
F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31") callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g - r1 = buildReg("R19") - r2 = buildReg("R18") - r3 = buildReg("R17") - r4 = buildReg("R4") + r1 = buildReg("R20") + r2 = buildReg("R21") + r3 = buildReg("R23") + r4 = buildReg("R24") ) // Common regInfo var ( @@ -273,32 +273,32 @@ func init() { {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32 // function calls - {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem - {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem - {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem + {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem + {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). 
last arg=mem, auxint=argsize, returns mem + {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R29"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem + {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem // duffzero // arg0 = address of memory to zero // arg1 = mem // auxint = offset into duffzero code to start executing // returns mem - // R19 aka loong64.REGRT1 changed as side effect + // R20 aka loong64.REGRT1 changed as side effect { name: "DUFFZERO", aux: "Int64", argLength: 2, reg: regInfo{ - inputs: []regMask{buildReg("R19")}, - clobbers: buildReg("R19 R1"), + inputs: []regMask{buildReg("R20")}, + clobbers: buildReg("R20 R1"), }, typ: "Mem", faultOnNilArg0: true, }, // duffcopy - // arg0 = address of dst memory (in R20, changed as side effect) REGRT2 - // arg1 = address of src memory (in R19, changed as side effect) REGRT1 + // arg0 = address of dst memory (in R21, changed as side effect) + // arg1 = address of src memory (in R20, changed as side effect) // arg2 = mem // auxint = offset into duffcopy code to start executing // returns mem @@ -307,8 +307,8 @@ func init() { aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{buildReg("R20"), buildReg("R19")}, - clobbers: buildReg("R19 R20 R1"), + inputs: []regMask{buildReg("R21"), buildReg("R20")}, + clobbers: buildReg("R20 R21 R1"), }, typ: "Mem", faultOnNilArg0: true, @@ -316,45 +316,45 @@ func init() { }, // large or unaligned zeroing - // arg0 = address of memory to zero (in R19, changed as side effect) + // arg0 = address of memory to zero (in R20, changed as side effect) // arg1 = address of the last element to zero // arg2 = mem // auxint = alignment // returns mem - 
// MOVx R0, (R19) - // ADDV $sz, R19 - // BGEU Rarg1, R19, -2(PC) + // MOVx R0, (R20) + // ADDV $sz, R20 + // BGEU Rarg1, R20, -2(PC) { name: "LoweredZero", aux: "Int64", argLength: 3, reg: regInfo{ - inputs: []regMask{buildReg("R19"), gp}, - clobbers: buildReg("R19"), + inputs: []regMask{buildReg("R20"), gp}, + clobbers: buildReg("R20"), }, typ: "Mem", faultOnNilArg0: true, }, // large or unaligned move - // arg0 = address of dst memory (in R20, changed as side effect) - // arg1 = address of src memory (in R19, changed as side effect) + // arg0 = address of dst memory (in R21, changed as side effect) + // arg1 = address of src memory (in R20, changed as side effect) // arg2 = address of the last element of src // arg3 = mem // auxint = alignment // returns mem - // MOVx (R19), Rtmp - // MOVx Rtmp, (R20) - // ADDV $sz, R19 + // MOVx (R20), Rtmp + // MOVx Rtmp, (R21) // ADDV $sz, R20 - // BGEU Rarg2, R19, -4(PC) + // ADDV $sz, R21 + // BGEU Rarg2, R20, -4(PC) { name: "LoweredMove", aux: "Int64", argLength: 4, reg: regInfo{ - inputs: []regMask{buildReg("R20"), buildReg("R19"), gp}, - clobbers: buildReg("R19 R20"), + inputs: []regMask{buildReg("R21"), buildReg("R20"), gp}, + clobbers: buildReg("R20 R21"), }, typ: "Mem", faultOnNilArg0: true, @@ -476,8 +476,8 @@ func init() { blocks: blocks, regnames: regNamesLOONG64, // TODO: support register ABI on loong64 - ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11", - ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7", + ParamIntRegNames: "R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19", + ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15", gpregmask: gp, fpregmask: fp, framepointerreg: -1, // not used diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules index 9afe5995ae..135d70bc47 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules @@ -72,6 +72,9 @@ (FMA ...) => (FMADDD ...) 
+(Min(64|32)F ...) => (LoweredFMIN(D|S) ...) +(Max(64|32)F ...) => (LoweredFMAX(D|S) ...) + // Sign and zero extension. (SignExt8to16 ...) => (MOVBreg ...) @@ -153,27 +156,27 @@ // SRL only considers the bottom 6 bits of y, similarly SRLW only considers the // bottom 5 bits of y. Ensure that the result is always zero if the shift exceeds // the maximum value. See Lsh above for a detailed description. -(Rsh8Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh8Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh8Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh8Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) -(Rsh16Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh16Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh16Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh16Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) -(Rsh32Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) -(Rsh32Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) -(Rsh32Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) -(Rsh32Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] y))) -(Rsh64Ux8 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) -(Rsh64Ux16 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) -(Rsh64Ux32 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) -(Rsh64Ux64 x y) 
&& !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] y))) +(Rsh8Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh8Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh8Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh8Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt8to64 x) y) (Neg8 (SLTIU [64] y))) +(Rsh16Ux8 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh16Ux16 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh16Ux32 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh16Ux64 x y) && !shiftIsBounded(v) => (AND (SRL (ZeroExt16to64 x) y) (Neg16 (SLTIU [64] y))) +(Rsh32Ux8 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) +(Rsh32Ux16 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) +(Rsh32Ux32 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) +(Rsh32Ux64 x y) && !shiftIsBounded(v) => (AND (SRLW x y) (Neg32 (SLTIU [32] y))) +(Rsh64Ux8 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt8to64 y)))) +(Rsh64Ux16 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt16to64 y)))) +(Rsh64Ux32 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] (ZeroExt32to64 y)))) +(Rsh64Ux64 x y) && !shiftIsBounded(v) => (AND (SRL x y) (Neg64 (SLTIU [64] y))) -(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y) -(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y) -(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt32to64 x) y) -(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y) +(Rsh8Ux(64|32|16|8) x y) && 
shiftIsBounded(v) => (SRL (ZeroExt8to64 x) y) +(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL (ZeroExt16to64 x) y) +(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x y) +(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y) // SRA only considers the bottom 6 bits of y, similarly SRAW only considers the // bottom 5 bits. If y is greater than the maximum value (either 63 or 31 @@ -188,33 +191,33 @@ // // We don't need to sign-extend the OR result, as it will be at minimum 8 bits, // more than the 5 or 6 bits SRAW and SRA care about. -(Rsh8x8 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh8x16 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh8x32 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh16x8 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh16x16 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh16x32 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh32x8 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt8to64 y))))) -(Rsh32x16 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) -(Rsh32x32 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) -(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] y)))) -(Rsh64x8 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) -(Rsh64x16 x y) && 
!shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) -(Rsh64x32 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) -(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh8x8 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh8x16 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh8x32 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh16x8 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh16x16 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh16x32 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (OR y (ADDI [-1] (SLTIU [64] y)))) +(Rsh32x8 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt8to64 y))))) +(Rsh32x16 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) +(Rsh32x32 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) +(Rsh32x64 x y) && !shiftIsBounded(v) => (SRAW x (OR y (ADDI [-1] (SLTIU [32] y)))) +(Rsh64x8 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt8to64 y))))) +(Rsh64x16 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt16to64 y))))) +(Rsh64x32 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] (ZeroExt32to64 y))))) +(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (OR y (ADDI [-1] (SLTIU [64] y)))) -(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y) -(Rsh16x(64|32|16|8) x y) 
&& shiftIsBounded(v) => (SRA (SignExt16to64 x) y) -(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt32to64 x) y) -(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y) +(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt8to64 x) y) +(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA (SignExt16to64 x) y) +(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y) +(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y) // Rotates. -(RotateLeft8 x (MOVDconst [c])) => (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) -(RotateLeft16 x (MOVDconst [c])) => (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) -(RotateLeft32 x (MOVDconst [c])) => (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) -(RotateLeft64 x (MOVDconst [c])) => (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) +(RotateLeft8 x y) => (OR (SLL x (ANDI [7] y)) (SRL (ZeroExt8to64 x) (ANDI [7] (NEG y)))) +(RotateLeft16 x y) => (OR (SLL x (ANDI [15] y)) (SRL (ZeroExt16to64 x) (ANDI [15] (NEG y)))) +(RotateLeft32 x y) => (OR (SLLW x y) (SRLW x (NEG y))) +(RotateLeft64 x y) => (OR (SLL x y) (SRL x (NEG y))) (Less64 ...) => (SLT ...) (Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y)) @@ -710,10 +713,18 @@ (MOVDnop (MOVDconst [c])) => (MOVDconst [c]) // Avoid unnecessary zero and sign extension when right shifting. -(SRL (MOVWUreg x) y) => (SRLW x y) -(SRLI [x] (MOVWUreg y)) => (SRLIW [int64(x&31)] y) -(SRA (MOVWreg x) y) => (SRAW x y) -(SRAI [x] (MOVWreg y)) => (SRAIW [int64(x&31)] y) +(SRAI [x] (MOVWreg y)) && x >= 0 && x <= 31 => (SRAIW [int64(x)] y) +(SRLI [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW [int64(x)] y) + +// Replace right shifts that exceed size of signed type. 
+(SRAI [x] (MOVBreg y)) && x >= 8 => (SRAI [63] (SLLI [56] y)) +(SRAI [x] (MOVHreg y)) && x >= 16 => (SRAI [63] (SLLI [48] y)) +(SRAI [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y) + +// Eliminate right shifts that exceed size of unsigned type. +(SRLI [x] (MOVBUreg y)) && x >= 8 => (MOVDconst [0]) +(SRLI [x] (MOVHUreg y)) && x >= 16 => (MOVDconst [0]) +(SRLI [x] (MOVWUreg y)) && x >= 32 => (MOVDconst [0]) // Fold constant into immediate instructions where possible. (ADD (MOVDconst [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x) @@ -722,6 +733,7 @@ (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x) (SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x) (SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x) +(SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x) (SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x) (SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x) (SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x) diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go index 93f20f8a99..e9f1df0d58 100644 --- a/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go +++ b/src/cmd/compile/internal/ssa/_gen/RISCV64Ops.go @@ -207,16 +207,18 @@ func init() { {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register // Shift ops - {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63) - {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed - {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (aux1 & 31), signed - {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned - {name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (aux1 & 31), unsigned - {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63 - {name: "SRAI", argLength: 1, reg: gp11, asm: 
"SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63 - {name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-31 - {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63 - {name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-31 + {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63), logical left shift + {name: "SLLW", argLength: 2, reg: gp21, asm: "SLLW"}, // arg0 << (aux1 & 31), logical left shift of 32 bit value, sign extended to 64 bits + {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), arithmetic right shift + {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // arg0 >> (aux1 & 31), arithmetic right shift of 32 bit value, sign extended to 64 bits + {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), logical right shift + {name: "SRLW", argLength: 2, reg: gp21, asm: "SRLW"}, // arg0 >> (aux1 & 31), logical right shift of 32 bit value, sign extended to 64 bits + {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63, logical left shift + {name: "SLLIW", argLength: 1, reg: gp11, asm: "SLLIW", aux: "Int64"}, // arg0 << auxint, shift amount 0-31, logical left shift of 32 bit value, sign extended to 64 bits + {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, shift amount 0-63, arithmetic right shift + {name: "SRAIW", argLength: 1, reg: gp11, asm: "SRAIW", aux: "Int64"}, // arg0 >> auxint, shift amount 0-31, arithmetic right shift of 32 bit value, sign extended to 64 bits + {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, shift amount 0-63, logical right shift + {name: "SRLIW", argLength: 1, reg: gp11, asm: "SRLIW", aux: "Int64"}, // arg0 >> auxint, shift amount 0-31, logical 
right shift of 32 bit value, sign extended to 64 bits // Bitwise ops {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1 @@ -429,6 +431,8 @@ func init() { {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1 {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1 {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1 + {name: "LoweredFMAXS", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMAXS", commutative: true, typ: "Float32"}, // max(arg0, arg1) + {name: "LoweredFMINS", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMINS", commutative: true, typ: "Float32"}, // min(arg0, arg1) // D extension. {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1 @@ -456,6 +460,8 @@ func init() { {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1 {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1 {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1 + {name: "LoweredFMIND", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMIND", commutative: true, typ: "Float64"}, // min(arg0, arg1) + {name: "LoweredFMAXD", argLength: 2, reg: fp21, resultNotInArgs: true, asm: "FMAXD", commutative: true, typ: "Float64"}, // max(arg0, arg1) } RISCV64blocks := []blockData{ diff --git a/src/cmd/compile/internal/ssa/_gen/allocators.go b/src/cmd/compile/internal/ssa/_gen/allocators.go index 5c72fe8be1..5869a61e82 100644 --- a/src/cmd/compile/internal/ssa/_gen/allocators.go +++ b/src/cmd/compile/internal/ssa/_gen/allocators.go @@ -1,3 +1,7 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package main // TODO: should we share backing storage for similarly-shaped types? 
diff --git a/src/cmd/compile/internal/ssa/_gen/generic.rules b/src/cmd/compile/internal/ssa/_gen/generic.rules index aeda62591a..4c475d31e0 100644 --- a/src/cmd/compile/internal/ssa/_gen/generic.rules +++ b/src/cmd/compile/internal/ssa/_gen/generic.rules @@ -601,9 +601,49 @@ (Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y) (Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y -// Unsigned comparisons to zero. -(Less(64U|32U|16U|8U) _ (Const(64|32|16|8) [0])) => (ConstBool [false]) -(Leq(64U|32U|16U|8U) (Const(64|32|16|8) [0]) _) => (ConstBool [true]) +// Fold comparisons with numeric bounds +(Less(64|32|16|8)U _ (Const(64|32|16|8) [0])) => (ConstBool [false]) +(Leq(64|32|16|8)U (Const(64|32|16|8) [0]) _) => (ConstBool [true]) +(Less(64|32|16|8)U (Const(64|32|16|8) [-1]) _) => (ConstBool [false]) +(Leq(64|32|16|8)U _ (Const(64|32|16|8) [-1])) => (ConstBool [true]) +(Less64 _ (Const64 [math.MinInt64])) => (ConstBool [false]) +(Less32 _ (Const32 [math.MinInt32])) => (ConstBool [false]) +(Less16 _ (Const16 [math.MinInt16])) => (ConstBool [false]) +(Less8 _ (Const8 [math.MinInt8 ])) => (ConstBool [false]) +(Leq64 (Const64 [math.MinInt64]) _) => (ConstBool [true]) +(Leq32 (Const32 [math.MinInt32]) _) => (ConstBool [true]) +(Leq16 (Const16 [math.MinInt16]) _) => (ConstBool [true]) +(Leq8 (Const8 [math.MinInt8 ]) _) => (ConstBool [true]) +(Less64 (Const64 [math.MaxInt64]) _) => (ConstBool [false]) +(Less32 (Const32 [math.MaxInt32]) _) => (ConstBool [false]) +(Less16 (Const16 [math.MaxInt16]) _) => (ConstBool [false]) +(Less8 (Const8 [math.MaxInt8 ]) _) => (ConstBool [false]) +(Leq64 _ (Const64 [math.MaxInt64])) => (ConstBool [true]) +(Leq32 _ (Const32 [math.MaxInt32])) => (ConstBool [true]) +(Leq16 _ (Const16 [math.MaxInt16])) => (ConstBool [true]) +(Leq8 _ (Const8 [math.MaxInt8 ])) => (ConstBool [true]) + +// Canonicalize <= on numeric bounds and < near numeric bounds to == +(Leq(64|32|16|8)U x c:(Const(64|32|16|8) [0])) => (Eq(64|32|16|8) x c) +(Leq(64|32|16|8)U 
c:(Const(64|32|16|8) [-1]) x) => (Eq(64|32|16|8) x c) +(Less(64|32|16|8)U x (Const(64|32|16|8) [1])) => (Eq(64|32|16|8) x (Const(64|32|16|8) [0])) +(Less(64|32|16|8)U (Const(64|32|16|8) [-2]) x) => (Eq(64|32|16|8) x (Const(64|32|16|8) [-1])) +(Leq64 x c:(Const64 [math.MinInt64])) => (Eq64 x c) +(Leq32 x c:(Const32 [math.MinInt32])) => (Eq32 x c) +(Leq16 x c:(Const16 [math.MinInt16])) => (Eq16 x c) +(Leq8 x c:(Const8 [math.MinInt8 ])) => (Eq8 x c) +(Leq64 c:(Const64 [math.MaxInt64]) x) => (Eq64 x c) +(Leq32 c:(Const32 [math.MaxInt32]) x) => (Eq32 x c) +(Leq16 c:(Const16 [math.MaxInt16]) x) => (Eq16 x c) +(Leq8 c:(Const8 [math.MaxInt8 ]) x) => (Eq8 x c) +(Less64 x (Const64 [math.MinInt64+1])) => (Eq64 x (Const64 [math.MinInt64])) +(Less32 x (Const32 [math.MinInt32+1])) => (Eq32 x (Const32 [math.MinInt32])) +(Less16 x (Const16 [math.MinInt16+1])) => (Eq16 x (Const16 [math.MinInt16])) +(Less8 x (Const8 [math.MinInt8 +1])) => (Eq8 x (Const8 [math.MinInt8 ])) +(Less64 (Const64 [math.MaxInt64-1]) x) => (Eq64 x (Const64 [math.MaxInt64])) +(Less32 (Const32 [math.MaxInt32-1]) x) => (Eq32 x (Const32 [math.MaxInt32])) +(Less16 (Const16 [math.MaxInt16-1]) x) => (Eq16 x (Const16 [math.MaxInt16])) +(Less8 (Const8 [math.MaxInt8 -1]) x) => (Eq8 x (Const8 [math.MaxInt8 ])) // Ands clear bits. Ors set bits. // If a subsequent Or will set all the bits diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go index 4a24a181e5..26af10b59c 100644 --- a/src/cmd/compile/internal/ssa/block.go +++ b/src/cmd/compile/internal/ssa/block.go @@ -297,6 +297,8 @@ func (b *Block) removePred(i int) { // removeSucc removes the ith output edge from b. // It is the responsibility of the caller to remove // the corresponding predecessor edge. +// Note that this potentially reorders successors of b, so it +// must be used very carefully. 
func (b *Block) removeSucc(i int) { n := len(b.Succs) - 1 if i != n { @@ -323,6 +325,19 @@ func (b *Block) swapSuccessors() { b.Likely *= -1 } +// Swaps b.Succs[x] and b.Succs[y]. +func (b *Block) swapSuccessorsByIdx(x, y int) { + if x == y { + return + } + ex := b.Succs[x] + ey := b.Succs[y] + b.Succs[x] = ey + b.Succs[y] = ex + ex.b.Preds[ex.i].i = y + ey.b.Preds[ey.i].i = x +} + // removePhiArg removes the ith arg from phi. // It must be called after calling b.removePred(i) to // adjust the corresponding phi value of the block: @@ -339,7 +354,7 @@ func (b *Block) swapSuccessors() { func (b *Block) removePhiArg(phi *Value, i int) { n := len(b.Preds) if numPhiArgs := len(phi.Args); numPhiArgs-1 != n { - b.Fatalf("inconsistent state, num predecessors: %d, num phi args: %d", n, numPhiArgs) + b.Fatalf("inconsistent state for %v, num predecessors: %d, num phi args: %d", phi, n, numPhiArgs) } phi.Args[i].Uses-- phi.Args[i] = phi.Args[n] diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go index da4294d871..debcf1a0f4 100644 --- a/src/cmd/compile/internal/ssa/config.go +++ b/src/cmd/compile/internal/ssa/config.go @@ -283,6 +283,8 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat boo c.registers = registersLOONG64[:] c.gpRegMask = gpRegMaskLOONG64 c.fpRegMask = fpRegMaskLOONG64 + c.intParamRegs = paramIntRegLOONG64 + c.floatParamRegs = paramFloatRegLOONG64 c.FPReg = framepointerRegLOONG64 c.LinkReg = linkRegLOONG64 c.hasGReg = true diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go index 17f65127ee..17471e3b5f 100644 --- a/src/cmd/compile/internal/ssa/copyelim.go +++ b/src/cmd/compile/internal/ssa/copyelim.go @@ -11,6 +11,17 @@ func copyelim(f *Func) { // of OpCopy) is a copy. for _, b := range f.Blocks { for _, v := range b.Values { + + // This is an early place in SSA where all values are examined. 
+ // Rewrite all 0-sized Go values to remove accessors, dereferences, loads, etc. + if t := v.Type; (t.IsStruct() || t.IsArray()) && t.Size() == 0 { + if t.IsStruct() { + v.reset(OpStructMake0) + } else { + v.reset(OpArrayMake0) + } + } + copyelimValue(v) } } diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go index ae9fd2ef24..3bd1737bab 100644 --- a/src/cmd/compile/internal/ssa/deadcode.go +++ b/src/cmd/compile/internal/ssa/deadcode.go @@ -312,6 +312,8 @@ func deadcode(f *Func) { // removeEdge removes the i'th outgoing edge from b (and // the corresponding incoming edge from b.Succs[i].b). +// Note that this potentially reorders successors of b, so it +// must be used very carefully. func (b *Block) removeEdge(i int) { e := b.Succs[i] c := e.b diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go index 7e0e1f34a8..05a72787f3 100644 --- a/src/cmd/compile/internal/ssa/debug.go +++ b/src/cmd/compile/internal/ssa/debug.go @@ -42,7 +42,10 @@ type FuncDebug struct { OptDcl []*ir.Name // Filled in by the user. Translates Block and Value ID to PC. - GetPC func(ID, ID) int64 + // + // NOTE: block is only used if value is BlockStart.ID or BlockEnd.ID. + // Otherwise, it is ignored. + GetPC func(block, value ID) int64 } type BlockDebug struct { @@ -1368,7 +1371,7 @@ func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) { // Flush any leftover entries live at the end of the last block. 
for varID := range state.lists { - state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, FuncEnd.ID) + state.writePendingEntry(VarID(varID), -1, FuncEnd.ID) list := state.lists[varID] if state.loggingLevel > 0 { if len(list) == 0 { diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go index cf115107a1..af9e2a34cf 100644 --- a/src/cmd/compile/internal/ssa/debug_lines_test.go +++ b/src/cmd/compile/internal/ssa/debug_lines_test.go @@ -44,7 +44,7 @@ func testGoArch() string { func hasRegisterABI() bool { switch testGoArch() { - case "amd64", "arm64", "ppc64", "ppc64le", "riscv": + case "amd64", "arm64", "loong64", "ppc64", "ppc64le", "riscv": return true } return false diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go index 298e29ec56..b0788f1db4 100644 --- a/src/cmd/compile/internal/ssa/expand_calls.go +++ b/src/cmd/compile/internal/ssa/expand_calls.go @@ -411,7 +411,7 @@ func (x *expandState) decomposeAsNecessary(pos src.XPos, b *Block, a, m0 *Value, return mem case types.TSLICE: - mem = x.decomposeOne(pos, b, a, mem, x.typs.BytePtr, OpSlicePtr, &rc) + mem = x.decomposeOne(pos, b, a, mem, at.Elem().PtrTo(), OpSlicePtr, &rc) pos = pos.WithNotStmt() mem = x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceLen, &rc) return x.decomposeOne(pos, b, a, mem, x.typs.Int, OpSliceCap, &rc) @@ -564,7 +564,7 @@ func (x *expandState) rewriteSelectOrArg(pos src.XPos, b *Block, container, a, m return a case types.TSLICE: - addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr))) + addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr))) pos = pos.WithNotStmt() addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int))) addArg(x.rewriteSelectOrArg(pos, b, container, nil, m0, x.typs.Int, rc.next(x.typs.Int))) @@ -721,7 +721,7 @@ func 
(x *expandState) rewriteWideSelectToStores(pos src.XPos, b *Block, containe return m0 case types.TSLICE: - m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.BytePtr, rc.next(x.typs.BytePtr)) + m0 = x.rewriteWideSelectToStores(pos, b, container, m0, at.Elem().PtrTo(), rc.next(x.typs.BytePtr)) pos = pos.WithNotStmt() m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int)) m0 = x.rewriteWideSelectToStores(pos, b, container, m0, x.typs.Int, rc.next(x.typs.Int)) diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go index e94cb77f92..031d94f90c 100644 --- a/src/cmd/compile/internal/ssa/func.go +++ b/src/cmd/compile/internal/ssa/func.go @@ -64,7 +64,7 @@ type Func struct { // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry. RegArgs []Spill - // AuxCall describing parameters and results for this function. + // OwnAux describes parameters and results for this function. OwnAux *AuxCall freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil. @@ -721,7 +721,6 @@ func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value { v.AddArg(sp) } return v - } func (f *Func) Frontend() Frontend { return f.fe } diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go index df4b568134..235b0e5e5c 100644 --- a/src/cmd/compile/internal/ssa/magic.go +++ b/src/cmd/compile/internal/ssa/magic.go @@ -170,7 +170,7 @@ func smagicOK(n uint, c int64) bool { return c&(c-1) != 0 } -// smagicOKn reports whether we should strength reduce an signed n-bit divide by c. +// smagicOKn reports whether we should strength reduce a signed n-bit divide by c. 
func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) } func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) } func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) } diff --git a/src/cmd/compile/internal/ssa/memcombine.go b/src/cmd/compile/internal/ssa/memcombine.go index 848b1e57a7..b1a47510be 100644 --- a/src/cmd/compile/internal/ssa/memcombine.go +++ b/src/cmd/compile/internal/ssa/memcombine.go @@ -313,8 +313,8 @@ func combineLoads(root *Value, n int64) bool { if isLittleEndian && shift0 != 0 { v = leftShift(loadBlock, pos, v, shift0) } - if isBigEndian && shift0-(n-1)*8 != 0 { - v = leftShift(loadBlock, pos, v, shift0-(n-1)*8) + if isBigEndian && shift0-(n-1)*size*8 != 0 { + v = leftShift(loadBlock, pos, v, shift0-(n-1)*size*8) } // Install with (Copy v). @@ -662,14 +662,14 @@ func combineStores(root *Value, n int64) bool { isLittleEndian := true shift0 := shift(a[0].store, shiftBase) for i := int64(1); i < n; i++ { - if shift(a[i].store, shiftBase) != shift0+i*8 { + if shift(a[i].store, shiftBase) != shift0+i*size*8 { isLittleEndian = false break } } isBigEndian := true for i := int64(1); i < n; i++ { - if shift(a[i].store, shiftBase) != shift0-i*8 { + if shift(a[i].store, shiftBase) != shift0-i*size*8 { isBigEndian = false break } @@ -692,8 +692,8 @@ func combineStores(root *Value, n int64) bool { if isLittleEndian && shift0 != 0 { sv = rightShift(root.Block, root.Pos, sv, shift0) } - if isBigEndian && shift0-(n-1)*8 != 0 { - sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*8) + if isBigEndian && shift0-(n-1)*size*8 != 0 { + sv = rightShift(root.Block, root.Pos, sv, shift0-(n-1)*size*8) } if sv.Type.Size() > size*n { sv = truncate(root.Block, root.Pos, sv, sv.Type.Size(), size*n) diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go index 2e32afe2a6..6c89b1e185 100644 --- a/src/cmd/compile/internal/ssa/nilcheck_test.go +++ b/src/cmd/compile/internal/ssa/nilcheck_test.go @@ -1,3 
+1,7 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package ssa import ( diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go index 80ac8e4f8b..2378c7abc2 100644 --- a/src/cmd/compile/internal/ssa/opGen.go +++ b/src/cmd/compile/internal/ssa/opGen.go @@ -2388,11 +2388,13 @@ const ( OpRISCV64MOVWUreg OpRISCV64MOVDnop OpRISCV64SLL + OpRISCV64SLLW OpRISCV64SRA OpRISCV64SRAW OpRISCV64SRL OpRISCV64SRLW OpRISCV64SLLI + OpRISCV64SLLIW OpRISCV64SRAI OpRISCV64SRAIW OpRISCV64SRLI @@ -2464,6 +2466,8 @@ const ( OpRISCV64FNES OpRISCV64FLTS OpRISCV64FLES + OpRISCV64LoweredFMAXS + OpRISCV64LoweredFMINS OpRISCV64FADDD OpRISCV64FSUBD OpRISCV64FMULD @@ -2489,6 +2493,8 @@ const ( OpRISCV64FNED OpRISCV64FLTD OpRISCV64FLED + OpRISCV64LoweredFMIND + OpRISCV64LoweredFMAXD OpS390XFADDS OpS390XFADD @@ -23193,11 +23199,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23208,10 +23214,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AADDVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693244}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 
R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741820}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23221,11 +23227,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23236,10 +23242,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASUBVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23250,11 +23256,11 @@ var opcodeTable = [...]opInfo{ asm: 
loong64.AMULV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23265,11 +23271,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMULHV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23280,11 +23286,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMULHVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 
R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23294,11 +23300,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ADIVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23308,11 +23314,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ADIVVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: 
[]outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23322,11 +23328,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AREMV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23336,11 +23342,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AREMVU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23467,11 
+23473,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23482,10 +23488,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AAND, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23496,11 +23502,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23511,10 +23517,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23525,11 +23531,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23540,10 +23546,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AXOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 
R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23554,11 +23560,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23569,10 +23575,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ANOR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23581,10 +23587,10 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: 
regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23646,11 +23652,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMASKEQZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23660,11 +23666,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMASKNEZ, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 
R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23674,11 +23680,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23689,10 +23695,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASLLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23702,11 +23708,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 
1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23717,10 +23723,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASRLV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23730,11 +23736,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23745,10 +23751,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASRAV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23758,11 +23764,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23772,11 +23778,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23787,10 +23793,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AROTR, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23801,10 +23807,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AROTRV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23814,11 +23820,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 
R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23829,10 +23835,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASGT, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23842,11 +23848,11 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 
R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23857,10 +23863,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.ASGTU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23938,7 +23944,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVV, reg: regInfo{ outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23978,7 +23984,7 @@ var opcodeTable = [...]opInfo{ {0, 4611686018427387908}, // SP SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -23991,10 +23997,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 
R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24007,10 +24013,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24023,10 +24029,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24039,10 +24045,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 
R25 R26 R27 R28 R29 R31 }, }, }, @@ -24055,10 +24061,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24071,10 +24077,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24087,10 +24093,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ 
-24103,7 +24109,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -24119,7 +24125,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ {0, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 @@ -24135,8 +24141,8 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24149,8 +24155,8 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24163,8 +24169,8 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24177,8 +24183,8 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24191,7 +24197,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVF, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB 
{1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, @@ -24205,7 +24211,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVD, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB {1, 4611686017353646080}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, @@ -24219,7 +24225,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24232,7 +24238,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24245,7 +24251,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24258,7 +24264,7 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 
R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24268,10 +24274,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVB, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24281,10 +24287,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVBU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24294,10 +24300,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVH, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 
1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24307,10 +24313,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVHU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24320,10 +24326,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVW, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24333,10 +24339,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVWU, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ 
-24346,10 +24352,10 @@ var opcodeTable = [...]opInfo{ asm: loong64.AMOVV, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24359,10 +24365,10 @@ var opcodeTable = [...]opInfo{ resultInArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24499,49 +24505,49 @@ var opcodeTable = [...]opInfo{ { name: "CALLstatic", auxType: auxCallOff, - argLen: 1, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ - clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLtail", auxType: auxCallOff, - argLen: 1, + argLen: -1, 
clobberFlags: true, call: true, tailCall: true, reg: regInfo{ - clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLclosure", auxType: auxCallOff, - argLen: 3, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ {1, 268435456}, // R29 - {0, 1070596092}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644668}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { name: "CALLinter", auxType: auxCallOff, - argLen: 2, + argLen: -1, clobberFlags: true, call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 4611686018426339320, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 F0 
F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + clobbers: 4611686018427387896, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 }, }, { @@ -24551,9 +24557,9 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 262144}, // R19 + {0, 524288}, // R20 }, - clobbers: 262146, // R1 R19 + clobbers: 524290, // R1 R20 }, }, { @@ -24564,10 +24570,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 262144}, // R19 + {0, 1048576}, // R21 + {1, 524288}, // R20 }, - clobbers: 786434, // R1 R19 R20 + clobbers: 1572866, // R1 R20 R21 }, }, { @@ -24577,10 +24583,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 262144}, // R19 - {1, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 524288}, // R20 + {1, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 262144, // R19 + clobbers: 524288, // R20 }, }, { @@ -24591,11 +24597,11 @@ var opcodeTable = [...]opInfo{ faultOnNilArg1: true, reg: regInfo{ inputs: []inputInfo{ - {0, 524288}, // R20 - {1, 262144}, // R19 - {2, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1048576}, // R21 + {1, 524288}, // R20 + {2, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, - clobbers: 786432, // R19 R20 + clobbers: 1572864, // R20 R21 }, }, { @@ -24604,10 +24610,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 
4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24617,10 +24623,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24630,10 +24636,10 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24644,8 +24650,8 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 
R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24656,8 +24662,8 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24668,8 +24674,8 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24680,7 +24686,7 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 
4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24691,7 +24697,7 @@ var opcodeTable = [...]opInfo{ hasSideEffects: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, }, }, @@ -24704,11 +24710,11 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24721,11 +24727,11 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 
R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24738,11 +24744,11 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24755,11 +24761,11 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 
R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24773,10 +24779,10 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24790,10 +24796,10 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24806,12 +24812,12 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 
R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24824,12 +24830,12 @@ var opcodeTable = [...]opInfo{ unsafePoint: true, reg: regInfo{ inputs: []inputInfo{ - {1, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {2, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 - {0, 4611686019500081148}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 SB + {1, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {2, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 4611686019501129724}, // SP R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 R25 R26 R27 R28 R29 R31 SB }, outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24840,7 +24846,7 @@ var opcodeTable = [...]opInfo{ faultOnNilArg0: true, reg: regInfo{ inputs: []inputInfo{ - {0, 1072693240}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 g R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1073741816}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 g R23 R24 
R25 R26 R27 R28 R29 R31 }, }, }, @@ -24849,7 +24855,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24858,7 +24864,7 @@ var opcodeTable = [...]opInfo{ argLen: 1, reg: regInfo{ outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24878,7 +24884,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24888,7 +24894,7 @@ var opcodeTable = [...]opInfo{ rematerializeable: true, reg: regInfo{ outputs: []outputInfo{ - {0, 1070596088}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R23 R24 R25 R26 R27 R28 R29 R31 + {0, 1071644664}, // R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R23 R24 R25 R26 R27 R28 R29 R31 }, }, }, @@ -24911,8 +24917,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 65536}, // R17 - {1, 8}, // R4 + {0, 4194304}, // R23 + {1, 8388608}, // R24 }, }, }, @@ -24923,8 +24929,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 131072}, // R18 - {1, 65536}, // R17 + {0, 1048576}, // R21 + {1, 4194304}, // R23 }, }, }, @@ -24935,8 +24941,8 @@ var opcodeTable = [...]opInfo{ call: true, reg: regInfo{ inputs: []inputInfo{ - {0, 262144}, // R19 - {1, 131072}, // R18 + {0, 
524288}, // R20 + {1, 1048576}, // R21 }, }, }, @@ -32041,6 +32047,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SLLW", + argLen: 2, + asm: riscv.ASLLW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, { name: "SRA", argLen: 2, @@ -32111,6 +32131,20 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "SLLIW", + auxType: auxInt64, + argLen: 1, + asm: riscv.ASLLIW, + reg: regInfo{ + inputs: []inputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + outputs: []outputInfo{ + {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 + }, + }, + }, { name: "SRAI", auxType: auxInt64, @@ -33072,6 +33106,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredFMAXS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMINS", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMINS, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, 
// F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "FADDD", argLen: 2, @@ -33426,6 +33492,38 @@ var opcodeTable = [...]opInfo{ }, }, }, + { + name: "LoweredFMIND", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMIND, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, + { + name: "LoweredFMAXD", + argLen: 2, + commutative: true, + resultNotInArgs: true, + asm: riscv.AFMAXD, + reg: regInfo{ + inputs: []inputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + outputs: []outputInfo{ + {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 + }, + }, + }, { name: "FADDS", @@ -40707,16 +40805,16 @@ var registersLOONG64 = [...]Register{ {17, loong64.REG_R18, 14, "R18"}, {18, loong64.REG_R19, 15, "R19"}, {19, 
loong64.REG_R20, 16, "R20"}, - {20, loong64.REG_R21, -1, "R21"}, + {20, loong64.REG_R21, 17, "R21"}, {21, loong64.REGG, -1, "g"}, - {22, loong64.REG_R23, 17, "R23"}, - {23, loong64.REG_R24, 18, "R24"}, - {24, loong64.REG_R25, 19, "R25"}, - {25, loong64.REG_R26, 20, "R26"}, - {26, loong64.REG_R27, 21, "R27"}, - {27, loong64.REG_R28, 22, "R28"}, - {28, loong64.REG_R29, 23, "R29"}, - {29, loong64.REG_R31, 24, "R31"}, + {22, loong64.REG_R23, 18, "R23"}, + {23, loong64.REG_R24, 19, "R24"}, + {24, loong64.REG_R25, 20, "R25"}, + {25, loong64.REG_R26, 21, "R26"}, + {26, loong64.REG_R27, 22, "R27"}, + {27, loong64.REG_R28, 23, "R28"}, + {28, loong64.REG_R29, 24, "R29"}, + {29, loong64.REG_R31, 25, "R31"}, {30, loong64.REG_F0, -1, "F0"}, {31, loong64.REG_F1, -1, "F1"}, {32, loong64.REG_F2, -1, "F2"}, @@ -40751,9 +40849,9 @@ var registersLOONG64 = [...]Register{ {61, loong64.REG_F31, -1, "F31"}, {62, 0, -1, "SB"}, } -var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10} -var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37} -var gpRegMaskLOONG64 = regMask(1070596088) +var paramIntRegLOONG64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18} +var paramFloatRegLOONG64 = []int8{30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45} +var gpRegMaskLOONG64 = regMask(1071644664) var fpRegMaskLOONG64 = regMask(4611686017353646080) var specialRegMaskLOONG64 = regMask(0) var framepointerRegLOONG64 = int8(-1) diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go index 91f5fbe765..761b77a05d 100644 --- a/src/cmd/compile/internal/ssa/prove.go +++ b/src/cmd/compile/internal/ssa/prove.go @@ -100,10 +100,11 @@ func (d domain) String() string { } type pair struct { - v, w *Value // a pair of values, ordered by ID. + // a pair of values, ordered by ID. // v can be nil, to mean the zero value. // for booleans the zero value (v == nil) is false. 
- d domain + v, w *Value + d domain } // fact is a pair plus a relation for that pair. @@ -165,7 +166,7 @@ type factsTable struct { facts map[pair]relation // current known set of relation stack []fact // previous sets of relations - // order is a couple of partial order sets that record information + // order* is a couple of partial order sets that record information // about relations between SSA values in the signed and unsigned // domain. orderS *poset @@ -877,34 +878,17 @@ func prove(f *Func) { continue } - header := ind.Block - check := header.Controls[0] - if check == nil { - // we don't know how to rewrite a loop that not simple comparison - continue - } - switch check.Op { - case OpLeq64, OpLeq32, OpLeq16, OpLeq8, - OpLess64, OpLess32, OpLess16, OpLess8: - default: - // we don't know how to rewrite a loop that not simple comparison - continue - } - if !((check.Args[0] == ind && check.Args[1] == end) || - (check.Args[1] == ind && check.Args[0] == end)) { - // we don't know how to rewrite a loop that not simple comparison - continue - } if end.Block == ind.Block { // we can't rewrite loops where the condition depends on the loop body // this simple check is forced to work because if this is true a Phi in ind.Block must exists continue } + check := ind.Block.Controls[0] // invert the check check.Args[0], check.Args[1] = check.Args[1], check.Args[0] - // invert start and end in the loop + // swap start and end in the loop for i, v := range check.Args { if v != end { continue diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go index fcd3f5c8b5..2325b9ee45 100644 --- a/src/cmd/compile/internal/ssa/regalloc.go +++ b/src/cmd/compile/internal/ssa/regalloc.go @@ -672,6 +672,8 @@ func (s *regAllocState) init(f *Func) { s.allocatable &^= 1 << 9 // R9 case "arm64": // nothing to do + case "loong64": // R2 (aka TP) already reserved. + // nothing to do case "ppc64le": // R2 already reserved. 
// nothing to do case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved. diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go index fa4208228e..d3ddfbfab2 100644 --- a/src/cmd/compile/internal/ssa/rewrite.go +++ b/src/cmd/compile/internal/ssa/rewrite.go @@ -2131,8 +2131,8 @@ func logicFlags32(x int32) flagConstant { func makeJumpTableSym(b *Block) *obj.LSym { s := base.Ctxt.Lookup(fmt.Sprintf("%s.jump%d", b.Func.fe.Func().LSym.Name, b.ID)) - s.Set(obj.AttrDuplicateOK, true) - s.Set(obj.AttrLocal, true) + // The jump table symbol is accessed only from the function symbol. + s.Set(obj.AttrStatic, true) return s } @@ -2144,7 +2144,7 @@ func canRotate(c *Config, bits int64) bool { return false } switch c.arch { - case "386", "amd64", "arm64": + case "386", "amd64", "arm64", "riscv64": return true case "arm", "s390x", "ppc64", "ppc64le", "wasm", "loong64": return bits >= 32 diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go index 70cacb90ed..971c9a5d55 100644 --- a/src/cmd/compile/internal/ssa/rewriteARM.go +++ b/src/cmd/compile/internal/ssa/rewriteARM.go @@ -1496,7 +1496,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDD a (MULD x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULAD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1506,7 +1506,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMMULAD) @@ -1516,7 +1516,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { break } // match: (ADDD a (NMULD x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULSD a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, 
v_1 = _i0+1, v_1, v_0 { @@ -1526,7 +1526,7 @@ func rewriteValueARM_OpARMADDD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMMULSD) @@ -1541,7 +1541,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (ADDF a (MULF x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULAF a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1551,7 +1551,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMMULAF) @@ -1561,7 +1561,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { break } // match: (ADDF a (NMULF x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULSF a x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -1571,7 +1571,7 @@ func rewriteValueARM_OpARMADDF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMMULSF) @@ -1979,12 +1979,12 @@ func rewriteValueARM_OpARMADDconst(v *Value) bool { return true } // match: (ADDconst [c] x) - // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff // result: (SUBconst [-c] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } v.reset(OpARMSUBconst) @@ -2099,7 
+2099,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { return true } // match: (ADDshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) - // cond: buildcfg.GOARM>=6 + // cond: buildcfg.GOARM.Version>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -2110,7 +2110,7 @@ func rewriteValueARM_OpARMADDshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(buildcfg.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMREV16) @@ -2551,12 +2551,12 @@ func rewriteValueARM_OpARMANDconst(v *Value) bool { return true } // match: (ANDconst [c] x) - // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff // result: (BICconst [int32(^uint32(c))] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMBICconst) @@ -3052,12 +3052,12 @@ func rewriteValueARM_OpARMBICconst(v *Value) bool { return true } // match: (BICconst [c] x) - // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff // result: (ANDconst [int32(^uint32(c))] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) { break } v.reset(OpARMANDconst) @@ -7590,7 +7590,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { v_1 := v.Args[1] 
v_0 := v.Args[0] // match: (MULD (NEGD x) y) - // cond: buildcfg.GOARM >= 6 + // cond: buildcfg.GOARM.Version >= 6 // result: (NMULD x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -7599,7 +7599,7 @@ func rewriteValueARM_OpARMMULD(v *Value) bool { } x := v_0.Args[0] y := v_1 - if !(buildcfg.GOARM >= 6) { + if !(buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMNMULD) @@ -7614,7 +7614,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (MULF (NEGF x) y) - // cond: buildcfg.GOARM >= 6 + // cond: buildcfg.GOARM.Version >= 6 // result: (NMULF x y) for { for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 { @@ -7623,7 +7623,7 @@ func rewriteValueARM_OpARMMULF(v *Value) bool { } x := v_0.Args[0] y := v_1 - if !(buildcfg.GOARM >= 6) { + if !(buildcfg.GOARM.Version >= 6) { continue } v.reset(OpARMNMULF) @@ -8247,7 +8247,7 @@ func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool { func rewriteValueARM_OpARMNEGD(v *Value) bool { v_0 := v.Args[0] // match: (NEGD (MULD x y)) - // cond: buildcfg.GOARM >= 6 + // cond: buildcfg.GOARM.Version >= 6 // result: (NMULD x y) for { if v_0.Op != OpARMMULD { @@ -8255,7 +8255,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { } y := v_0.Args[1] x := v_0.Args[0] - if !(buildcfg.GOARM >= 6) { + if !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMNMULD) @@ -8267,7 +8267,7 @@ func rewriteValueARM_OpARMNEGD(v *Value) bool { func rewriteValueARM_OpARMNEGF(v *Value) bool { v_0 := v.Args[0] // match: (NEGF (MULF x y)) - // cond: buildcfg.GOARM >= 6 + // cond: buildcfg.GOARM.Version >= 6 // result: (NMULF x y) for { if v_0.Op != OpARMMULF { @@ -8275,7 +8275,7 @@ func rewriteValueARM_OpARMNEGF(v *Value) bool { } y := v_0.Args[1] x := v_0.Args[0] - if !(buildcfg.GOARM >= 6) { + if !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMNMULF) @@ -8583,7 +8583,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { return true } // match: (ORshiftLL [8] (SRLconst 
[24] (SLLconst [16] x)) x) - // cond: buildcfg.GOARM>=6 + // cond: buildcfg.GOARM.Version>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -8594,7 +8594,7 @@ func rewriteValueARM_OpARMORshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(buildcfg.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMREV16) @@ -9048,7 +9048,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { return true } // match: (RSB (MUL x y) a) - // cond: buildcfg.GOARM == 7 + // cond: buildcfg.GOARM.Version == 7 // result: (MULS x y a) for { if v_0.Op != OpARMMUL { @@ -9057,7 +9057,7 @@ func rewriteValueARM_OpARMRSB(v *Value) bool { y := v_0.Args[1] x := v_0.Args[0] a := v_1 - if !(buildcfg.GOARM == 7) { + if !(buildcfg.GOARM.Version == 7) { break } v.reset(OpARMMULS) @@ -10534,7 +10534,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool { return true } // match: (SRAconst (SLLconst x [c]) [d]) - // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFX [(d-c)|(32-d)<<8] x) for { d := auxIntToInt32(v.AuxInt) @@ -10543,7 +10543,7 @@ func rewriteValueARM_OpARMSRAconst(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFX) @@ -10590,7 +10590,7 @@ func rewriteValueARM_OpARMSRLconst(v *Value) bool { return true } // match: (SRLconst (SLLconst x [c]) [d]) - // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 + // cond: buildcfg.GOARM.Version==7 && uint64(d)>=uint64(c) && uint64(d)<=31 // result: (BFXU [(d-c)|(32-d)<<8] x) for { d := auxIntToInt32(v.AuxInt) @@ -10599,7 +10599,7 @@ func 
rewriteValueARM_OpARMSRLconst(v *Value) bool { } c := auxIntToInt32(v_0.AuxInt) x := v_0.Args[0] - if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { + if !(buildcfg.GOARM.Version == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) { break } v.reset(OpARMBFXU) @@ -10830,7 +10830,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { return true } // match: (SUB a (MUL x y)) - // cond: buildcfg.GOARM == 7 + // cond: buildcfg.GOARM.Version == 7 // result: (MULS x y a) for { a := v_0 @@ -10839,7 +10839,7 @@ func rewriteValueARM_OpARMSUB(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(buildcfg.GOARM == 7) { + if !(buildcfg.GOARM.Version == 7) { break } v.reset(OpARMMULS) @@ -10852,7 +10852,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBD a (MULD x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULSD a x y) for { a := v_0 @@ -10861,7 +10861,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMMULSD) @@ -10869,7 +10869,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { return true } // match: (SUBD a (NMULD x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULAD a x y) for { a := v_0 @@ -10878,7 +10878,7 @@ func rewriteValueARM_OpARMSUBD(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMMULAD) @@ -10891,7 +10891,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] // match: (SUBF a (MULF x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULSF a x y) for { a := v_0 @@ -10900,7 
+10900,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMMULSF) @@ -10908,7 +10908,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { return true } // match: (SUBF a (NMULF x y)) - // cond: a.Uses == 1 && buildcfg.GOARM >= 6 + // cond: a.Uses == 1 && buildcfg.GOARM.Version >= 6 // result: (MULAF a x y) for { a := v_0 @@ -10917,7 +10917,7 @@ func rewriteValueARM_OpARMSUBF(v *Value) bool { } y := v_1.Args[1] x := v_1.Args[0] - if !(a.Uses == 1 && buildcfg.GOARM >= 6) { + if !(a.Uses == 1 && buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMMULAF) @@ -11383,12 +11383,12 @@ func rewriteValueARM_OpARMSUBconst(v *Value) bool { return true } // match: (SUBconst [c] x) - // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff + // cond: buildcfg.GOARM.Version==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff // result: (ADDconst [-c] x) for { c := auxIntToInt32(v.AuxInt) x := v_0 - if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { + if !(buildcfg.GOARM.Version == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) { break } v.reset(OpARMADDconst) @@ -12710,7 +12710,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { return true } // match: (XORshiftLL [8] (SRLconst [24] (SLLconst [16] x)) x) - // cond: buildcfg.GOARM>=6 + // cond: buildcfg.GOARM.Version>=6 // result: (REV16 x) for { if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 { @@ -12721,7 +12721,7 @@ func rewriteValueARM_OpARMXORshiftLL(v *Value) bool { break } x := v_0_0.Args[0] - if x != v_1 || !(buildcfg.GOARM >= 6) { + if x != v_1 || !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMREV16) @@ -13062,12 +13062,12 @@ 
func rewriteValueARM_OpBswap32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Bswap32 x) - // cond: buildcfg.GOARM==5 + // cond: buildcfg.GOARM.Version==5 // result: (XOR (SRLconst (BICconst (XOR x (SRRconst [16] x)) [0xff0000]) [8]) (SRRconst x [8])) for { t := v.Type x := v_0 - if !(buildcfg.GOARM == 5) { + if !(buildcfg.GOARM.Version == 5) { break } v.reset(OpARMXOR) @@ -13090,11 +13090,11 @@ func rewriteValueARM_OpBswap32(v *Value) bool { return true } // match: (Bswap32 x) - // cond: buildcfg.GOARM>=6 + // cond: buildcfg.GOARM.Version>=6 // result: (REV x) for { x := v_0 - if !(buildcfg.GOARM >= 6) { + if !(buildcfg.GOARM.Version >= 6) { break } v.reset(OpARMREV) @@ -13177,12 +13177,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Ctz16 x) - // cond: buildcfg.GOARM<=6 + // cond: buildcfg.GOARM.Version<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x10000] x) (RSBconst [0] (ORconst [0x10000] x))) [1]))) for { t := v.Type x := v_0 - if !(buildcfg.GOARM <= 6) { + if !(buildcfg.GOARM.Version <= 6) { break } v.reset(OpARMRSBconst) @@ -13204,12 +13204,12 @@ func rewriteValueARM_OpCtz16(v *Value) bool { return true } // match: (Ctz16 x) - // cond: buildcfg.GOARM==7 + // cond: buildcfg.GOARM.Version==7 // result: (CLZ (RBIT (ORconst [0x10000] x))) for { t := v.Type x := v_0 - if !(buildcfg.GOARM == 7) { + if !(buildcfg.GOARM.Version == 7) { break } v.reset(OpARMCLZ) @@ -13228,12 +13228,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool { v_0 := v.Args[0] b := v.Block // match: (Ctz32 x) - // cond: buildcfg.GOARM<=6 + // cond: buildcfg.GOARM.Version<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND x (RSBconst [0] x)) [1]))) for { t := v.Type x := v_0 - if !(buildcfg.GOARM <= 6) { + if !(buildcfg.GOARM.Version <= 6) { break } v.reset(OpARMRSBconst) @@ -13252,12 +13252,12 @@ func rewriteValueARM_OpCtz32(v *Value) bool { return true } // match: (Ctz32 x) - // cond: buildcfg.GOARM==7 + // cond: 
buildcfg.GOARM.Version==7 // result: (CLZ (RBIT x)) for { t := v.Type x := v_0 - if !(buildcfg.GOARM == 7) { + if !(buildcfg.GOARM.Version == 7) { break } v.reset(OpARMCLZ) @@ -13274,12 +13274,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool { b := v.Block typ := &b.Func.Config.Types // match: (Ctz8 x) - // cond: buildcfg.GOARM<=6 + // cond: buildcfg.GOARM.Version<=6 // result: (RSBconst [32] (CLZ (SUBconst (AND (ORconst [0x100] x) (RSBconst [0] (ORconst [0x100] x))) [1]))) for { t := v.Type x := v_0 - if !(buildcfg.GOARM <= 6) { + if !(buildcfg.GOARM.Version <= 6) { break } v.reset(OpARMRSBconst) @@ -13301,12 +13301,12 @@ func rewriteValueARM_OpCtz8(v *Value) bool { return true } // match: (Ctz8 x) - // cond: buildcfg.GOARM==7 + // cond: buildcfg.GOARM.Version==7 // result: (CLZ (RBIT (ORconst [0x100] x))) for { t := v.Type x := v_0 - if !(buildcfg.GOARM == 7) { + if !(buildcfg.GOARM.Version == 7) { break } v.reset(OpARMCLZ) diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go index e88b74cb22..edd3ffe6b9 100644 --- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go +++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go @@ -1724,8 +1724,10 @@ func rewriteValueLOONG64_OpLOONG64MASKNEZ(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1736,7 +1738,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBUload) @@ 
-1746,7 +1748,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { return true } // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1758,7 +1760,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBUload) @@ -1771,6 +1773,26 @@ func rewriteValueLOONG64_OpLOONG64MOVBUload(v *Value) bool { } func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { v_0 := v.Args[0] + // match: (MOVBUreg x:(SGT _ _)) + // result: x + for { + x := v_0 + if x.Op != OpLOONG64SGT { + break + } + v.copyOf(x) + return true + } + // match: (MOVBUreg x:(SGTU _ _)) + // result: x + for { + x := v_0 + if x.Op != OpLOONG64SGTU { + break + } + v.copyOf(x) + return true + } // match: (MOVBUreg x:(MOVBUload _ _)) // result: (MOVVreg x) for { @@ -1809,8 +1831,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBUreg(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1821,7 +1845,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if 
!(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBload) @@ -1831,7 +1855,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { return true } // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1843,7 +1867,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBload) @@ -1895,8 +1919,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1908,7 +1934,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBstore) @@ -1918,7 +1944,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { return true } // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && 
is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -1931,7 +1957,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBstore) @@ -2047,8 +2073,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2059,7 +2087,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBstorezero) @@ -2069,7 +2097,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { return true } // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2081,7 +2109,7 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if 
!(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVBstorezero) @@ -2095,8 +2123,10 @@ func rewriteValueLOONG64_OpLOONG64MOVBstorezero(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2107,7 +2137,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVDload) @@ -2117,7 +2147,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { return true } // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2129,7 +2159,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVDload) @@ -2144,8 +2174,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := 
v.Block + config := b.Func.Config // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2157,7 +2189,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVDstore) @@ -2167,7 +2199,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { return true } // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2180,7 +2212,7 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVDstore) @@ -2194,8 +2226,10 @@ func rewriteValueLOONG64_OpLOONG64MOVDstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVFload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2206,7 +2240,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) 
bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVFload) @@ -2216,7 +2250,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { return true } // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2228,7 +2262,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVFload) @@ -2243,8 +2277,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2256,7 +2292,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVFstore) @@ -2266,7 +2302,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { return true } // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2279,7 +2315,7 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVFstore) @@ -2293,8 +2329,10 @@ func rewriteValueLOONG64_OpLOONG64MOVFstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2305,7 +2343,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHUload) @@ -2315,7 +2353,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { return true } // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2327,7 +2365,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHUload(v *Value) bool { sym2 
:= auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHUload) @@ -2400,8 +2438,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHUreg(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2412,7 +2452,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHload) @@ -2422,7 +2462,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { return true } // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2434,7 +2474,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHload) @@ -2530,8 +2570,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { v_2 := v.Args[2] 
v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2543,7 +2585,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHstore) @@ -2553,7 +2595,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { return true } // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2566,7 +2608,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHstore) @@ -2648,8 +2690,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2660,7 +2704,7 @@ 
func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHstorezero) @@ -2670,7 +2714,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { return true } // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2682,7 +2726,7 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVHstorezero) @@ -2696,8 +2740,10 @@ func rewriteValueLOONG64_OpLOONG64MOVHstorezero(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2708,7 +2754,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVload) @@ -2718,7 +2764,7 @@ func 
rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { return true } // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2730,7 +2776,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVload) @@ -2772,8 +2818,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2785,7 +2833,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVstore) @@ -2795,7 +2843,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { return true } // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := 
auxIntToInt32(v.AuxInt) @@ -2808,7 +2856,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVstore) @@ -2822,8 +2870,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2834,7 +2884,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVstorezero) @@ -2844,7 +2894,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { return true } // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2856,7 +2906,7 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || 
!config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVVstorezero) @@ -2870,8 +2920,10 @@ func rewriteValueLOONG64_OpLOONG64MOVVstorezero(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2882,7 +2934,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWUload) @@ -2892,7 +2944,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { return true } // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -2904,7 +2956,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWUload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWUload) @@ -2999,8 +3051,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWUreg(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: 
is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWload [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3011,7 +3065,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWload) @@ -3021,7 +3075,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { return true } // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3033,7 +3087,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWload(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWload) @@ -3162,8 +3216,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { v_2 := v.Args[2] v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3175,7 +3231,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && 
(ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWstore) @@ -3185,7 +3241,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { return true } // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3198,7 +3254,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { ptr := v_0.Args[0] val := v_1 mem := v_2 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWstore) @@ -3246,8 +3302,10 @@ func rewriteValueLOONG64_OpLOONG64MOVWstore(v *Value) bool { func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] + b := v.Block + config := b.Func.Config // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) - // cond: is32Bit(int64(off1)+off2) + // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3258,7 +3316,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { off2 := auxIntToInt64(v_0.AuxInt) ptr := v_0.Args[0] mem := v_1 - if !(is32Bit(int64(off1) + off2)) { + if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWstorezero) @@ -3268,7 +3326,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { return true } // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) - // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) + // cond: 
canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem) for { off1 := auxIntToInt32(v.AuxInt) @@ -3280,7 +3338,7 @@ func rewriteValueLOONG64_OpLOONG64MOVWstorezero(v *Value) bool { sym2 := auxToSym(v_0.Aux) ptr := v_0.Args[0] mem := v_1 - if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) { + if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink)) { break } v.reset(OpLOONG64MOVWstorezero) @@ -7570,6 +7628,7 @@ func rewriteValueLOONG64_OpZero(v *Value) bool { return false } func rewriteBlockLOONG64(b *Block) bool { + typ := &b.Func.Config.Types switch b.Kind { case BlockLOONG64EQ: // match: (EQ (FPFlagTrue cmp) yes no) @@ -7769,10 +7828,12 @@ func rewriteBlockLOONG64(b *Block) bool { } case BlockIf: // match: (If cond yes no) - // result: (NE cond yes no) + // result: (NE (MOVBUreg cond) yes no) for { cond := b.Controls[0] - b.resetWithControl(BlockLOONG64NE, cond) + v0 := b.NewValue0(cond.Pos, OpLOONG64MOVBUreg, typ.UInt64) + v0.AddArg(cond) + b.resetWithControl(BlockLOONG64NE, v0) return true } case BlockLOONG64LEZ: diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go index 6009c41f2d..9b81676001 100644 --- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go +++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go @@ -326,6 +326,18 @@ func rewriteValueRISCV64(v *Value) bool { return rewriteValueRISCV64_OpLsh8x64(v) case OpLsh8x8: return rewriteValueRISCV64_OpLsh8x8(v) + case OpMax32F: + v.Op = OpRISCV64LoweredFMAXS + return true + case OpMax64F: + v.Op = OpRISCV64LoweredFMAXD + return true + case OpMin32F: + v.Op = OpRISCV64LoweredFMINS + return true + case OpMin64F: + v.Op = OpRISCV64LoweredFMIND + return true case OpMod16: return rewriteValueRISCV64_OpMod16(v) case OpMod16u: @@ -524,6 +536,8 @@ func 
rewriteValueRISCV64(v *Value) bool { return rewriteValueRISCV64_OpRISCV64SLL(v) case OpRISCV64SLLI: return rewriteValueRISCV64_OpRISCV64SLLI(v) + case OpRISCV64SLLW: + return rewriteValueRISCV64_OpRISCV64SLLW(v) case OpRISCV64SLT: return rewriteValueRISCV64_OpRISCV64SLT(v) case OpRISCV64SLTI: @@ -6058,6 +6072,24 @@ func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool { } return false } +func rewriteValueRISCV64_OpRISCV64SLLW(v *Value) bool { + v_1 := v.Args[1] + v_0 := v.Args[0] + // match: (SLLW x (MOVDconst [val])) + // result: (SLLIW [int64(val&31)] x) + for { + x := v_0 + if v_1.Op != OpRISCV64MOVDconst { + break + } + val := auxIntToInt64(v_1.AuxInt) + v.reset(OpRISCV64SLLIW) + v.AuxInt = int64ToAuxInt(int64(val & 31)) + v.AddArg(x) + return true + } + return false +} func rewriteValueRISCV64_OpRISCV64SLT(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] @@ -6260,20 +6292,6 @@ func rewriteValueRISCV64_OpRISCV64SNEZ(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SRA (MOVWreg x) y) - // result: (SRAW x y) - for { - t := v.Type - if v_0.Op != OpRISCV64MOVWreg { - break - } - x := v_0.Args[0] - y := v_1 - v.reset(OpRISCV64SRAW) - v.Type = t - v.AddArg2(x, y) - return true - } // match: (SRA x (MOVDconst [val])) // result: (SRAI [int64(val&63)] x) for { @@ -6291,8 +6309,10 @@ func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool { } func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { v_0 := v.Args[0] + b := v.Block // match: (SRAI [x] (MOVWreg y)) - // result: (SRAIW [int64(x&31)] y) + // cond: x >= 0 && x <= 31 + // result: (SRAIW [int64(x)] y) for { t := v.Type x := auxIntToInt64(v.AuxInt) @@ -6300,9 +6320,71 @@ func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool { break } y := v_0.Args[0] + if !(x >= 0 && x <= 31) { + break + } v.reset(OpRISCV64SRAIW) v.Type = t - v.AuxInt = int64ToAuxInt(int64(x & 31)) + v.AuxInt = int64ToAuxInt(int64(x)) + v.AddArg(y) + return true + } + // match: 
(SRAI [x] (MOVBreg y)) + // cond: x >= 8 + // result: (SRAI [63] (SLLI [56] y)) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBreg { + break + } + y := v_0.Args[0] + if !(x >= 8) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = int64ToAuxInt(56) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SRAI [x] (MOVHreg y)) + // cond: x >= 16 + // result: (SRAI [63] (SLLI [48] y)) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHreg { + break + } + y := v_0.Args[0] + if !(x >= 16) { + break + } + v.reset(OpRISCV64SRAI) + v.AuxInt = int64ToAuxInt(63) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLI, t) + v0.AuxInt = int64ToAuxInt(48) + v0.AddArg(y) + v.AddArg(v0) + return true + } + // match: (SRAI [x] (MOVWreg y)) + // cond: x >= 32 + // result: (SRAIW [31] y) + for { + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWreg { + break + } + y := v_0.Args[0] + if !(x >= 32) { + break + } + v.reset(OpRISCV64SRAIW) + v.AuxInt = int64ToAuxInt(31) v.AddArg(y) return true } @@ -6341,20 +6423,6 @@ func rewriteValueRISCV64_OpRISCV64SRAW(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] - // match: (SRL (MOVWUreg x) y) - // result: (SRLW x y) - for { - t := v.Type - if v_0.Op != OpRISCV64MOVWUreg { - break - } - x := v_0.Args[0] - y := v_1 - v.reset(OpRISCV64SRLW) - v.Type = t - v.AddArg2(x, y) - return true - } // match: (SRL x (MOVDconst [val])) // result: (SRLI [int64(val&63)] x) for { @@ -6373,7 +6441,8 @@ func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool { func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { v_0 := v.Args[0] // match: (SRLI [x] (MOVWUreg y)) - // result: (SRLIW [int64(x&31)] y) + // cond: x >= 0 && x <= 31 + // result: (SRLIW [int64(x)] y) for { t := v.Type x := auxIntToInt64(v.AuxInt) @@ -6381,12 +6450,66 @@ func 
rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool { break } y := v_0.Args[0] + if !(x >= 0 && x <= 31) { + break + } v.reset(OpRISCV64SRLIW) v.Type = t - v.AuxInt = int64ToAuxInt(int64(x & 31)) + v.AuxInt = int64ToAuxInt(int64(x)) v.AddArg(y) return true } + // match: (SRLI [x] (MOVBUreg y)) + // cond: x >= 8 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVBUreg { + break + } + if !(x >= 8) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLI [x] (MOVHUreg y)) + // cond: x >= 16 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVHUreg { + break + } + if !(x >= 16) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } + // match: (SRLI [x] (MOVWUreg y)) + // cond: x >= 32 + // result: (MOVDconst [0]) + for { + t := v.Type + x := auxIntToInt64(v.AuxInt) + if v_0.Op != OpRISCV64MOVWUreg { + break + } + if !(x >= 32) { + break + } + v.reset(OpRISCV64MOVDconst) + v.Type = t + v.AuxInt = int64ToAuxInt(0) + return true + } // match: (SRLI [x] (MOVDconst [y])) // result: (MOVDconst [int64(uint64(y) >> uint32(x))]) for { @@ -6541,112 +6664,102 @@ func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool { v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (RotateLeft16 x (MOVDconst [c])) - // result: (Or16 (Lsh16x64 x (MOVDconst [c&15])) (Rsh16Ux64 x (MOVDconst [-c&15]))) + // match: (RotateLeft16 x y) + // result: (OR (SLL x (ANDI [15] y)) (SRL (ZeroExt16to64 x) (ANDI [15] (NEG y)))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpOr16) - v0 := b.NewValue0(v.Pos, OpLsh16x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c & 15) + y := v_1 + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v1 
:= b.NewValue0(v.Pos, OpRISCV64ANDI, y.Type) + v1.AuxInt = int64ToAuxInt(15) + v1.AddArg(y) v0.AddArg2(x, v1) - v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v3.AuxInt = int64ToAuxInt(-c & 15) - v2.AddArg2(x, v3) + v2 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, y.Type) + v4.AuxInt = int64ToAuxInt(15) + v5 := b.NewValue0(v.Pos, OpRISCV64NEG, y.Type) + v5.AddArg(y) + v4.AddArg(v5) + v2.AddArg2(v3, v4) v.AddArg2(v0, v2) return true } - return false } func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (RotateLeft32 x (MOVDconst [c])) - // result: (Or32 (Lsh32x64 x (MOVDconst [c&31])) (Rsh32Ux64 x (MOVDconst [-c&31]))) + // match: (RotateLeft32 x y) + // result: (OR (SLLW x y) (SRLW x (NEG y))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpOr32) - v0 := b.NewValue0(v.Pos, OpLsh32x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c & 31) - v0.AddArg2(x, v1) - v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v3.AuxInt = int64ToAuxInt(-c & 31) - v2.AddArg2(x, v3) - v.AddArg2(v0, v2) + y := v_1 + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v2 := b.NewValue0(v.Pos, OpRISCV64NEG, y.Type) + v2.AddArg(y) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) return true } - return false } func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types - // match: (RotateLeft64 x (MOVDconst [c])) - // result: (Or64 (Lsh64x64 x (MOVDconst [c&63])) (Rsh64Ux64 x (MOVDconst [-c&63]))) + // match: (RotateLeft64 x y) + // 
result: (OR (SLL x y) (SRL x (NEG y))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpOr64) - v0 := b.NewValue0(v.Pos, OpLsh64x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c & 63) - v0.AddArg2(x, v1) - v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v3.AuxInt = int64ToAuxInt(-c & 63) - v2.AddArg2(x, v3) - v.AddArg2(v0, v2) + y := v_1 + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v2 := b.NewValue0(v.Pos, OpRISCV64NEG, y.Type) + v2.AddArg(y) + v1.AddArg2(x, v2) + v.AddArg2(v0, v1) return true } - return false } func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block typ := &b.Func.Config.Types - // match: (RotateLeft8 x (MOVDconst [c])) - // result: (Or8 (Lsh8x64 x (MOVDconst [c&7])) (Rsh8Ux64 x (MOVDconst [-c&7]))) + // match: (RotateLeft8 x y) + // result: (OR (SLL x (ANDI [7] y)) (SRL (ZeroExt8to64 x) (ANDI [7] (NEG y)))) for { t := v.Type x := v_0 - if v_1.Op != OpRISCV64MOVDconst { - break - } - c := auxIntToInt64(v_1.AuxInt) - v.reset(OpOr8) - v0 := b.NewValue0(v.Pos, OpLsh8x64, t) - v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v1.AuxInt = int64ToAuxInt(c & 7) + y := v_1 + v.reset(OpRISCV64OR) + v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t) + v1 := b.NewValue0(v.Pos, OpRISCV64ANDI, y.Type) + v1.AuxInt = int64ToAuxInt(7) + v1.AddArg(y) v0.AddArg2(x, v1) - v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t) - v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64) - v3.AuxInt = int64ToAuxInt(-c & 7) - v2.AddArg2(x, v3) + v2 := b.NewValue0(v.Pos, OpRISCV64SRL, t) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(x) + v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, y.Type) + v4.AuxInt = int64ToAuxInt(7) + v5 := b.NewValue0(v.Pos, OpRISCV64NEG, y.Type) + 
v5.AddArg(y) + v4.AddArg(v5) + v2.AddArg2(v3, v4) v.AddArg2(v0, v2) return true } - return false } func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool { v_1 := v.Args[1] @@ -7035,7 +7148,7 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32Ux16 x y) // cond: !shiftIsBounded(v) - // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt16to64 y)))) for { t := v.Type x := v_0 @@ -7044,33 +7157,29 @@ func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool { break } v.reset(OpRISCV64AND) - v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg2(v1, y) - v2 := b.NewValue0(v.Pos, OpNeg32, t) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) - v.AddArg2(v0, v2) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux16 x y) // cond: shiftIsBounded(v) - // result: (SRL (ZeroExt32to64 x) y) + // result: (SRLW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRL) - v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) return true } return false @@ -7082,7 +7191,7 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32Ux32 x y) // cond: !shiftIsBounded(v) - // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt32to64 y)))) for { t := v.Type x := v_0 @@ -7091,33 
+7200,29 @@ func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool { break } v.reset(OpRISCV64AND) - v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg2(v1, y) - v2 := b.NewValue0(v.Pos, OpNeg32, t) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) - v.AddArg2(v0, v2) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux32 x y) // cond: shiftIsBounded(v) - // result: (SRL (ZeroExt32to64 x) y) + // result: (SRLW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRL) - v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) return true } return false @@ -7126,10 +7231,9 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types // match: (Rsh32Ux64 x y) // cond: !shiftIsBounded(v) - // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] y))) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] y))) for { t := v.Type x := v_0 @@ -7138,31 +7242,27 @@ func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool { break } v.reset(OpRISCV64AND) - v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg2(v1, y) - v2 := b.NewValue0(v.Pos, OpNeg32, t) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) - v3.AuxInt = int64ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v.AddArg2(v0, v2) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) 
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux64 x y) // cond: shiftIsBounded(v) - // result: (SRL (ZeroExt32to64 x) y) + // result: (SRLW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRL) - v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) return true } return false @@ -7174,7 +7274,7 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32Ux8 x y) // cond: !shiftIsBounded(v) - // result: (AND (SRL (ZeroExt32to64 x) y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) + // result: (AND (SRLW x y) (Neg32 (SLTIU [32] (ZeroExt8to64 y)))) for { t := v.Type x := v_0 @@ -7183,33 +7283,29 @@ func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool { break } v.reset(OpRISCV64AND) - v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t) - v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v1.AddArg(x) - v0.AddArg2(v1, y) - v2 := b.NewValue0(v.Pos, OpNeg32, t) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64SRLW, t) + v0.AddArg2(x, y) + v1 := b.NewValue0(v.Pos, OpNeg32, t) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) - v.AddArg2(v0, v2) + v1.AddArg(v2) + v.AddArg2(v0, v1) return true } // match: (Rsh32Ux8 x y) // cond: shiftIsBounded(v) - // result: (SRL (ZeroExt32to64 x) y) + // result: (SRLW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRL) - v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRLW) + v.AddArg2(x, y) return true } return false @@ -7221,7 +7317,7 @@ func 
rewriteValueRISCV64_OpRsh32x16(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32x16 x y) // cond: !shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt16to64 y))))) for { t := v.Type x := v_0 @@ -7229,36 +7325,32 @@ func rewriteValueRISCV64_OpRsh32x16(v *Value) bool { if !(!shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) + v.reset(OpRISCV64SRAW) v.Type = t - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) - v2.AuxInt = int64ToAuxInt(-1) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) - v1.AddArg2(y, v2) - v.AddArg2(v0, v1) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x16 x y) // cond: shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) y) + // result: (SRAW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) return true } return false @@ -7270,7 +7362,7 @@ func rewriteValueRISCV64_OpRsh32x32(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32x32 x y) // cond: !shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt32to64 y))))) for { t := v.Type x := v_0 @@ -7278,36 +7370,32 @@ func 
rewriteValueRISCV64_OpRsh32x32(v *Value) bool { if !(!shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) + v.reset(OpRISCV64SRAW) v.Type = t - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) - v2.AuxInt = int64ToAuxInt(-1) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) - v1.AddArg2(y, v2) - v.AddArg2(v0, v1) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x32 x y) // cond: shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) y) + // result: (SRAW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) return true } return false @@ -7316,10 +7404,9 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { v_1 := v.Args[1] v_0 := v.Args[0] b := v.Block - typ := &b.Func.Config.Types // match: (Rsh32x64 x y) // cond: !shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] y)))) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] y)))) for { t := v.Type x := v_0 @@ -7327,34 +7414,30 @@ func rewriteValueRISCV64_OpRsh32x64(v *Value) bool { if !(!shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) + v.reset(OpRISCV64SRAW) v.Type = t - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) - v2.AuxInt = 
int64ToAuxInt(-1) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) - v3.AuxInt = int64ToAuxInt(32) - v3.AddArg(y) - v2.AddArg(v3) - v1.AddArg2(y, v2) - v.AddArg2(v0, v1) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v2.AddArg(y) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x64 x y) // cond: shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) y) + // result: (SRAW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) return true } return false @@ -7366,7 +7449,7 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { typ := &b.Func.Config.Types // match: (Rsh32x8 x y) // cond: !shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) (OR y (ADDI [-1] (SLTIU [32] (ZeroExt8to64 y))))) + // result: (SRAW x (OR y (ADDI [-1] (SLTIU [32] (ZeroExt8to64 y))))) for { t := v.Type x := v_0 @@ -7374,36 +7457,32 @@ func rewriteValueRISCV64_OpRsh32x8(v *Value) bool { if !(!shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) + v.reset(OpRISCV64SRAW) v.Type = t - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) - v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) - v2.AuxInt = int64ToAuxInt(-1) - v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) - v3.AuxInt = int64ToAuxInt(32) - v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) - v4.AddArg(y) - v3.AddArg(v4) + v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type) + v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type) + v1.AuxInt = int64ToAuxInt(-1) + v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type) + v2.AuxInt = int64ToAuxInt(32) + v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64) + v3.AddArg(y) v2.AddArg(v3) 
- v1.AddArg2(y, v2) - v.AddArg2(v0, v1) + v1.AddArg(v2) + v0.AddArg2(y, v1) + v.AddArg2(x, v0) return true } // match: (Rsh32x8 x y) // cond: shiftIsBounded(v) - // result: (SRA (SignExt32to64 x) y) + // result: (SRAW x y) for { x := v_0 y := v_1 if !(shiftIsBounded(v)) { break } - v.reset(OpRISCV64SRA) - v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64) - v0.AddArg(x) - v.AddArg2(v0, y) + v.reset(OpRISCV64SRAW) + v.AddArg2(x, y) return true } return false diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go index a018ca04b6..468e9fa9c6 100644 --- a/src/cmd/compile/internal/ssa/rewritegeneric.go +++ b/src/cmd/compile/internal/ssa/rewritegeneric.go @@ -11443,6 +11443,50 @@ func rewriteValuegeneric_OpLeq16(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Leq16 (Const16 [math.MinInt16]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != math.MinInt16 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq16 _ (Const16 [math.MaxInt16])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != math.MaxInt16 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq16 x c:(Const16 [math.MinInt16])) + // result: (Eq16 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst16 || auxIntToInt16(c.AuxInt) != math.MinInt16 { + break + } + v.reset(OpEq16) + v.AddArg2(x, c) + return true + } + // match: (Leq16 c:(Const16 [math.MaxInt16]) x) + // result: (Eq16 x c) + for { + c := v_0 + if c.Op != OpConst16 || auxIntToInt16(c.AuxInt) != math.MaxInt16 { + break + } + x := v_1 + v.reset(OpEq16) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq16U(v *Value) bool { @@ -11491,6 +11535,40 @@ func rewriteValuegeneric_OpLeq16U(v *Value) bool { v.AuxInt = boolToAuxInt(true) return true } + // match: (Leq16U _ 
(Const16 [-1])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq16U x c:(Const16 [0])) + // result: (Eq16 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst16 || auxIntToInt16(c.AuxInt) != 0 { + break + } + v.reset(OpEq16) + v.AddArg2(x, c) + return true + } + // match: (Leq16U c:(Const16 [-1]) x) + // result: (Eq16 x c) + for { + c := v_0 + if c.Op != OpConst16 || auxIntToInt16(c.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpEq16) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq32(v *Value) bool { @@ -11590,6 +11668,50 @@ func rewriteValuegeneric_OpLeq32(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Leq32 (Const32 [math.MinInt32]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != math.MinInt32 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq32 _ (Const32 [math.MaxInt32])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != math.MaxInt32 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq32 x c:(Const32 [math.MinInt32])) + // result: (Eq32 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst32 || auxIntToInt32(c.AuxInt) != math.MinInt32 { + break + } + v.reset(OpEq32) + v.AddArg2(x, c) + return true + } + // match: (Leq32 c:(Const32 [math.MaxInt32]) x) + // result: (Eq32 x c) + for { + c := v_0 + if c.Op != OpConst32 || auxIntToInt32(c.AuxInt) != math.MaxInt32 { + break + } + x := v_1 + v.reset(OpEq32) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq32F(v *Value) bool { @@ -11658,6 +11780,40 @@ func rewriteValuegeneric_OpLeq32U(v *Value) bool { v.AuxInt = boolToAuxInt(true) return true } + // match: (Leq32U _ (Const32 
[-1])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq32U x c:(Const32 [0])) + // result: (Eq32 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst32 || auxIntToInt32(c.AuxInt) != 0 { + break + } + v.reset(OpEq32) + v.AddArg2(x, c) + return true + } + // match: (Leq32U c:(Const32 [-1]) x) + // result: (Eq32 x c) + for { + c := v_0 + if c.Op != OpConst32 || auxIntToInt32(c.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpEq32) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq64(v *Value) bool { @@ -11757,6 +11913,50 @@ func rewriteValuegeneric_OpLeq64(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Leq64 (Const64 [math.MinInt64]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != math.MinInt64 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq64 _ (Const64 [math.MaxInt64])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != math.MaxInt64 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq64 x c:(Const64 [math.MinInt64])) + // result: (Eq64 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst64 || auxIntToInt64(c.AuxInt) != math.MinInt64 { + break + } + v.reset(OpEq64) + v.AddArg2(x, c) + return true + } + // match: (Leq64 c:(Const64 [math.MaxInt64]) x) + // result: (Eq64 x c) + for { + c := v_0 + if c.Op != OpConst64 || auxIntToInt64(c.AuxInt) != math.MaxInt64 { + break + } + x := v_1 + v.reset(OpEq64) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq64F(v *Value) bool { @@ -11825,6 +12025,40 @@ func rewriteValuegeneric_OpLeq64U(v *Value) bool { v.AuxInt = boolToAuxInt(true) return true } + // match: (Leq64U _ (Const64 [-1])) + // 
result: (ConstBool [true]) + for { + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq64U x c:(Const64 [0])) + // result: (Eq64 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst64 || auxIntToInt64(c.AuxInt) != 0 { + break + } + v.reset(OpEq64) + v.AddArg2(x, c) + return true + } + // match: (Leq64U c:(Const64 [-1]) x) + // result: (Eq64 x c) + for { + c := v_0 + if c.Op != OpConst64 || auxIntToInt64(c.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpEq64) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq8(v *Value) bool { @@ -11924,6 +12158,50 @@ func rewriteValuegeneric_OpLeq8(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Leq8 (Const8 [math.MinInt8 ]) _) + // result: (ConstBool [true]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != math.MinInt8 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq8 _ (Const8 [math.MaxInt8 ])) + // result: (ConstBool [true]) + for { + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != math.MaxInt8 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq8 x c:(Const8 [math.MinInt8 ])) + // result: (Eq8 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst8 || auxIntToInt8(c.AuxInt) != math.MinInt8 { + break + } + v.reset(OpEq8) + v.AddArg2(x, c) + return true + } + // match: (Leq8 c:(Const8 [math.MaxInt8 ]) x) + // result: (Eq8 x c) + for { + c := v_0 + if c.Op != OpConst8 || auxIntToInt8(c.AuxInt) != math.MaxInt8 { + break + } + x := v_1 + v.reset(OpEq8) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLeq8U(v *Value) bool { @@ -11972,6 +12250,40 @@ func rewriteValuegeneric_OpLeq8U(v *Value) bool { v.AuxInt = boolToAuxInt(true) return true } + // match: (Leq8U _ (Const8 [-1])) + // result: (ConstBool [true]) + for { + if 
v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(true) + return true + } + // match: (Leq8U x c:(Const8 [0])) + // result: (Eq8 x c) + for { + x := v_0 + c := v_1 + if c.Op != OpConst8 || auxIntToInt8(c.AuxInt) != 0 { + break + } + v.reset(OpEq8) + v.AddArg2(x, c) + return true + } + // match: (Leq8U c:(Const8 [-1]) x) + // result: (Eq8 x c) + for { + c := v_0 + if c.Op != OpConst8 || auxIntToInt8(c.AuxInt) != -1 { + break + } + x := v_1 + v.reset(OpEq8) + v.AddArg2(x, c) + return true + } return false } func rewriteValuegeneric_OpLess16(v *Value) bool { @@ -12066,6 +12378,60 @@ func rewriteValuegeneric_OpLess16(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Less16 _ (Const16 [math.MinInt16])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != math.MinInt16 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less16 (Const16 [math.MaxInt16]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != math.MaxInt16 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less16 x (Const16 [math.MinInt16+1])) + // result: (Eq16 x (Const16 [math.MinInt16])) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != math.MinInt16+1 { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(math.MinInt16) + v.AddArg2(x, v0) + return true + } + // match: (Less16 (Const16 [math.MaxInt16-1]) x) + // result: (Eq16 x (Const16 [math.MaxInt16])) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != math.MaxInt16-1 { + break + } + x := v_1 + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(math.MaxInt16) + v.AddArg2(x, v0) + return true + } return false } 
func rewriteValuegeneric_OpLess16U(v *Value) bool { @@ -12114,6 +12480,50 @@ func rewriteValuegeneric_OpLess16U(v *Value) bool { v.AuxInt = boolToAuxInt(false) return true } + // match: (Less16U (Const16 [-1]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less16U x (Const16 [1])) + // result: (Eq16 x (Const16 [0])) + for { + x := v_0 + if v_1.Op != OpConst16 { + break + } + t := v_1.Type + if auxIntToInt16(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less16U (Const16 [-2]) x) + // result: (Eq16 x (Const16 [-1])) + for { + if v_0.Op != OpConst16 { + break + } + t := v_0.Type + if auxIntToInt16(v_0.AuxInt) != -2 { + break + } + x := v_1 + v.reset(OpEq16) + v0 := b.NewValue0(v.Pos, OpConst16, t) + v0.AuxInt = int16ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess32(v *Value) bool { @@ -12208,6 +12618,60 @@ func rewriteValuegeneric_OpLess32(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Less32 _ (Const32 [math.MinInt32])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != math.MinInt32 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less32 (Const32 [math.MaxInt32]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != math.MaxInt32 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less32 x (Const32 [math.MinInt32+1])) + // result: (Eq32 x (Const32 [math.MinInt32])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != math.MinInt32+1 { + break + } + v.reset(OpEq32) + v0 := 
b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(math.MinInt32) + v.AddArg2(x, v0) + return true + } + // match: (Less32 (Const32 [math.MaxInt32-1]) x) + // result: (Eq32 x (Const32 [math.MaxInt32])) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != math.MaxInt32-1 { + break + } + x := v_1 + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(math.MaxInt32) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess32F(v *Value) bool { @@ -12276,6 +12740,50 @@ func rewriteValuegeneric_OpLess32U(v *Value) bool { v.AuxInt = boolToAuxInt(false) return true } + // match: (Less32U (Const32 [-1]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less32U x (Const32 [1])) + // result: (Eq32 x (Const32 [0])) + for { + x := v_0 + if v_1.Op != OpConst32 { + break + } + t := v_1.Type + if auxIntToInt32(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less32U (Const32 [-2]) x) + // result: (Eq32 x (Const32 [-1])) + for { + if v_0.Op != OpConst32 { + break + } + t := v_0.Type + if auxIntToInt32(v_0.AuxInt) != -2 { + break + } + x := v_1 + v.reset(OpEq32) + v0 := b.NewValue0(v.Pos, OpConst32, t) + v0.AuxInt = int32ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess64(v *Value) bool { @@ -12370,6 +12878,60 @@ func rewriteValuegeneric_OpLess64(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Less64 _ (Const64 [math.MinInt64])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != math.MinInt64 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: 
(Less64 (Const64 [math.MaxInt64]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != math.MaxInt64 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less64 x (Const64 [math.MinInt64+1])) + // result: (Eq64 x (Const64 [math.MinInt64])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != math.MinInt64+1 { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(math.MinInt64) + v.AddArg2(x, v0) + return true + } + // match: (Less64 (Const64 [math.MaxInt64-1]) x) + // result: (Eq64 x (Const64 [math.MaxInt64])) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != math.MaxInt64-1 { + break + } + x := v_1 + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(math.MaxInt64) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess64F(v *Value) bool { @@ -12438,6 +13000,50 @@ func rewriteValuegeneric_OpLess64U(v *Value) bool { v.AuxInt = boolToAuxInt(false) return true } + // match: (Less64U (Const64 [-1]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less64U x (Const64 [1])) + // result: (Eq64 x (Const64 [0])) + for { + x := v_0 + if v_1.Op != OpConst64 { + break + } + t := v_1.Type + if auxIntToInt64(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + v0.AuxInt = int64ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less64U (Const64 [-2]) x) + // result: (Eq64 x (Const64 [-1])) + for { + if v_0.Op != OpConst64 { + break + } + t := v_0.Type + if auxIntToInt64(v_0.AuxInt) != -2 { + break + } + x := v_1 + v.reset(OpEq64) + v0 := b.NewValue0(v.Pos, OpConst64, t) + 
v0.AuxInt = int64ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess8(v *Value) bool { @@ -12532,6 +13138,60 @@ func rewriteValuegeneric_OpLess8(v *Value) bool { v.AddArg2(v0, x) return true } + // match: (Less8 _ (Const8 [math.MinInt8 ])) + // result: (ConstBool [false]) + for { + if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != math.MinInt8 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less8 (Const8 [math.MaxInt8 ]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != math.MaxInt8 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less8 x (Const8 [math.MinInt8 +1])) + // result: (Eq8 x (Const8 [math.MinInt8 ])) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t := v_1.Type + if auxIntToInt8(v_1.AuxInt) != math.MinInt8+1 { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(math.MinInt8) + v.AddArg2(x, v0) + return true + } + // match: (Less8 (Const8 [math.MaxInt8 -1]) x) + // result: (Eq8 x (Const8 [math.MaxInt8 ])) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != math.MaxInt8-1 { + break + } + x := v_1 + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(math.MaxInt8) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLess8U(v *Value) bool { @@ -12580,6 +13240,50 @@ func rewriteValuegeneric_OpLess8U(v *Value) bool { v.AuxInt = boolToAuxInt(false) return true } + // match: (Less8U (Const8 [-1]) _) + // result: (ConstBool [false]) + for { + if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 { + break + } + v.reset(OpConstBool) + v.AuxInt = boolToAuxInt(false) + return true + } + // match: (Less8U x (Const8 [1])) + // result: (Eq8 x (Const8 [0])) + for { + x := v_0 + if v_1.Op != OpConst8 { + break + } + t 
:= v_1.Type + if auxIntToInt8(v_1.AuxInt) != 1 { + break + } + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(0) + v.AddArg2(x, v0) + return true + } + // match: (Less8U (Const8 [-2]) x) + // result: (Eq8 x (Const8 [-1])) + for { + if v_0.Op != OpConst8 { + break + } + t := v_0.Type + if auxIntToInt8(v_0.AuxInt) != -2 { + break + } + x := v_1 + v.reset(OpEq8) + v0 := b.NewValue0(v.Pos, OpConst8, t) + v0.AuxInt = int8ToAuxInt(-1) + v.AddArg2(x, v0) + return true + } return false } func rewriteValuegeneric_OpLoad(v *Value) bool { diff --git a/src/cmd/compile/internal/ssa/sccp.go b/src/cmd/compile/internal/ssa/sccp.go index 3c109548ab..ecc0f94e5b 100644 --- a/src/cmd/compile/internal/ssa/sccp.go +++ b/src/cmd/compile/internal/ssa/sccp.go @@ -80,7 +80,7 @@ func sccp(f *Func) { // build it early since we rely heavily on the def-use chain later t.buildDefUses() - // pick up either an edge or SSA value from worklilst, process it + // pick up either an edge or SSA value from worklist, process it for { if len(t.edges) > 0 { edge := t.edges[0] @@ -145,7 +145,7 @@ func equals(a, b lattice) bool { return true } -// possibleConst checks if Value can be fold to const. For those Values that can +// possibleConst checks if Value can be folded to const. For those Values that can // never become constants(e.g. StaticCall), we don't make futile efforts. func possibleConst(val *Value) bool { if isConst(val) { @@ -343,7 +343,7 @@ func computeLattice(f *Func, val *Value, args ...*Value) lattice { // However, this would create a huge switch for all opcodes that can be // evaluated during compile time. Moreover, some operations can be evaluated // only if its arguments satisfy additional conditions(e.g. divide by zero). - // It's fragile and error prone. We did a trick by reusing the existing rules + // It's fragile and error-prone. We did a trick by reusing the existing rules // in generic rules for compile-time evaluation. 
But generic rules rewrite // original value, this behavior is undesired, because the lattice of values // may change multiple times, once it was rewritten, we lose the opportunity @@ -533,12 +533,19 @@ func rewireSuccessor(block *Block, constVal *Value) bool { block.ResetControls() return true case BlockJumpTable: + // Remove everything but the known taken branch. idx := int(constVal.AuxInt) - targetBlock := block.Succs[idx].b - for len(block.Succs) > 0 { - block.removeEdge(0) + if idx < 0 || idx >= len(block.Succs) { + // This can only happen in unreachable code, + // as an invariant of jump tables is that their + // input index is in range. + // See issue 64826. + return false + } + block.swapSuccessorsByIdx(0, idx) + for len(block.Succs) > 1 { + block.removeEdge(1) } - block.AddEdgeTo(targetBlock) block.Kind = BlockPlain block.Likely = BranchUnknown block.ResetControls() diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go index c9ca778b3a..8290e1730e 100644 --- a/src/cmd/compile/internal/ssa/stackalloc.go +++ b/src/cmd/compile/internal/ssa/stackalloc.go @@ -202,11 +202,11 @@ func (s *stackAllocState) stackalloc() { } // For each type, we keep track of all the stack slots we - // have allocated for that type. - // TODO: share slots among equivalent types. We would need to - // only share among types with the same GC signature. See the - // type.Equal calls below for where this matters. - locations := map[*types.Type][]LocalSlot{} + // have allocated for that type. This map is keyed by + // strings returned by types.LinkString. This guarantees + // type equality, but also lets us match the same type represented + // by two different types.Type structures. See issue 65783. + locations := map[string][]LocalSlot{} // Each time we assign a stack slot to a value v, we remember // the slot we used via an index into locations[v.Type]. 
@@ -258,7 +258,8 @@ func (s *stackAllocState) stackalloc() { noname: // Set of stack slots we could reuse. - locs := locations[v.Type] + typeKey := v.Type.LinkString() + locs := locations[typeKey] // Mark all positions in locs used by interfering values. for i := 0; i < len(locs); i++ { used[i] = false @@ -281,7 +282,7 @@ func (s *stackAllocState) stackalloc() { if i == len(locs) { s.nAuto++ locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0}) - locations[v.Type] = locs + locations[typeKey] = locs } // Use the stack variable at that index for v. loc := locs[i] diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go index 8d1e30e1e6..6335aff832 100644 --- a/src/cmd/compile/internal/ssagen/ssa.go +++ b/src/cmd/compile/internal/ssagen/ssa.go @@ -22,6 +22,7 @@ import ( "cmd/compile/internal/liveness" "cmd/compile/internal/objw" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/rttype" "cmd/compile/internal/ssa" "cmd/compile/internal/staticdata" "cmd/compile/internal/typecheck" @@ -513,16 +514,14 @@ func buildssa(fn *ir.Func, worker int) *ssa.Func { // Populate closure variables. if fn.Needctxt() { clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr) - offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field - for _, n := range fn.ClosureVars { - typ := n.Type() - if !n.Byval() { - typ = types.NewPtr(typ) + csiter := typecheck.NewClosureStructIter(fn.ClosureVars) + for { + n, typ, offset := csiter.Next() + if n == nil { + break } - offset = types.RoundUp(offset, typ.Alignment()) ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo) - offset += typ.Size() // If n is a small variable captured by value, promote // it to PAUTO so it can be converted to SSA. 
@@ -3700,7 +3699,7 @@ func (s *state) minMax(n *ir.CallExpr) *ssa.Value { if typ.IsFloat() { switch Arch.LinkArch.Family { - case sys.AMD64, sys.ARM64: + case sys.AMD64, sys.ARM64, sys.RISCV64: var op ssa.Op switch { case typ.Kind() == types.TFLOAT64 && n.Op() == ir.OMIN: @@ -4894,22 +4893,22 @@ func InitTables() { func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1]) }, - sys.AMD64) + sys.AMD64, sys.RISCV64) addF("math/bits", "RotateLeft16", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1]) }, - sys.AMD64) + sys.AMD64, sys.RISCV64) addF("math/bits", "RotateLeft32", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1]) }, - sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64) + sys.AMD64, sys.ARM, sys.ARM64, sys.Loong64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) addF("math/bits", "RotateLeft64", func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1]) }, - sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm, sys.Loong64) + sys.AMD64, sys.ARM64, sys.Loong64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm) alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...) 
makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value { @@ -5537,7 +5536,7 @@ func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) i := s.expr(fn.X) itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i) s.nilCheck(itab) - itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab + itabidx := fn.Offset() + rttype.ITab.OffsetOf("Fun") closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab) rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i) return closure, rcvr @@ -6348,6 +6347,9 @@ func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value { if !n.X.Type().IsMap() && !n.X.Type().IsChan() { s.Fatalf("node must be a map or a channel") } + if n.X.Type().IsChan() && n.Op() == ir.OLEN { + s.Fatalf("cannot inline len(chan)") // must use runtime.chanlen now + } // if n == nil { // return 0 // } else { @@ -6522,7 +6524,7 @@ func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, targetItab = s.expr(n.ITab) // TODO(mdempsky): Investigate whether compiling n.RType could be // better than loading itab.typ. - target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), targetItab)) // itab.typ + target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, rttype.ITab.OffsetOf("Type"), targetItab)) } else { target = s.expr(n.RType) } @@ -6580,7 +6582,7 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ return } // Load type out of itab, build interface with existing idata. 
- off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) + off := s.newValue1I(ssa.OpOffPtr, byteptr, rttype.ITab.OffsetOf("Type"), itab) typ := s.load(byteptr, off) idata := s.newValue1(ssa.OpIData, byteptr, iface) res = s.newValue2(ssa.OpIMake, dst, typ, idata) @@ -6590,7 +6592,7 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ s.startBlock(bOk) // nonempty -> empty // Need to load type from itab - off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab) + off := s.newValue1I(ssa.OpOffPtr, byteptr, rttype.ITab.OffsetOf("Type"), itab) s.vars[typVar] = s.load(byteptr, off) s.endBlock() @@ -6644,7 +6646,7 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ s.startBlock(bNonNil) typ := itab if !src.IsEmptyInterface() { - typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) + typ = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, rttype.ITab.OffsetOf("Type"), itab)) } // Check the cache first. @@ -6685,9 +6687,9 @@ func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, source, targ // Load hash from type or itab. 
var hash *ssa.Value if src.IsEmptyInterface() { - hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, typ), s.mem()) + hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, rttype.Type.OffsetOf("Hash"), typ), s.mem()) } else { - hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, 2*s.config.PtrSize, itab), s.mem()) + hash = s.newValue2(ssa.OpLoad, typs.UInt32, s.newValue1I(ssa.OpOffPtr, typs.UInt32Ptr, rttype.ITab.OffsetOf("Hash"), itab), s.mem()) } hash = s.newValue1(zext, typs.Uintptr, hash) s.vars[hashVar] = hash @@ -7095,48 +7097,14 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() } - // Populate the data. - // The data is a stream of bytes, which contains the offsets and sizes of the - // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed - // arguments, along with special "operators". Specifically, - // - for each non-aggrgate arg/field/element, its offset from FP (1 byte) and - // size (1 byte) - // - special operators: - // - 0xff - end of sequence - // - 0xfe - print { (at the start of an aggregate-typed argument) - // - 0xfd - print } (at the end of an aggregate-typed argument) - // - 0xfc - print ... (more args/fields/elements) - // - 0xfb - print _ (offset too large) - // These constants need to be in sync with runtime.traceback.go:printArgs. - const ( - _endSeq = 0xff - _startAgg = 0xfe - _endAgg = 0xfd - _dotdotdot = 0xfc - _offsetTooLarge = 0xfb - _special = 0xf0 // above this are operators, below this are ordinary offsets - ) - - const ( - limit = 10 // print no more than 10 args/components - maxDepth = 5 // no more than 5 layers of nesting - - // maxLen is a (conservative) upper bound of the byte stream length. 
For - // each arg/component, it has no more than 2 bytes of data (size, offset), - // and no more than one {, }, ... at each level (it cannot have both the - // data and ... unless it is the last one, just be conservative). Plus 1 - // for _endSeq. - maxLen = (maxDepth*3+2)*limit + 1 - ) - wOff := 0 n := 0 writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) } - // Write one non-aggrgate arg/field/element. + // Write one non-aggregate arg/field/element. write1 := func(sz, offset int64) { - if offset >= _special { - writebyte(_offsetTooLarge) + if offset >= rtabi.TraceArgsSpecial { + writebyte(rtabi.TraceArgsOffsetTooLarge) } else { writebyte(uint8(offset)) writebyte(uint8(sz)) @@ -7148,19 +7116,19 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { // Returns whether to continue visiting. var visitType func(baseOffset int64, t *types.Type, depth int) bool visitType = func(baseOffset int64, t *types.Type, depth int) bool { - if n >= limit { - writebyte(_dotdotdot) + if n >= rtabi.TraceArgsLimit { + writebyte(rtabi.TraceArgsDotdotdot) return false } if !isAggregate(t) { write1(t.Size(), baseOffset) return true } - writebyte(_startAgg) + writebyte(rtabi.TraceArgsStartAgg) depth++ - if depth >= maxDepth { - writebyte(_dotdotdot) - writebyte(_endAgg) + if depth >= rtabi.TraceArgsMaxDepth { + writebyte(rtabi.TraceArgsDotdotdot) + writebyte(rtabi.TraceArgsEndAgg) n++ return true } @@ -7197,7 +7165,7 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { } } } - writebyte(_endAgg) + writebyte(rtabi.TraceArgsEndAgg) return true } @@ -7212,8 +7180,8 @@ func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym { break } } - writebyte(_endSeq) - if wOff > maxLen { + writebyte(rtabi.TraceArgsEndSeq) + if wOff > rtabi.TraceArgsMaxLen { base.Fatalf("ArgInfo too large") } @@ -7414,7 +7382,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { if b.Pos == src.NoXPos { b.Pos = p.Pos // It needs a file, otherwise a no-file 
non-zero line causes confusion. See #35652. if b.Pos == src.NoXPos { - b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695. + b.Pos = s.pp.Text.Pos // Sometimes p.Pos is empty. See #35695. } } b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number @@ -7449,14 +7417,14 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // still be inside the function in question. So if // it ends in a call which doesn't return, add a // nop (which will never execute) after the call. - Arch.Ginsnop(pp) + Arch.Ginsnop(s.pp) } if openDeferInfo != nil { // When doing open-coded defers, generate a disconnected call to // deferreturn and a return. This will be used to during panic // recovery to unwind the stack and return back to the runtime. s.pp.NextLive = s.livenessMap.DeferReturn - p := pp.Prog(obj.ACALL) + p := s.pp.Prog(obj.ACALL) p.To.Type = obj.TYPE_MEM p.To.Name = obj.NAME_EXTERN p.To.Sym = ir.Syms.Deferreturn @@ -7473,7 +7441,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { } } - pp.Prog(obj.ARET) + s.pp.Prog(obj.ARET) } if inlMarks != nil { @@ -7482,7 +7450,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // We have some inline marks. Try to find other instructions we're // going to emit anyway, and use those instructions instead of the // inline marks. - for p := pp.Text; p != nil; p = p.Link { + for p := s.pp.Text; p != nil; p = p.Link { if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm { // Don't use 0-sized instructions as inline marks, because we need // to identify inline mark instructions by pc offset. 
@@ -7500,16 +7468,16 @@ func genssa(f *ssa.Func, pp *objw.Progs) { hasCall = true } pos := p.Pos.AtColumn1() - s := inlMarksByPos[pos] - if len(s) == 0 { + marks := inlMarksByPos[pos] + if len(marks) == 0 { continue } - for _, m := range s { + for _, m := range marks { // We found an instruction with the same source position as // some of the inline marks. // Use this instruction instead. p.Pos = p.Pos.WithIsStmt() // promote position to a statement - pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m]) + s.pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m]) // Make the inline mark a real nop, so it doesn't generate any code. m.As = obj.ANOP m.Pos = src.NoXPos @@ -7521,7 +7489,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction). for _, p := range inlMarkList { if p.As != obj.ANOP { - pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p]) + s.pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p]) } } @@ -7532,27 +7500,27 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // equal to the start of the function. // This ensures that runtime.FuncForPC(uintptr(reflect.ValueOf(fn).Pointer())).Name() // returns the right answer. See issue 58300. - for p := pp.Text; p != nil; p = p.Link { + for p := s.pp.Text; p != nil; p = p.Link { if p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.ANOP { continue } if base.Ctxt.PosTable.Pos(p.Pos).Base().InliningIndex() >= 0 { // Make a real (not 0-sized) nop. - nop := Arch.Ginsnop(pp) + nop := Arch.Ginsnop(s.pp) nop.Pos = e.curfn.Pos().WithIsStmt() // Unfortunately, Ginsnop puts the instruction at the // end of the list. Move it up to just before p. // Unlink from the current list. - for x := pp.Text; x != nil; x = x.Link { + for x := s.pp.Text; x != nil; x = x.Link { if x.Link == nop { x.Link = nop.Link break } } // Splice in right before p. 
- for x := pp.Text; x != nil; x = x.Link { + for x := s.pp.Text; x != nil; x = x.Link { if x.Link == p { nop.Link = p x.Link = nop @@ -7578,9 +7546,9 @@ func genssa(f *ssa.Func, pp *objw.Progs) { for i, b := range f.Blocks { idToIdx[b.ID] = i } - // Note that at this moment, Prog.Pc is a sequence number; it's - // not a real PC until after assembly, so this mapping has to - // be done later. + // Register a callback that will be used later to fill in PCs into location + // lists. At the moment, Prog.Pc is a sequence number; it's not a real PC + // until after assembly, so the translation needs to be deferred. debugInfo.GetPC = func(b, v ssa.ID) int64 { switch v { case ssa.BlockStart.ID: @@ -7622,13 +7590,13 @@ func genssa(f *ssa.Func, pp *objw.Progs) { // Add to list of jump tables to be resolved at assembly time. // The assembler converts from *Prog entries to absolute addresses // once it knows instruction byte offsets. - fi := pp.CurFunc.LSym.Func() + fi := s.pp.CurFunc.LSym.Func() fi.JumpTables = append(fi.JumpTables, obj.JumpTable{Sym: jt.Aux.(*obj.LSym), Targets: targets}) } if e.log { // spew to stdout filename := "" - for p := pp.Text; p != nil; p = p.Link { + for p := s.pp.Text; p != nil; p = p.Link { if p.Pos.IsKnown() && p.InnermostFilename() != filename { filename = p.InnermostFilename() f.Logf("# %s\n", filename) @@ -7650,7 +7618,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { buf.WriteString("") buf.WriteString("

    ") filename := "" - for p := pp.Text; p != nil; p = p.Link { + for p := s.pp.Text; p != nil; p = p.Link { // Don't spam every line with the file name, which is often huge. // Only print changes, and "unknown" is not a change. if p.Pos.IsKnown() && p.InnermostFilename() != filename { @@ -7698,7 +7666,7 @@ func genssa(f *ssa.Func, pp *objw.Progs) { var allPosOld []src.Pos var allPos []src.Pos - for p := pp.Text; p != nil; p = p.Link { + for p := s.pp.Text; p != nil; p = p.Link { if p.Pos.IsKnown() { allPos = allPos[:0] p.Ctxt.AllPos(p.Pos, func(pos src.Pos) { allPos = append(allPos, pos) }) diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go index 1569b5e987..f75f86587a 100644 --- a/src/cmd/compile/internal/syntax/parser.go +++ b/src/cmd/compile/internal/syntax/parser.go @@ -267,7 +267,9 @@ func (p *parser) syntaxErrorAt(pos Pos, msg string) { // determine token string var tok string switch p.tok { - case _Name, _Semi: + case _Name: + tok = "`" + p.lit + "'" + case _Semi: tok = p.lit case _Literal: tok = "literal " + p.lit diff --git a/src/cmd/compile/internal/syntax/testdata/issue20789.go b/src/cmd/compile/internal/syntax/testdata/issue20789.go index 0d5988b9a6..8a6db6d2ee 100644 --- a/src/cmd/compile/internal/syntax/testdata/issue20789.go +++ b/src/cmd/compile/internal/syntax/testdata/issue20789.go @@ -6,4 +6,4 @@ // Line 9 must end in EOF for this test (no newline). 
package e -func([<-chan<-[func /* ERROR unexpected u */ u){go \ No newline at end of file +func([<-chan<-[func /* ERROR unexpected `u' */ u){go \ No newline at end of file diff --git a/src/cmd/compile/internal/syntax/testdata/issue47704.go b/src/cmd/compile/internal/syntax/testdata/issue47704.go index e4cdad148f..aab3790560 100644 --- a/src/cmd/compile/internal/syntax/testdata/issue47704.go +++ b/src/cmd/compile/internal/syntax/testdata/issue47704.go @@ -7,7 +7,7 @@ package p func _() { _ = m[] // ERROR expected operand _ = m[x,] - _ = m[x /* ERROR unexpected a */ a b c d] + _ = m[x /* ERROR unexpected `a' */ a b c d] } // test case from the issue diff --git a/src/cmd/compile/internal/syntax/testdata/issue49205.go b/src/cmd/compile/internal/syntax/testdata/issue49205.go index bbcc950c5c..9b6c769703 100644 --- a/src/cmd/compile/internal/syntax/testdata/issue49205.go +++ b/src/cmd/compile/internal/syntax/testdata/issue49205.go @@ -7,7 +7,7 @@ package p // test case from issue type _ interface{ - m /* ERROR unexpected int in interface type; possibly missing semicolon or newline or } */ int + m /* ERROR unexpected `int' in interface type; possibly missing semicolon or newline or } */ int } // other cases where the fix for this issue affects the error message @@ -16,12 +16,12 @@ const ( x int = 10 /* ERROR unexpected literal "foo" in grouped declaration; possibly missing semicolon or newline or \) */ "foo" ) -var _ = []int{1, 2, 3 /* ERROR unexpected int in composite literal; possibly missing comma or } */ int } +var _ = []int{1, 2, 3 /* ERROR unexpected `int' in composite literal; possibly missing comma or } */ int } type _ struct { x y /* ERROR syntax error: unexpected comma in struct type; possibly missing semicolon or newline or } */ , } -func f(a, b c /* ERROR unexpected d in parameter list; possibly missing comma or \) */ d) { - f(a, b, c /* ERROR unexpected d in argument list; possibly missing comma or \) */ d) +func f(a, b c /* ERROR unexpected `d' in parameter 
list; possibly missing comma or \) */ d) { + f(a, b, c /* ERROR unexpected `d' in argument list; possibly missing comma or \) */ d) } diff --git a/src/cmd/compile/internal/syntax/testdata/issue52391.go b/src/cmd/compile/internal/syntax/testdata/issue52391.go index f2098ceadb..42b71cc83a 100644 --- a/src/cmd/compile/internal/syntax/testdata/issue52391.go +++ b/src/cmd/compile/internal/syntax/testdata/issue52391.go @@ -13,5 +13,5 @@ type _ interface { (int) | (string) (int) | ~(string) (/* ERROR unexpected ~ */ ~int) - (int /* ERROR unexpected \| */ | /* ERROR unexpected string */ string /* ERROR unexpected \) */ ) + (int /* ERROR unexpected \| */ | /* ERROR unexpected `string' */ string /* ERROR unexpected \) */ ) } diff --git a/src/cmd/compile/internal/syntax/testdata/issue65790.go b/src/cmd/compile/internal/syntax/testdata/issue65790.go new file mode 100644 index 0000000000..07ffd12678 --- /dev/null +++ b/src/cmd/compile/internal/syntax/testdata/issue65790.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import ( + "fmt" +) + +func f() { + int status // ERROR syntax error: unexpected `status' at end of statement + fmt.Println(status) +} diff --git a/src/cmd/compile/internal/test/README b/src/cmd/compile/internal/test/README index 242ff794cb..3bf4a57a68 100644 --- a/src/cmd/compile/internal/test/README +++ b/src/cmd/compile/internal/test/README @@ -1,4 +1,4 @@ This directory holds small tests and benchmarks of code generated by the compiler. This code is not for importing, -and the tests are intended to verify that specific optimzations +and the tests are intended to verify that specific optimizations are applied and correct. 
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go index 6d10f6c54c..0ccc7b3761 100644 --- a/src/cmd/compile/internal/test/inl_test.go +++ b/src/cmd/compile/internal/test/inl_test.go @@ -44,7 +44,6 @@ func TestIntendedInlining(t *testing.T) { "chanbuf", "evacuated", "fastlog2", - "fastrand", "float64bits", "funcspdelta", "getm", @@ -54,6 +53,7 @@ func TestIntendedInlining(t *testing.T) { "nextslicecap", "noescape", "pcvalueCacheKey", + "rand32", "readUnaligned32", "readUnaligned64", "releasem", diff --git a/src/cmd/compile/internal/test/logic_test.go b/src/cmd/compile/internal/test/logic_test.go index 1d7043ff60..0e46b5faef 100644 --- a/src/cmd/compile/internal/test/logic_test.go +++ b/src/cmd/compile/internal/test/logic_test.go @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package test import "testing" diff --git a/src/cmd/compile/internal/test/math_test.go b/src/cmd/compile/internal/test/math_test.go index 6bcb4601ba..1febe9d42b 100644 --- a/src/cmd/compile/internal/test/math_test.go +++ b/src/cmd/compile/internal/test/math_test.go @@ -1,3 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package test import ( diff --git a/src/cmd/compile/internal/test/memcombine_test.go b/src/cmd/compile/internal/test/memcombine_test.go index c7e7a208dd..3fc4a004a3 100644 --- a/src/cmd/compile/internal/test/memcombine_test.go +++ b/src/cmd/compile/internal/test/memcombine_test.go @@ -71,3 +71,129 @@ func readUint32be(b []byte) uint64 { //go:noinline func nop() { } + +type T32 struct { + a, b uint32 +} + +//go:noinline +func (t *T32) bigEndianLoad() uint64 { + return uint64(t.a)<<32 | uint64(t.b) +} + +//go:noinline +func (t *T32) littleEndianLoad() uint64 { + return uint64(t.a) | (uint64(t.b) << 32) +} + +//go:noinline +func (t *T32) bigEndianStore(x uint64) { + t.a = uint32(x >> 32) + t.b = uint32(x) +} + +//go:noinline +func (t *T32) littleEndianStore(x uint64) { + t.a = uint32(x) + t.b = uint32(x >> 32) +} + +type T16 struct { + a, b uint16 +} + +//go:noinline +func (t *T16) bigEndianLoad() uint32 { + return uint32(t.a)<<16 | uint32(t.b) +} + +//go:noinline +func (t *T16) littleEndianLoad() uint32 { + return uint32(t.a) | (uint32(t.b) << 16) +} + +//go:noinline +func (t *T16) bigEndianStore(x uint32) { + t.a = uint16(x >> 16) + t.b = uint16(x) +} + +//go:noinline +func (t *T16) littleEndianStore(x uint32) { + t.a = uint16(x) + t.b = uint16(x >> 16) +} + +type T8 struct { + a, b uint8 +} + +//go:noinline +func (t *T8) bigEndianLoad() uint16 { + return uint16(t.a)<<8 | uint16(t.b) +} + +//go:noinline +func (t *T8) littleEndianLoad() uint16 { + return uint16(t.a) | (uint16(t.b) << 8) +} + +//go:noinline +func (t *T8) bigEndianStore(x uint16) { + t.a = uint8(x >> 8) + t.b = uint8(x) +} + +//go:noinline +func (t *T8) littleEndianStore(x uint16) { + t.a = uint8(x) + t.b = uint8(x >> 8) +} + +func TestIssue64468(t *testing.T) { + t32 := T32{1, 2} + if got, want := t32.bigEndianLoad(), uint64(1<<32+2); got != want { + t.Errorf("T32.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t32.littleEndianLoad(), uint64(1+2<<32); got != want { + 
t.Errorf("T32.littleEndianLoad got %x want %x\n", got, want) + } + t16 := T16{1, 2} + if got, want := t16.bigEndianLoad(), uint32(1<<16+2); got != want { + t.Errorf("T16.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t16.littleEndianLoad(), uint32(1+2<<16); got != want { + t.Errorf("T16.littleEndianLoad got %x want %x\n", got, want) + } + t8 := T8{1, 2} + if got, want := t8.bigEndianLoad(), uint16(1<<8+2); got != want { + t.Errorf("T8.bigEndianLoad got %x want %x\n", got, want) + } + if got, want := t8.littleEndianLoad(), uint16(1+2<<8); got != want { + t.Errorf("T8.littleEndianLoad got %x want %x\n", got, want) + } + t32.bigEndianStore(1<<32 + 2) + if got, want := t32, (T32{1, 2}); got != want { + t.Errorf("T32.bigEndianStore got %x want %x\n", got, want) + } + t32.littleEndianStore(1<<32 + 2) + if got, want := t32, (T32{2, 1}); got != want { + t.Errorf("T32.littleEndianStore got %x want %x\n", got, want) + } + t16.bigEndianStore(1<<16 + 2) + if got, want := t16, (T16{1, 2}); got != want { + t.Errorf("T16.bigEndianStore got %x want %x\n", got, want) + } + t16.littleEndianStore(1<<16 + 2) + if got, want := t16, (T16{2, 1}); got != want { + t.Errorf("T16.littleEndianStore got %x want %x\n", got, want) + } + t8.bigEndianStore(1<<8 + 2) + if got, want := t8, (T8{1, 2}); got != want { + t.Errorf("T8.bigEndianStore got %x want %x\n", got, want) + } + t8.littleEndianStore(1<<8 + 2) + if got, want := t8, (T8{2, 1}); got != want { + t.Errorf("T8.littleEndianStore got %x want %x\n", got, want) + } +} diff --git a/src/cmd/compile/internal/test/pgo_devirtualize_test.go b/src/cmd/compile/internal/test/pgo_devirtualize_test.go index c457478a1f..af09107dc0 100644 --- a/src/cmd/compile/internal/test/pgo_devirtualize_test.go +++ b/src/cmd/compile/internal/test/pgo_devirtualize_test.go @@ -14,8 +14,16 @@ import ( "testing" ) +type devirtualization struct { + pos string + callee string +} + +const profFileName = "devirt.pprof" +const preProfFileName = 
"devirt.pprof.node_map" + // testPGODevirtualize tests that specific PGO devirtualize rewrites are performed. -func testPGODevirtualize(t *testing.T, dir string) { +func testPGODevirtualize(t *testing.T, dir string, want []devirtualization, pgoProfileName string) { testenv.MustHaveGoRun(t) t.Parallel() @@ -23,7 +31,7 @@ func testPGODevirtualize(t *testing.T, dir string) { // Add a go.mod so we have a consistent symbol names in this temp dir. goMod := fmt.Sprintf(`module %s -go 1.19 +go 1.21 `, pkg) if err := os.WriteFile(filepath.Join(dir, "go.mod"), []byte(goMod), 0644); err != nil { t.Fatalf("error writing go.mod: %v", err) @@ -40,7 +48,7 @@ go 1.19 } // Build the test with the profile. - pprof := filepath.Join(dir, "devirt.pprof") + pprof := filepath.Join(dir, pgoProfileName) gcflag := fmt.Sprintf("-gcflags=-m=2 -pgoprofile=%s -d=pgodebug=3", pprof) out := filepath.Join(dir, "test.exe") cmd = testenv.CleanCmdEnv(testenv.Command(t, testenv.GoToolPath(t), "test", "-o", out, gcflag, ".")) @@ -60,51 +68,6 @@ go 1.19 t.Fatalf("error starting go test: %v", err) } - type devirtualization struct { - pos string - callee string - } - - want := []devirtualization{ - // ExerciseIface - { - pos: "./devirt.go:101:20", - callee: "mult.Mult.Multiply", - }, - { - pos: "./devirt.go:101:39", - callee: "Add.Add", - }, - // ExerciseFuncConcrete - { - pos: "./devirt.go:173:36", - callee: "AddFn", - }, - { - pos: "./devirt.go:173:15", - callee: "mult.MultFn", - }, - // ExerciseFuncField - { - pos: "./devirt.go:207:35", - callee: "AddFn", - }, - { - pos: "./devirt.go:207:19", - callee: "mult.MultFn", - }, - // ExerciseFuncClosure - // TODO(prattmic): Closure callees not implemented. 
- //{ - // pos: "./devirt.go:249:27", - // callee: "AddClosure.func1", - //}, - //{ - // pos: "./devirt.go:249:15", - // callee: "mult.MultClosure.func1", - //}, - } - got := make(map[devirtualization]struct{}) devirtualizedLine := regexp.MustCompile(`(.*): PGO devirtualizing \w+ call .* to (.*)`) @@ -166,11 +129,199 @@ func TestPGODevirtualize(t *testing.T) { if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil { t.Fatalf("error creating dir: %v", err) } - for _, file := range []string{"devirt.go", "devirt_test.go", "devirt.pprof", filepath.Join("mult.pkg", "mult.go")} { + for _, file := range []string{"devirt.go", "devirt_test.go", profFileName, filepath.Join("mult.pkg", "mult.go")} { if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { t.Fatalf("error copying %s: %v", file, err) } } - testPGODevirtualize(t, dir) + want := []devirtualization{ + // ExerciseIface + { + pos: "./devirt.go:101:20", + callee: "mult.Mult.Multiply", + }, + { + pos: "./devirt.go:101:39", + callee: "Add.Add", + }, + // ExerciseFuncConcrete + { + pos: "./devirt.go:173:36", + callee: "AddFn", + }, + { + pos: "./devirt.go:173:15", + callee: "mult.MultFn", + }, + // ExerciseFuncField + { + pos: "./devirt.go:207:35", + callee: "AddFn", + }, + { + pos: "./devirt.go:207:19", + callee: "mult.MultFn", + }, + // ExerciseFuncClosure + // TODO(prattmic): Closure callees not implemented. + //{ + // pos: "./devirt.go:249:27", + // callee: "AddClosure.func1", + //}, + //{ + // pos: "./devirt.go:249:15", + // callee: "mult.MultClosure.func1", + //}, + } + + testPGODevirtualize(t, dir, want, profFileName) +} + +// TestPGOPreprocessDevirtualize tests that specific functions are devirtualized when PGO +// is applied to the exact source that was profiled. The input profile is PGO preprocessed file. 
+func TestPGOPreprocessDevirtualize(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil { + t.Fatalf("error creating dir: %v", err) + } + for _, file := range []string{"devirt.go", "devirt_test.go", preProfFileName, filepath.Join("mult.pkg", "mult.go")} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + want := []devirtualization{ + // ExerciseIface + { + pos: "./devirt.go:101:20", + callee: "mult.Mult.Multiply", + }, + { + pos: "./devirt.go:101:39", + callee: "Add.Add", + }, + // ExerciseFuncConcrete + { + pos: "./devirt.go:173:36", + callee: "AddFn", + }, + { + pos: "./devirt.go:173:15", + callee: "mult.MultFn", + }, + // ExerciseFuncField + { + pos: "./devirt.go:207:35", + callee: "AddFn", + }, + { + pos: "./devirt.go:207:19", + callee: "mult.MultFn", + }, + // ExerciseFuncClosure + // TODO(prattmic): Closure callees not implemented. + //{ + // pos: "./devirt.go:249:27", + // callee: "AddClosure.func1", + //}, + //{ + // pos: "./devirt.go:249:15", + // callee: "mult.MultClosure.func1", + //}, + } + + testPGODevirtualize(t, dir, want, preProfFileName) +} + +// Regression test for https://go.dev/issue/65615. If a target function changes +// from non-generic to generic we can't devirtualize it (don't know the type +// parameters), but the compiler should not crash. +func TestLookupFuncGeneric(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata", "pgo", "devirtualize") + + // Copy the module to a scratch location so we can add a go.mod. 
+ dir := t.TempDir() + if err := os.Mkdir(filepath.Join(dir, "mult.pkg"), 0755); err != nil { + t.Fatalf("error creating dir: %v", err) + } + for _, file := range []string{"devirt.go", "devirt_test.go", profFileName, filepath.Join("mult.pkg", "mult.go")} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + // Change MultFn from a concrete function to a parameterized function. + if err := convertMultToGeneric(filepath.Join(dir, "mult.pkg", "mult.go")); err != nil { + t.Fatalf("error editing mult.go: %v", err) + } + + // Same as TestPGODevirtualize except for MultFn, which we cannot + // devirtualize to because it has become generic. + // + // Note that the important part of this test is that the build is + // successful, not the specific devirtualizations. + want := []devirtualization{ + // ExerciseIface + { + pos: "./devirt.go:101:20", + callee: "mult.Mult.Multiply", + }, + { + pos: "./devirt.go:101:39", + callee: "Add.Add", + }, + // ExerciseFuncConcrete + { + pos: "./devirt.go:173:36", + callee: "AddFn", + }, + // ExerciseFuncField + { + pos: "./devirt.go:207:35", + callee: "AddFn", + }, + // ExerciseFuncClosure + // TODO(prattmic): Closure callees not implemented. + //{ + // pos: "./devirt.go:249:27", + // callee: "AddClosure.func1", + //}, + //{ + // pos: "./devirt.go:249:15", + // callee: "mult.MultClosure.func1", + //}, + } + + testPGODevirtualize(t, dir, want, profFileName) +} + +var multFnRe = regexp.MustCompile(`func MultFn\(a, b int64\) int64`) + +func convertMultToGeneric(path string) error { + content, err := os.ReadFile(path) + if err != nil { + return fmt.Errorf("error opening: %w", err) + } + + if !multFnRe.Match(content) { + return fmt.Errorf("MultFn not found; update regexp?") + } + + // Users of MultFn shouldn't need adjustment, type inference should + // work OK. 
+ content = multFnRe.ReplaceAll(content, []byte(`func MultFn[T int32|int64](a, b T) T`)) + + return os.WriteFile(path, content, 0644) } diff --git a/src/cmd/compile/internal/test/pgo_inl_test.go b/src/cmd/compile/internal/test/pgo_inl_test.go index da6c4a53d3..7d665655d5 100644 --- a/src/cmd/compile/internal/test/pgo_inl_test.go +++ b/src/cmd/compile/internal/test/pgo_inl_test.go @@ -18,6 +18,9 @@ import ( "testing" ) +const profFile = "inline_hot.pprof" +const preProfFile = "inline_hot.pprof.node_map" + func buildPGOInliningTest(t *testing.T, dir string, gcflag string) []byte { const pkg = "example.com/pgo/inline" @@ -43,7 +46,7 @@ go 1.19 } // testPGOIntendedInlining tests that specific functions are inlined. -func testPGOIntendedInlining(t *testing.T, dir string) { +func testPGOIntendedInlining(t *testing.T, dir string, profFile string) { testenv.MustHaveGoRun(t) t.Parallel() @@ -86,8 +89,7 @@ func testPGOIntendedInlining(t *testing.T, dir string) { // Build the test with the profile. Use a smaller threshold to test. // TODO: maybe adjust the test to work with default threshold. - pprof := filepath.Join(dir, "inline_hot.pprof") - gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", pprof) + gcflag := fmt.Sprintf("-m -m -pgoprofile=%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90", profFile) out := buildPGOInliningTest(t, dir, gcflag) scanner := bufio.NewScanner(bytes.NewReader(out)) @@ -155,13 +157,34 @@ func TestPGOIntendedInlining(t *testing.T) { // Copy the module to a scratch location so we can add a go.mod. 
dir := t.TempDir() - for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} { + for _, file := range []string{"inline_hot.go", "inline_hot_test.go", profFile} { if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { t.Fatalf("error copying %s: %v", file, err) } } - testPGOIntendedInlining(t, dir) + testPGOIntendedInlining(t, dir, profFile) +} + +// TestPGOIntendedInlining tests that specific functions are inlined when PGO +// is applied to the exact source that was profiled. +func TestPGOPreprocessInlining(t *testing.T) { + wd, err := os.Getwd() + if err != nil { + t.Fatalf("error getting wd: %v", err) + } + srcDir := filepath.Join(wd, "testdata/pgo/inline") + + // Copy the module to a scratch location so we can add a go.mod. + dir := t.TempDir() + + for _, file := range []string{"inline_hot.go", "inline_hot_test.go", preProfFile} { + if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { + t.Fatalf("error copying %s: %v", file, err) + } + } + + testPGOIntendedInlining(t, dir, preProfFile) } // TestPGOIntendedInlining tests that specific functions are inlined when PGO @@ -177,7 +200,7 @@ func TestPGOIntendedInliningShiftedLines(t *testing.T) { dir := t.TempDir() // Copy most of the files unmodified. - for _, file := range []string{"inline_hot_test.go", "inline_hot.pprof"} { + for _, file := range []string{"inline_hot_test.go", profFile} { if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { t.Fatalf("error copying %s : %v", file, err) } @@ -209,7 +232,7 @@ func TestPGOIntendedInliningShiftedLines(t *testing.T) { dst.Close() - testPGOIntendedInlining(t, dir) + testPGOIntendedInlining(t, dir, profFile) } // TestPGOSingleIndex tests that the sample index can not be 1 and compilation @@ -239,15 +262,15 @@ func TestPGOSingleIndex(t *testing.T) { // Copy the module to a scratch location so we can add a go.mod. 
dir := t.TempDir() - originalPprofFile, err := os.Open(filepath.Join(srcDir, "inline_hot.pprof")) + originalPprofFile, err := os.Open(filepath.Join(srcDir, profFile)) if err != nil { - t.Fatalf("error opening inline_hot.pprof: %v", err) + t.Fatalf("error opening %v: %v", profFile, err) } defer originalPprofFile.Close() p, err := profile.Parse(originalPprofFile) if err != nil { - t.Fatalf("error parsing inline_hot.pprof: %v", err) + t.Fatalf("error parsing %v: %v", profFile, err) } // Move the samples count value-type to the 0 index. @@ -258,14 +281,14 @@ func TestPGOSingleIndex(t *testing.T) { s.Value = []int64{s.Value[tc.originalIndex]} } - modifiedPprofFile, err := os.Create(filepath.Join(dir, "inline_hot.pprof")) + modifiedPprofFile, err := os.Create(filepath.Join(dir, profFile)) if err != nil { - t.Fatalf("error creating inline_hot.pprof: %v", err) + t.Fatalf("error creating %v: %v", profFile, err) } defer modifiedPprofFile.Close() if err := p.Write(modifiedPprofFile); err != nil { - t.Fatalf("error writing inline_hot.pprof: %v", err) + t.Fatalf("error writing %v: %v", profFile, err) } for _, file := range []string{"inline_hot.go", "inline_hot_test.go"} { @@ -274,7 +297,7 @@ func TestPGOSingleIndex(t *testing.T) { } } - testPGOIntendedInlining(t, dir) + testPGOIntendedInlining(t, dir, profFile) }) } } @@ -312,13 +335,13 @@ func TestPGOHash(t *testing.T) { // Copy the module to a scratch location so we can add a go.mod. dir := t.TempDir() - for _, file := range []string{"inline_hot.go", "inline_hot_test.go", "inline_hot.pprof"} { + for _, file := range []string{"inline_hot.go", "inline_hot_test.go", profFile} { if err := copyFile(filepath.Join(dir, file), filepath.Join(srcDir, file)); err != nil { t.Fatalf("error copying %s: %v", file, err) } } - pprof := filepath.Join(dir, "inline_hot.pprof") + pprof := filepath.Join(dir, profFile) // build with -trimpath so the source location (thus the hash) // does not depend on the temporary directory path. 
gcflag0 := fmt.Sprintf("-pgoprofile=%s -trimpath %s=>%s -d=pgoinlinebudget=160,pgoinlinecdfthreshold=90,pgodebug=1", pprof, dir, pkg) diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go index 56e5404079..195c65a9ea 100644 --- a/src/cmd/compile/internal/test/test.go +++ b/src/cmd/compile/internal/test/test.go @@ -1 +1,5 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package test diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go index 2b8cd9fad3..cd7b5bc2c4 100644 --- a/src/cmd/compile/internal/test/testdata/arith_test.go +++ b/src/cmd/compile/internal/test/testdata/arith_test.go @@ -268,6 +268,70 @@ func testOverflowConstShift(t *testing.T) { } } +//go:noinline +func rsh64x64ConstOverflow8(x int8) int64 { + return int64(x) >> 9 +} + +//go:noinline +func rsh64x64ConstOverflow16(x int16) int64 { + return int64(x) >> 17 +} + +//go:noinline +func rsh64x64ConstOverflow32(x int32) int64 { + return int64(x) >> 33 +} + +func testArithRightShiftConstOverflow(t *testing.T) { + allSet := int64(-1) + if got, want := rsh64x64ConstOverflow8(0x7f), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow16(0x7fff), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow32(0x7ffffff), int64(0); got != want { + t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow8(int8(-1)), allSet; got != want { + t.Errorf("rsh64x64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64x64ConstOverflow16(int16(-1)), allSet; got != want { + t.Errorf("rsh64x64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, 
want := rsh64x64ConstOverflow32(int32(-1)), allSet; got != want { + t.Errorf("rsh64x64ConstOverflow32 failed: got %v, want %v", got, want) + } +} + +//go:noinline +func rsh64Ux64ConstOverflow8(x uint8) uint64 { + return uint64(x) >> 9 +} + +//go:noinline +func rsh64Ux64ConstOverflow16(x uint16) uint64 { + return uint64(x) >> 17 +} + +//go:noinline +func rsh64Ux64ConstOverflow32(x uint32) uint64 { + return uint64(x) >> 33 +} + +func testRightShiftConstOverflow(t *testing.T) { + if got, want := rsh64Ux64ConstOverflow8(0xff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow8 failed: got %v, want %v", got, want) + } + if got, want := rsh64Ux64ConstOverflow16(0xffff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow16 failed: got %v, want %v", got, want) + } + if got, want := rsh64Ux64ConstOverflow32(0xffffffff), uint64(0); got != want { + t.Errorf("rsh64Ux64ConstOverflow32 failed: got %v, want %v", got, want) + } +} + // test64BitConstMult tests that rewrite rules don't fold 64 bit constants // into multiply instructions. 
func test64BitConstMult(t *testing.T) { @@ -918,6 +982,8 @@ func TestArithmetic(t *testing.T) { testShiftCX(t) testSubConst(t) testOverflowConstShift(t) + testArithRightShiftConstOverflow(t) + testRightShiftConstOverflow(t) testArithConstShift(t) testArithRshConst(t) testLargeConst(t) diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go index ff3a1609c5..501f79eee1 100644 --- a/src/cmd/compile/internal/test/testdata/ctl_test.go +++ b/src/cmd/compile/internal/test/testdata/ctl_test.go @@ -70,7 +70,6 @@ func switch_ssa(a int) int { ret += 1 } return ret - } func fallthrough_ssa(a int) int { @@ -92,7 +91,6 @@ func fallthrough_ssa(a int) int { ret++ } return ret - } func testFallthrough(t *testing.T) { diff --git a/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof.node_map b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof.node_map new file mode 100644 index 0000000000..c55f990e84 --- /dev/null +++ b/src/cmd/compile/internal/test/testdata/pgo/devirtualize/devirt.pprof.node_map @@ -0,0 +1,52 @@ +GO PREPROFILE V1 +example.com/pgo/devirtualize.ExerciseFuncClosure +example.com/pgo/devirtualize/mult%2epkg.MultClosure.func1 +18 93 +example.com/pgo/devirtualize.ExerciseIface +example.com/pgo/devirtualize/mult%2epkg.NegMult.Multiply +49 4 +example.com/pgo/devirtualize.ExerciseFuncConcrete +example.com/pgo/devirtualize.AddFn +48 103 +example.com/pgo/devirtualize.ExerciseFuncField +example.com/pgo/devirtualize/mult%2epkg.NegMultFn +23 8 +example.com/pgo/devirtualize.ExerciseFuncField +example.com/pgo/devirtualize/mult%2epkg.MultFn +23 94 +example.com/pgo/devirtualize.ExerciseIface +example.com/pgo/devirtualize/mult%2epkg.Mult.Multiply +49 40 +example.com/pgo/devirtualize.ExerciseIface +example.com/pgo/devirtualize.Add.Add +49 55 +example.com/pgo/devirtualize.ExerciseFuncConcrete +example.com/pgo/devirtualize/mult%2epkg.NegMultFn +48 8 
+example.com/pgo/devirtualize.ExerciseFuncClosure +example.com/pgo/devirtualize/mult%2epkg.NegMultClosure.func1 +18 10 +example.com/pgo/devirtualize.ExerciseIface +example.com/pgo/devirtualize.Sub.Add +49 7 +example.com/pgo/devirtualize.ExerciseFuncField +example.com/pgo/devirtualize.AddFn +23 101 +example.com/pgo/devirtualize.ExerciseFuncField +example.com/pgo/devirtualize.SubFn +23 12 +example.com/pgo/devirtualize.BenchmarkDevirtFuncConcrete +example.com/pgo/devirtualize.ExerciseFuncConcrete +1 2 +example.com/pgo/devirtualize.ExerciseFuncConcrete +example.com/pgo/devirtualize/mult%2epkg.MultFn +48 91 +example.com/pgo/devirtualize.ExerciseFuncConcrete +example.com/pgo/devirtualize.SubFn +48 5 +example.com/pgo/devirtualize.ExerciseFuncClosure +example.com/pgo/devirtualize.Add.Add +18 92 +example.com/pgo/devirtualize.ExerciseFuncClosure +example.com/pgo/devirtualize.Sub.Add +18 14 diff --git a/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof.node_map b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof.node_map new file mode 100644 index 0000000000..6e5f937a50 --- /dev/null +++ b/src/cmd/compile/internal/test/testdata/pgo/inline/inline_hot.pprof.node_map @@ -0,0 +1,13 @@ +GO PREPROFILE V1 +example.com/pgo/inline.benchmarkB +example.com/pgo/inline.A +18 1 +example.com/pgo/inline.(*BS).NS +example.com/pgo/inline.T +8 3 +example.com/pgo/inline.(*BS).NS +example.com/pgo/inline.T +13 2 +example.com/pgo/inline.A +example.com/pgo/inline.(*BS).NS +7 129 diff --git a/src/cmd/compile/internal/typecheck/_builtin/coverage.go b/src/cmd/compile/internal/typecheck/_builtin/coverage.go index 02226356bc..f5c9e24991 100644 --- a/src/cmd/compile/internal/typecheck/_builtin/coverage.go +++ b/src/cmd/compile/internal/typecheck/_builtin/coverage.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // NOTE: If you change this file you must run "go generate" +// in cmd/compile/internal/typecheck // to update builtin.go. 
This is not done automatically // to avoid depending on having a working compiler binary. diff --git a/src/cmd/compile/internal/typecheck/_builtin/runtime.go b/src/cmd/compile/internal/typecheck/_builtin/runtime.go index f27a773a88..1ae9fe21d9 100644 --- a/src/cmd/compile/internal/typecheck/_builtin/runtime.go +++ b/src/cmd/compile/internal/typecheck/_builtin/runtime.go @@ -3,6 +3,7 @@ // license that can be found in the LICENSE file. // NOTE: If you change this file you must run "go generate" +// in cmd/compile/internal/typecheck // to update builtin.go. This is not done automatically // to avoid depending on having a working compiler binary. @@ -122,7 +123,7 @@ func panicrangeexit() // defer in range over func func deferrangefunc() interface{} -func fastrand() uint32 +func rand32() uint32 // *byte is really *runtime.Type func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any) @@ -158,7 +159,8 @@ func makechan(chanType *byte, size int) (hchan chan any) func chanrecv1(hchan <-chan any, elem *any) func chanrecv2(hchan <-chan any, elem *any) bool func chansend1(hchan chan<- any, elem *any) -func closechan(hchan any) +func closechan(hchan chan<- any) +func chanlen(hchan any) int var writeBarrier struct { enabled bool diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go index 142fc26d2e..975eff3f50 100644 --- a/src/cmd/compile/internal/typecheck/builtin.go +++ b/src/cmd/compile/internal/typecheck/builtin.go @@ -104,7 +104,7 @@ var runtimeDecls = [...]struct { {"efaceeq", funcTag, 72}, {"panicrangeexit", funcTag, 9}, {"deferrangefunc", funcTag, 73}, - {"fastrand", funcTag, 74}, + {"rand32", funcTag, 74}, {"makemap64", funcTag, 76}, {"makemap", funcTag, 77}, {"makemap_small", funcTag, 78}, @@ -136,106 +136,107 @@ var runtimeDecls = [...]struct { {"chanrecv1", funcTag, 100}, {"chanrecv2", funcTag, 101}, {"chansend1", funcTag, 103}, - {"closechan", funcTag, 30}, - {"writeBarrier", varTag, 105}, - 
{"typedmemmove", funcTag, 106}, - {"typedmemclr", funcTag, 107}, - {"typedslicecopy", funcTag, 108}, - {"selectnbsend", funcTag, 109}, - {"selectnbrecv", funcTag, 110}, - {"selectsetpc", funcTag, 111}, - {"selectgo", funcTag, 112}, + {"closechan", funcTag, 104}, + {"chanlen", funcTag, 105}, + {"writeBarrier", varTag, 107}, + {"typedmemmove", funcTag, 108}, + {"typedmemclr", funcTag, 109}, + {"typedslicecopy", funcTag, 110}, + {"selectnbsend", funcTag, 111}, + {"selectnbrecv", funcTag, 112}, + {"selectsetpc", funcTag, 113}, + {"selectgo", funcTag, 114}, {"block", funcTag, 9}, - {"makeslice", funcTag, 113}, - {"makeslice64", funcTag, 114}, - {"makeslicecopy", funcTag, 115}, - {"growslice", funcTag, 117}, - {"unsafeslicecheckptr", funcTag, 118}, + {"makeslice", funcTag, 115}, + {"makeslice64", funcTag, 116}, + {"makeslicecopy", funcTag, 117}, + {"growslice", funcTag, 119}, + {"unsafeslicecheckptr", funcTag, 120}, {"panicunsafeslicelen", funcTag, 9}, {"panicunsafeslicenilptr", funcTag, 9}, - {"unsafestringcheckptr", funcTag, 119}, + {"unsafestringcheckptr", funcTag, 121}, {"panicunsafestringlen", funcTag, 9}, {"panicunsafestringnilptr", funcTag, 9}, - {"memmove", funcTag, 120}, - {"memclrNoHeapPointers", funcTag, 121}, - {"memclrHasPointers", funcTag, 121}, - {"memequal", funcTag, 122}, - {"memequal0", funcTag, 123}, - {"memequal8", funcTag, 123}, - {"memequal16", funcTag, 123}, - {"memequal32", funcTag, 123}, - {"memequal64", funcTag, 123}, - {"memequal128", funcTag, 123}, - {"f32equal", funcTag, 124}, - {"f64equal", funcTag, 124}, - {"c64equal", funcTag, 124}, - {"c128equal", funcTag, 124}, - {"strequal", funcTag, 124}, - {"interequal", funcTag, 124}, - {"nilinterequal", funcTag, 124}, - {"memhash", funcTag, 125}, - {"memhash0", funcTag, 126}, - {"memhash8", funcTag, 126}, - {"memhash16", funcTag, 126}, - {"memhash32", funcTag, 126}, - {"memhash64", funcTag, 126}, - {"memhash128", funcTag, 126}, - {"f32hash", funcTag, 127}, - {"f64hash", funcTag, 127}, - {"c64hash", 
funcTag, 127}, - {"c128hash", funcTag, 127}, - {"strhash", funcTag, 127}, - {"interhash", funcTag, 127}, - {"nilinterhash", funcTag, 127}, - {"int64div", funcTag, 128}, - {"uint64div", funcTag, 129}, - {"int64mod", funcTag, 128}, - {"uint64mod", funcTag, 129}, - {"float64toint64", funcTag, 130}, - {"float64touint64", funcTag, 131}, - {"float64touint32", funcTag, 132}, - {"int64tofloat64", funcTag, 133}, - {"int64tofloat32", funcTag, 135}, - {"uint64tofloat64", funcTag, 136}, - {"uint64tofloat32", funcTag, 137}, - {"uint32tofloat64", funcTag, 138}, - {"complex128div", funcTag, 139}, - {"getcallerpc", funcTag, 140}, - {"getcallersp", funcTag, 140}, + {"memmove", funcTag, 122}, + {"memclrNoHeapPointers", funcTag, 123}, + {"memclrHasPointers", funcTag, 123}, + {"memequal", funcTag, 124}, + {"memequal0", funcTag, 125}, + {"memequal8", funcTag, 125}, + {"memequal16", funcTag, 125}, + {"memequal32", funcTag, 125}, + {"memequal64", funcTag, 125}, + {"memequal128", funcTag, 125}, + {"f32equal", funcTag, 126}, + {"f64equal", funcTag, 126}, + {"c64equal", funcTag, 126}, + {"c128equal", funcTag, 126}, + {"strequal", funcTag, 126}, + {"interequal", funcTag, 126}, + {"nilinterequal", funcTag, 126}, + {"memhash", funcTag, 127}, + {"memhash0", funcTag, 128}, + {"memhash8", funcTag, 128}, + {"memhash16", funcTag, 128}, + {"memhash32", funcTag, 128}, + {"memhash64", funcTag, 128}, + {"memhash128", funcTag, 128}, + {"f32hash", funcTag, 129}, + {"f64hash", funcTag, 129}, + {"c64hash", funcTag, 129}, + {"c128hash", funcTag, 129}, + {"strhash", funcTag, 129}, + {"interhash", funcTag, 129}, + {"nilinterhash", funcTag, 129}, + {"int64div", funcTag, 130}, + {"uint64div", funcTag, 131}, + {"int64mod", funcTag, 130}, + {"uint64mod", funcTag, 131}, + {"float64toint64", funcTag, 132}, + {"float64touint64", funcTag, 133}, + {"float64touint32", funcTag, 134}, + {"int64tofloat64", funcTag, 135}, + {"int64tofloat32", funcTag, 137}, + {"uint64tofloat64", funcTag, 138}, + {"uint64tofloat32", 
funcTag, 139}, + {"uint32tofloat64", funcTag, 140}, + {"complex128div", funcTag, 141}, + {"getcallerpc", funcTag, 142}, + {"getcallersp", funcTag, 142}, {"racefuncenter", funcTag, 31}, {"racefuncexit", funcTag, 9}, {"raceread", funcTag, 31}, {"racewrite", funcTag, 31}, - {"racereadrange", funcTag, 141}, - {"racewriterange", funcTag, 141}, - {"msanread", funcTag, 141}, - {"msanwrite", funcTag, 141}, - {"msanmove", funcTag, 142}, - {"asanread", funcTag, 141}, - {"asanwrite", funcTag, 141}, - {"checkptrAlignment", funcTag, 143}, - {"checkptrArithmetic", funcTag, 145}, - {"libfuzzerTraceCmp1", funcTag, 146}, - {"libfuzzerTraceCmp2", funcTag, 147}, - {"libfuzzerTraceCmp4", funcTag, 148}, - {"libfuzzerTraceCmp8", funcTag, 149}, - {"libfuzzerTraceConstCmp1", funcTag, 146}, - {"libfuzzerTraceConstCmp2", funcTag, 147}, - {"libfuzzerTraceConstCmp4", funcTag, 148}, - {"libfuzzerTraceConstCmp8", funcTag, 149}, - {"libfuzzerHookStrCmp", funcTag, 150}, - {"libfuzzerHookEqualFold", funcTag, 150}, - {"addCovMeta", funcTag, 152}, + {"racereadrange", funcTag, 143}, + {"racewriterange", funcTag, 143}, + {"msanread", funcTag, 143}, + {"msanwrite", funcTag, 143}, + {"msanmove", funcTag, 144}, + {"asanread", funcTag, 143}, + {"asanwrite", funcTag, 143}, + {"checkptrAlignment", funcTag, 145}, + {"checkptrArithmetic", funcTag, 147}, + {"libfuzzerTraceCmp1", funcTag, 148}, + {"libfuzzerTraceCmp2", funcTag, 149}, + {"libfuzzerTraceCmp4", funcTag, 150}, + {"libfuzzerTraceCmp8", funcTag, 151}, + {"libfuzzerTraceConstCmp1", funcTag, 148}, + {"libfuzzerTraceConstCmp2", funcTag, 149}, + {"libfuzzerTraceConstCmp4", funcTag, 150}, + {"libfuzzerTraceConstCmp8", funcTag, 151}, + {"libfuzzerHookStrCmp", funcTag, 152}, + {"libfuzzerHookEqualFold", funcTag, 152}, + {"addCovMeta", funcTag, 154}, {"x86HasPOPCNT", varTag, 6}, {"x86HasSSE41", varTag, 6}, {"x86HasFMA", varTag, 6}, {"armHasVFPv4", varTag, 6}, {"arm64HasATOMICS", varTag, 6}, - {"asanregisterglobals", funcTag, 121}, + {"asanregisterglobals", 
funcTag, 123}, } func runtimeTypes() []*types.Type { - var typs [153]*types.Type + var typs [155]*types.Type typs[0] = types.ByteType typs[1] = types.NewPtr(typs[0]) typs[2] = types.Types[types.TANY] @@ -340,55 +341,57 @@ func runtimeTypes() []*types.Type { typs[101] = newSig(params(typs[99], typs[3]), params(typs[6])) typs[102] = types.NewChan(typs[2], types.Csend) typs[103] = newSig(params(typs[102], typs[3]), nil) - typs[104] = types.NewArray(typs[0], 3) - typs[105] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])}) - typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil) - typs[107] = newSig(params(typs[1], typs[3]), nil) - typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15])) - typs[109] = newSig(params(typs[102], typs[3]), params(typs[6])) - typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6])) - typs[111] = newSig(params(typs[71]), nil) - typs[112] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6])) - typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7])) - typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) - typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7])) - typs[116] = types.NewSlice(typs[2]) - typs[117] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[116])) - typs[118] = newSig(params(typs[1], typs[7], typs[22]), nil) - typs[119] = newSig(params(typs[7], typs[22]), nil) - typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil) - typs[121] = newSig(params(typs[7], typs[5]), nil) - typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) - typs[123] = newSig(params(typs[3], typs[3]), params(typs[6])) - typs[124] = 
newSig(params(typs[7], typs[7]), params(typs[6])) - typs[125] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) - typs[126] = newSig(params(typs[7], typs[5]), params(typs[5])) - typs[127] = newSig(params(typs[3], typs[5]), params(typs[5])) - typs[128] = newSig(params(typs[22], typs[22]), params(typs[22])) - typs[129] = newSig(params(typs[24], typs[24]), params(typs[24])) - typs[130] = newSig(params(typs[20]), params(typs[22])) - typs[131] = newSig(params(typs[20]), params(typs[24])) - typs[132] = newSig(params(typs[20]), params(typs[60])) - typs[133] = newSig(params(typs[22]), params(typs[20])) - typs[134] = types.Types[types.TFLOAT32] - typs[135] = newSig(params(typs[22]), params(typs[134])) - typs[136] = newSig(params(typs[24]), params(typs[20])) - typs[137] = newSig(params(typs[24]), params(typs[134])) - typs[138] = newSig(params(typs[60]), params(typs[20])) - typs[139] = newSig(params(typs[26], typs[26]), params(typs[26])) - typs[140] = newSig(nil, params(typs[5])) - typs[141] = newSig(params(typs[5], typs[5]), nil) - typs[142] = newSig(params(typs[5], typs[5], typs[5]), nil) - typs[143] = newSig(params(typs[7], typs[1], typs[5]), nil) - typs[144] = types.NewSlice(typs[7]) - typs[145] = newSig(params(typs[7], typs[144]), nil) - typs[146] = newSig(params(typs[64], typs[64], typs[17]), nil) - typs[147] = newSig(params(typs[58], typs[58], typs[17]), nil) - typs[148] = newSig(params(typs[60], typs[60], typs[17]), nil) - typs[149] = newSig(params(typs[24], typs[24], typs[17]), nil) - typs[150] = newSig(params(typs[28], typs[28], typs[17]), nil) - typs[151] = types.NewArray(typs[0], 16) - typs[152] = newSig(params(typs[7], typs[60], typs[151], typs[28], typs[15], typs[64], typs[64]), params(typs[60])) + typs[104] = newSig(params(typs[102]), nil) + typs[105] = newSig(params(typs[2]), params(typs[15])) + typs[106] = types.NewArray(typs[0], 3) + typs[107] = types.NewStruct([]*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), 
types.NewField(src.NoXPos, Lookup("pad"), typs[106]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])}) + typs[108] = newSig(params(typs[1], typs[3], typs[3]), nil) + typs[109] = newSig(params(typs[1], typs[3]), nil) + typs[110] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15])) + typs[111] = newSig(params(typs[102], typs[3]), params(typs[6])) + typs[112] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6])) + typs[113] = newSig(params(typs[71]), nil) + typs[114] = newSig(params(typs[1], typs[1], typs[71], typs[15], typs[15], typs[6]), params(typs[15], typs[6])) + typs[115] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7])) + typs[116] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7])) + typs[117] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7])) + typs[118] = types.NewSlice(typs[2]) + typs[119] = newSig(params(typs[3], typs[15], typs[15], typs[15], typs[1]), params(typs[118])) + typs[120] = newSig(params(typs[1], typs[7], typs[22]), nil) + typs[121] = newSig(params(typs[7], typs[22]), nil) + typs[122] = newSig(params(typs[3], typs[3], typs[5]), nil) + typs[123] = newSig(params(typs[7], typs[5]), nil) + typs[124] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6])) + typs[125] = newSig(params(typs[3], typs[3]), params(typs[6])) + typs[126] = newSig(params(typs[7], typs[7]), params(typs[6])) + typs[127] = newSig(params(typs[3], typs[5], typs[5]), params(typs[5])) + typs[128] = newSig(params(typs[7], typs[5]), params(typs[5])) + typs[129] = newSig(params(typs[3], typs[5]), params(typs[5])) + typs[130] = newSig(params(typs[22], typs[22]), params(typs[22])) + typs[131] = newSig(params(typs[24], typs[24]), params(typs[24])) + typs[132] = newSig(params(typs[20]), params(typs[22])) + typs[133] = newSig(params(typs[20]), params(typs[24])) + typs[134] = newSig(params(typs[20]), params(typs[60])) + typs[135] = 
newSig(params(typs[22]), params(typs[20])) + typs[136] = types.Types[types.TFLOAT32] + typs[137] = newSig(params(typs[22]), params(typs[136])) + typs[138] = newSig(params(typs[24]), params(typs[20])) + typs[139] = newSig(params(typs[24]), params(typs[136])) + typs[140] = newSig(params(typs[60]), params(typs[20])) + typs[141] = newSig(params(typs[26], typs[26]), params(typs[26])) + typs[142] = newSig(nil, params(typs[5])) + typs[143] = newSig(params(typs[5], typs[5]), nil) + typs[144] = newSig(params(typs[5], typs[5], typs[5]), nil) + typs[145] = newSig(params(typs[7], typs[1], typs[5]), nil) + typs[146] = types.NewSlice(typs[7]) + typs[147] = newSig(params(typs[7], typs[146]), nil) + typs[148] = newSig(params(typs[64], typs[64], typs[17]), nil) + typs[149] = newSig(params(typs[58], typs[58], typs[17]), nil) + typs[150] = newSig(params(typs[60], typs[60], typs[17]), nil) + typs[151] = newSig(params(typs[24], typs[24], typs[17]), nil) + typs[152] = newSig(params(typs[28], typs[28], typs[17]), nil) + typs[153] = types.NewArray(typs[0], 16) + typs[154] = newSig(params(typs[7], typs[60], typs[153], typs[28], typs[15], typs[64], typs[64]), params(typs[60])) return typs[:] } diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go index 12d1743874..eb9dc62d8a 100644 --- a/src/cmd/compile/internal/typecheck/expr.go +++ b/src/cmd/compile/internal/typecheck/expr.go @@ -621,19 +621,6 @@ func tcIndex(n *ir.IndexExpr) ir.Node { return n } - if !n.Bounded() && ir.IsConst(n.Index, constant.Int) { - x := n.Index.Val() - if constant.Sign(x) < 0 { - base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index) - } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) { - base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem()) - } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, 
constant.MakeInt64(int64(len(ir.StringVal(n.X))))) { - base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X))) - } else if ir.ConstOverflow(x, types.Types[types.TINT]) { - base.Errorf("invalid %s index %v (index too large)", why, n.Index) - } - } - case types.TMAP: n.Index = AssignConv(n.Index, t.Key(), "map index") n.SetType(t.Elem()) diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go index 5c54a5bd49..02e59fa360 100644 --- a/src/cmd/compile/internal/typecheck/func.go +++ b/src/cmd/compile/internal/typecheck/func.go @@ -103,12 +103,15 @@ func ClosureType(clo *ir.ClosureExpr) *types.Type { fields := make([]*types.Field, 1+len(clo.Func.ClosureVars)) fields[0] = types.NewField(base.AutogeneratedPos, types.LocalPkg.Lookup("F"), types.Types[types.TUINTPTR]) - for i, v := range clo.Func.ClosureVars { - typ := v.Type() - if !v.Byval() { - typ = types.NewPtr(typ) + it := NewClosureStructIter(clo.Func.ClosureVars) + i := 0 + for { + n, typ, _ := it.Next() + if n == nil { + break } fields[1+i] = types.NewField(base.AutogeneratedPos, types.LocalPkg.LookupNum("X", i), typ) + i++ } typ := types.NewStruct(fields) typ.SetNoalg(true) @@ -832,3 +835,37 @@ func tcUnsafeString(n *ir.BinaryExpr) *ir.BinaryExpr { n.SetType(types.Types[types.TSTRING]) return n } + +// ClosureStructIter iterates through a slice of closure variables returning +// their type and offset in the closure struct. +type ClosureStructIter struct { + closureVars []*ir.Name + offset int64 + next int +} + +// NewClosureStructIter creates a new ClosureStructIter for closureVars. +func NewClosureStructIter(closureVars []*ir.Name) *ClosureStructIter { + return &ClosureStructIter{ + closureVars: closureVars, + offset: int64(types.PtrSize), // PtrSize to skip past function entry PC field + next: 0, + } +} + +// Next returns the next name, type and offset of the next closure variable. 
+// A nil name is returned after the last closure variable. +func (iter *ClosureStructIter) Next() (n *ir.Name, typ *types.Type, offset int64) { + if iter.next >= len(iter.closureVars) { + return nil, nil, 0 + } + n = iter.closureVars[iter.next] + typ = n.Type() + if !n.Byval() { + typ = types.NewPtr(typ) + } + iter.next++ + offset = types.RoundUp(iter.offset, typ.Alignment()) + iter.offset = offset + typ.Size() + return n, typ, offset +} diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go index e54d5256e6..8d792485d8 100644 --- a/src/cmd/compile/internal/typecheck/stmt.go +++ b/src/cmd/compile/internal/typecheck/stmt.go @@ -198,32 +198,36 @@ func tcFor(n *ir.ForStmt) ir.Node { return n } -// tcGoDefer typechecks an OGO/ODEFER statement. +// tcGoDefer typechecks (normalizes) an OGO/ODEFER statement. +func tcGoDefer(n *ir.GoDeferStmt) { + call := normalizeGoDeferCall(n.Pos(), n.Op(), n.Call, n.PtrInit()) + call.GoDefer = true + n.Call = call +} + +// normalizeGoDeferCall normalizes call into a normal function call +// with no arguments and no results, suitable for use in an OGO/ODEFER +// statement. // -// Really, this means normalizing the statement to always use a simple -// function call with no arguments and no results. For example, it -// rewrites: +// For example, it normalizes: // -// defer f(x, y) +// f(x, y) // // into: // -// x1, y1 := x, y -// defer func() { f(x1, y1) }() -func tcGoDefer(n *ir.GoDeferStmt) { - call := n.Call - - init := n.PtrInit() +// x1, y1 := x, y // added to init +// func() { f(x1, y1) }() // result +func normalizeGoDeferCall(pos src.XPos, op ir.Op, call ir.Node, init *ir.Nodes) *ir.CallExpr { init.Append(ir.TakeInit(call)...) 
- if call, ok := n.Call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC { + if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC { if sig := call.Fun.Type(); sig.NumParams()+sig.NumResults() == 0 { - return // already in normal form + return call // already in normal form } } // Create a new wrapper function without parameters or results. - wrapperFn := ir.NewClosureFunc(n.Pos(), n.Pos(), n.Op(), types.NewSignature(nil, nil, nil), ir.CurFunc, Target) + wrapperFn := ir.NewClosureFunc(pos, pos, op, types.NewSignature(nil, nil, nil), ir.CurFunc, Target) wrapperFn.DeclareParams(true) wrapperFn.SetWrapper(true) @@ -372,8 +376,8 @@ func tcGoDefer(n *ir.GoDeferStmt) { // evaluate there. wrapperFn.Body = []ir.Node{call} - // Finally, rewrite the go/defer statement to call the wrapper. - n.Call = Call(call.Pos(), wrapperFn.OClosure, nil, false) + // Finally, construct a call to the wrapper. + return Call(call.Pos(), wrapperFn.OClosure, nil, false).(*ir.CallExpr) } // tcIf typechecks an OIF node. diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go index c57493a5cb..ac08a49d0c 100644 --- a/src/cmd/compile/internal/types/goversion.go +++ b/src/cmd/compile/internal/types/goversion.go @@ -34,7 +34,7 @@ func AllowsGoVersion(major, minor int) bool { } // ParseLangFlag verifies that the -lang flag holds a valid value, and -// exits if not. It initializes data used by langSupported. +// exits if not. It initializes data used by AllowsGoVersion. func ParseLangFlag() { if base.Flag.Lang == "" { return @@ -59,6 +59,10 @@ func ParseLangFlag() { // parseLang parses a -lang option into a langVer. 
func parseLang(s string) (lang, error) { + if s == "go1" { // cmd/go's new spelling of "go1.0" (#65528) + s = "go1.0" + } + matches := goVersionRE.FindStringSubmatch(s) if matches == nil { return lang{}, fmt.Errorf(`should be something like "go1.12"`) diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go index 2777b4f007..c2b0ca3a44 100644 --- a/src/cmd/compile/internal/types/type.go +++ b/src/cmd/compile/internal/types/type.go @@ -657,6 +657,9 @@ func NewPtr(elem *Type) *Type { if elem.HasShape() { t.SetHasShape(true) } + if elem.Noalg() { + t.SetNoalg(true) + } return t } diff --git a/src/cmd/compile/internal/types2/alias.go b/src/cmd/compile/internal/types2/alias.go index 2cc57721f9..149cd3b265 100644 --- a/src/cmd/compile/internal/types2/alias.go +++ b/src/cmd/compile/internal/types2/alias.go @@ -13,47 +13,51 @@ import "fmt" // Otherwise, the alias information is only in the type name, // which points directly to the actual (aliased) type. type Alias struct { - obj *TypeName // corresponding declared alias object - fromRHS Type // RHS of type alias declaration; may be an alias - actual Type // actual (aliased) type; never an alias + obj *TypeName // corresponding declared alias object + tparams *TypeParamList // type parameters, or nil + fromRHS Type // RHS of type alias declaration; may be an alias + actual Type // actual (aliased) type; never an alias } // NewAlias creates a new Alias type with the given type name and rhs. // rhs must not be nil. func NewAlias(obj *TypeName, rhs Type) *Alias { - return (*Checker)(nil).newAlias(obj, rhs) + alias := (*Checker)(nil).newAlias(obj, rhs) + // Ensure that alias.actual is set (#65455). 
+ unalias(alias) + return alias } func (a *Alias) Obj() *TypeName { return a.obj } -func (a *Alias) Underlying() Type { return a.actual.Underlying() } +func (a *Alias) Underlying() Type { return unalias(a).Underlying() } func (a *Alias) String() string { return TypeString(a, nil) } -// Type accessors - // Unalias returns t if it is not an alias type; // otherwise it follows t's alias chain until it // reaches a non-alias type which is then returned. // Consequently, the result is never an alias type. func Unalias(t Type) Type { if a0, _ := t.(*Alias); a0 != nil { - if a0.actual != nil { - return a0.actual - } - for a := a0; ; { - t = a.fromRHS - a, _ = t.(*Alias) - if a == nil { - break - } - } - if t == nil { - panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name)) - } - a0.actual = t + return unalias(a0) } return t } +func unalias(a0 *Alias) Type { + if a0.actual != nil { + return a0.actual + } + var t Type + for a := a0; a != nil; a, _ = t.(*Alias) { + t = a.fromRHS + } + if t == nil { + panic(fmt.Sprintf("non-terminated alias %s", a0.obj.name)) + } + a0.actual = t + return t +} + // asNamed returns t as *Named if that is t's // actual type. It returns nil otherwise. func asNamed(t Type) *Named { @@ -65,7 +69,7 @@ func asNamed(t Type) *Named { // rhs must not be nil. func (check *Checker) newAlias(obj *TypeName, rhs Type) *Alias { assert(rhs != nil) - a := &Alias{obj, rhs, nil} + a := &Alias{obj, nil, rhs, nil} if obj.typ == nil { obj.typ = a } diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go index 6628174428..bb02d9198e 100644 --- a/src/cmd/compile/internal/types2/api.go +++ b/src/cmd/compile/internal/types2/api.go @@ -268,6 +268,15 @@ type Info struct { // scope, the function scopes are embedded in the file scope of the file // containing the function declaration. 
// + // The Scope of a function contains the declarations of any + // type parameters, parameters, and named results, plus any + // local declarations in the body block. + // It is coextensive with the complete extent of the + // function's syntax ([*ast.FuncDecl] or [*ast.FuncLit]). + // The Scopes mapping does not contain an entry for the + // function body ([*ast.BlockStmt]); the function's scope is + // associated with the [*ast.FuncType]. + // // The following node types may appear in Scopes: // // *syntax.File diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go index 56cddf6b29..bab120ff93 100644 --- a/src/cmd/compile/internal/types2/api_test.go +++ b/src/cmd/compile/internal/types2/api_test.go @@ -152,7 +152,7 @@ func TestValuesInfo(t *testing.T) { // look for expression var expr syntax.Expr for e := range info.Types { - if syntax.String(e) == test.expr { + if ExprString(e) == test.expr { expr = e break } @@ -424,7 +424,7 @@ func TestTypesInfo(t *testing.T) { // look for expression type var typ Type for e, tv := range info.Types { - if syntax.String(e) == test.expr { + if ExprString(e) == test.expr { typ = tv.Type break } @@ -1135,8 +1135,8 @@ func TestPredicatesInfo(t *testing.T) { // look for expression predicates got := "" for e, tv := range info.Types { - //println(name, syntax.String(e)) - if syntax.String(e) == test.expr { + //println(name, ExprString(e)) + if ExprString(e) == test.expr { got = predString(tv) break } @@ -1892,12 +1892,12 @@ const Pi = 3.1415 type T struct{} var Y, _ = lib.X, X -func F(){ +func F[T *U, U any](param1, param2 int) /*param1=undef*/ (res1 /*res1=undef*/, res2 int) /*param1=var:12*/ /*res1=var:12*/ /*U=typename:12*/ { const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/ type /*t=undef*/ t /*t=typename:14*/ *t print(Y) /*Y=var:10*/ x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y - var F = /*F=func:12*/ F /*F=var:17*/ ; _ = 
F + var F = /*F=func:12*/ F[*int, int] /*F=var:17*/ ; _ = F var a []int for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x } @@ -1916,6 +1916,10 @@ func F(){ println(int) default /*int=var:31*/ : } + + _ = param1 + _ = res1 + return } /*main=undef*/ ` @@ -1981,7 +1985,29 @@ func F(){ _, gotObj := inner.LookupParent(id.Value, id.Pos()) if gotObj != wantObj { - t.Errorf("%s: got %v, want %v", id.Pos(), gotObj, wantObj) + // Print the scope tree of mainScope in case of error. + var printScopeTree func(indent string, s *Scope) + printScopeTree = func(indent string, s *Scope) { + t.Logf("%sscope %s %v-%v = %v", + indent, + ScopeComment(s), + s.Pos(), + s.End(), + s.Names()) + for i := range s.NumChildren() { + printScopeTree(indent+" ", s.Child(i)) + } + } + printScopeTree("", mainScope) + + t.Errorf("%s: Scope(%s).LookupParent(%s@%v) got %v, want %v [scopePos=%v]", + id.Pos(), + ScopeComment(inner), + id.Value, + id.Pos(), + gotObj, + wantObj, + ObjectScopePos(wantObj)) continue } } @@ -2169,6 +2195,12 @@ func TestIssue61737(t *testing.T) { iface.NumMethods() // unlike go/types, there is no Complete() method, so we complete implicitly } +func TestNewAlias_Issue65455(t *testing.T) { + obj := NewTypeName(nopos, nil, "A", nil) + alias := NewAlias(obj, Typ[Int]) + alias.Underlying() // must not panic +} + func TestIssue15305(t *testing.T) { const src = "package p; func f() int16; var _ = f(undef)" f := mustParse(src) diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go index 079802b0b0..382ce2d1dd 100644 --- a/src/cmd/compile/internal/types2/assignments.go +++ b/src/cmd/compile/internal/types2/assignments.go @@ -24,7 +24,10 @@ func (check *Checker) assignment(x *operand, T Type, context string) { switch x.mode { case invalid: return // error reported before - case constant_, variable, mapindex, value, nilvalue, commaok, commaerr: + case nilvalue: + assert(isTypes2) + // ok + case constant_, variable, 
mapindex, value, commaok, commaerr: // ok default: // we may get here because of other problems (go.dev/issue/39634, crash 12) @@ -41,14 +44,25 @@ func (check *Checker) assignment(x *operand, T Type, context string) { // bool, rune, int, float64, complex128 or string respectively, depending // on whether the value is a boolean, rune, integer, floating-point, // complex, or string constant." - if x.isNil() { - if T == nil { - check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) - x.mode = invalid - return + if isTypes2 { + if x.isNil() { + if T == nil { + check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) + x.mode = invalid + return + } + } else if T == nil || isNonTypeParamInterface(T) { + target = Default(x.typ) + } + } else { // go/types + if T == nil || isNonTypeParamInterface(T) { + if T == nil && x.typ == Typ[UntypedNil] { + check.errorf(x, UntypedNilUse, "use of untyped nil in %s", context) + x.mode = invalid + return + } + target = Default(x.typ) } - } else if T == nil || isNonTypeParamInterface(T) { - target = Default(x.typ) } newType, val, code := check.implicitTypeAndValue(x, target) if code != 0 { @@ -218,7 +232,7 @@ func (check *Checker) lhsVar(lhs syntax.Expr) Type { var op operand check.expr(nil, &op, sel.X) if op.mode == mapindex { - check.errorf(&x, UnaddressableFieldAssign, "cannot assign to struct field %s in map", syntax.String(x.expr)) + check.errorf(&x, UnaddressableFieldAssign, "cannot assign to struct field %s in map", ExprString(x.expr)) return Typ[Invalid] } } @@ -232,7 +246,7 @@ func (check *Checker) lhsVar(lhs syntax.Expr) Type { // assignVar checks the assignment lhs = rhs (if x == nil), or lhs = x (if x != nil). // If x != nil, it must be the evaluation of rhs (and rhs will be ignored). // If the assignment check fails and x != nil, x.mode is set to invalid. 
-func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand) { +func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand, context string) { T := check.lhsVar(lhs) // nil if lhs is _ if !isValid(T) { if x != nil { @@ -244,12 +258,18 @@ func (check *Checker) assignVar(lhs, rhs syntax.Expr, x *operand) { } if x == nil { + var target *target + // avoid calling ExprString if not needed + if T != nil { + if _, ok := under(T).(*Signature); ok { + target = newTarget(T, ExprString(lhs)) + } + } x = new(operand) - check.expr(T, x, rhs) + check.expr(target, x, rhs) } - context := "assignment" - if T == nil { + if T == nil && context == "assignment" { context = "assignment to _ identifier" } check.assignment(x, T, context) @@ -337,12 +357,11 @@ func (check *Checker) returnError(at poser, lhs []*Var, rhs []*operand) { } else if r > 0 { at = rhs[r-1] // report at last value } - var err error_ - err.code = WrongResultCount - err.errorf(at, "%s return values", qualifier) - err.errorf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false)) - err.errorf(nopos, "want %s", check.typesSummary(varTypes(lhs), false)) - check.report(&err) + err := check.newError(WrongResultCount) + err.addf(at, "%s return values", qualifier) + err.addf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false)) + err.addf(nopos, "want %s", check.typesSummary(varTypes(lhs), false)) + err.report() } // initVars type-checks assignments of initialization expressions orig_rhs @@ -369,7 +388,11 @@ func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt sy if l == r && !isCall { var x operand for i, lhs := range lhs { - check.expr(lhs.typ, &x, orig_rhs[i]) + desc := lhs.name + if returnStmt != nil && desc == "" { + desc = "result variable" + } + check.expr(newTarget(lhs.typ, desc), &x, orig_rhs[i]) check.initVar(lhs, &x, context) } return @@ -443,7 +466,7 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) { // each value can be assigned to its 
corresponding variable. if l == r && !isCall { for i, lhs := range lhs { - check.assignVar(lhs, orig_rhs[i], nil) + check.assignVar(lhs, orig_rhs[i], nil, "assignment") } return } @@ -464,7 +487,7 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) { r = len(rhs) if l == r { for i, lhs := range lhs { - check.assignVar(lhs, nil, rhs[i]) + check.assignVar(lhs, nil, rhs[i], "assignment") } // Only record comma-ok expression if both assignments succeeded // (go.dev/issue/59371). @@ -483,7 +506,7 @@ func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) { // orig_rhs[0] was already evaluated } -func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { +func (check *Checker) shortVarDecl(pos poser, lhs, rhs []syntax.Expr) { top := len(check.delayed) scope := check.scope @@ -496,6 +519,7 @@ func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { ident, _ := lhs.(*syntax.Name) if ident == nil { check.useLHS(lhs) + // TODO(gri) This is redundant with a go/parser error. Consider omitting in go/types? check.errorf(lhs, BadDecl, "non-name %s on left side of :=", lhs) hasErr = true continue @@ -558,7 +582,7 @@ func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) { // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl // for short variable declarations) and ends at the end of the innermost // containing block." 
- scopePos := syntax.EndPos(rhs[len(rhs)-1]) + scopePos := endPos(rhs[len(rhs)-1]) for _, obj := range newVars { check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called } diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go index 01b8e46304..a87474ad6c 100644 --- a/src/cmd/compile/internal/types2/builtins.go +++ b/src/cmd/compile/internal/types2/builtins.go @@ -22,9 +22,8 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // append is the only built-in that permits the use of ... for the last argument bin := predeclaredFuncs[id] - if call.HasDots && id != _Append { - //check.errorf(call.Ellipsis, invalidOp + "invalid use of ... with built-in %s", bin.name) - check.errorf(call, + if hasDots(call) && id != _Append { + check.errorf(dddErrPos(call), InvalidDotDotDot, invalidOp+"invalid use of ... with built-in %s", bin.name) check.use(argList...) @@ -76,7 +75,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( msg = "too many" } if msg != "" { - check.errorf(call, WrongArgCount, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs) + check.errorf(argErrPos(call), WrongArgCount, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs) return } } @@ -114,7 +113,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // spec: "As a special case, append also accepts a first argument assignable // to type []byte with a second argument of string type followed by ... . // This form appends the bytes of the string. 
- if nargs == 2 && call.HasDots { + if nargs == 2 && hasDots(call) { if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok { y := args[1] if t := coreString(y.typ); t != nil && isString(t) { @@ -720,7 +719,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( base := derefStructPtr(x.typ) sel := selx.Sel.Value - obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel) + obj, index, indirect := lookupFieldOrMethod(base, false, check.pkg, sel, false) switch obj.(type) { case nil: check.errorf(x, MissingFieldOrMethod, invalidArg+"%s has no single field %s", base, sel) @@ -799,7 +798,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // unsafe.Slice(ptr *T, len IntegerType) []T check.verifyVersionf(call.Fun, go1_17, "unsafe.Slice") - ptr, _ := under(x.typ).(*Pointer) // TODO(gri) should this be coreType rather than under? + ptr, _ := coreType(x.typ).(*Pointer) if ptr == nil { check.errorf(x, InvalidUnsafeSlice, invalidArg+"%s is not a pointer", x) return @@ -820,7 +819,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // unsafe.SliceData(slice []T) *T check.verifyVersionf(call.Fun, go1_20, "unsafe.SliceData") - slice, _ := under(x.typ).(*Slice) // TODO(gri) should this be coreType rather than under? + slice, _ := coreType(x.typ).(*Slice) if slice == nil { check.errorf(x, InvalidUnsafeSliceData, invalidArg+"%s is not a slice", x) return @@ -909,7 +908,7 @@ func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) ( // trace is only available in test mode - no need to record signature default: - unreachable() + panic("unreachable") } assert(x.mode != invalid) @@ -948,13 +947,13 @@ func hasVarSize(t Type, seen map[*Named]bool) (varSized bool) { case *Interface: return isTypeParam(t) case *Named, *Union: - unreachable() + panic("unreachable") } return false } // applyTypeFunc applies f to x. 
If x is a type parameter, -// the result is a type parameter constrained by an new +// the result is a type parameter constrained by a new // interface bound. The type bounds for that interface // are computed by applying f to each of the type bounds // of x. If any of these applications of f return nil, @@ -991,7 +990,7 @@ func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) case _Complex: code = InvalidComplex default: - unreachable() + panic("unreachable") } check.softErrorf(x, code, "%s not supported as argument to %s for go1.18 (see go.dev/issue/50937)", x, predeclaredFuncs[id].name) @@ -1034,14 +1033,3 @@ func arrayPtrDeref(typ Type) Type { } return typ } - -// unparen returns e with any enclosing parentheses stripped. -func unparen(e syntax.Expr) syntax.Expr { - for { - p, ok := e.(*syntax.ParenExpr) - if !ok { - return e - } - e = p.X - } -} diff --git a/src/cmd/compile/internal/types2/builtins_test.go b/src/cmd/compile/internal/types2/builtins_test.go index 875ee5a4d5..2b4854b6f7 100644 --- a/src/cmd/compile/internal/types2/builtins_test.go +++ b/src/cmd/compile/internal/types2/builtins_test.go @@ -207,7 +207,7 @@ func testBuiltinSignature(t *testing.T, name, src0, want string) { // the recorded type for the built-in must match the wanted signature typ := types[fun].Type if typ == nil { - t.Errorf("%s: no type recorded for %s", src0, syntax.String(fun)) + t.Errorf("%s: no type recorded for %s", src0, ExprString(fun)) return } if got := typ.String(); got != want { diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go index 439f515265..fe5b71d965 100644 --- a/src/cmd/compile/internal/types2/call.go +++ b/src/cmd/compile/internal/types2/call.go @@ -10,14 +10,13 @@ import ( "cmd/compile/internal/syntax" . "internal/types/errors" "strings" - "unicode" ) // funcInst type-checks a function instantiation. // The incoming x must be a generic function. 
// If inst != nil, it provides some or all of the type arguments (inst.Index). -// If target type tsig != nil, the signature may be used to infer missing type -// arguments of x, if any. At least one of tsig or inst must be provided. +// If target != nil, it may be used to infer missing type arguments of x, if any. +// At least one of T or inst must be provided. // // There are two modes of operation: // @@ -32,12 +31,13 @@ import ( // // If an error (other than a version error) occurs in any case, it is reported // and x.mode is set to invalid. -func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) { - assert(tsig != nil || inst != nil) +func (check *Checker) funcInst(T *target, pos syntax.Pos, x *operand, inst *syntax.IndexExpr, infer bool) ([]Type, []syntax.Expr) { + assert(T != nil || inst != nil) var instErrPos poser if inst != nil { instErrPos = inst.Pos() + x.expr = inst // if we don't have an index expression, keep the existing expression of x } else { instErrPos = pos } @@ -51,7 +51,6 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst targs = check.typeList(xlist) if targs == nil { x.mode = invalid - x.expr = inst return nil, nil } assert(len(targs) == len(xlist)) @@ -66,7 +65,6 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst // Providing too many type arguments is always an error. 
check.errorf(xlist[got-1], WrongTypeArgCount, "got %d type arguments but want %d", got, want) x.mode = invalid - x.expr = inst return nil, nil } @@ -87,7 +85,8 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst // var args []*operand var params []*Var - if tsig != nil && sig.tparams != nil { + var reverse bool + if T != nil && sig.tparams != nil { if !versionErr && !check.allowVersion(check.pkg, instErrPos, go1_21) { if inst != nil { check.versionErrorf(instErrPos, go1_21, "partially instantiated function in assignment") @@ -100,19 +99,22 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst // The type of the argument operand is tsig, which is the type of the LHS in an assignment // or the result type in a return statement. Create a pseudo-expression for that operand // that makes sense when reported in error messages from infer, below. - expr := syntax.NewName(x.Pos(), "variable in assignment") - args = []*operand{{mode: value, expr: expr, typ: tsig}} + expr := syntax.NewName(x.Pos(), T.desc) + args = []*operand{{mode: value, expr: expr, typ: T.sig}} + reverse = true } // Rename type parameters to avoid problems with recursive instantiations. // Note that NewTuple(params...) below is (*Tuple)(nil) if len(params) == 0, as desired. 
tparams, params2 := check.renameTParams(pos, sig.TypeParams().list(), NewTuple(params...)) - targs = check.infer(pos, tparams, targs, params2.(*Tuple), args) + err := check.newError(CannotInferTypeArgs) + targs = check.infer(pos, tparams, targs, params2.(*Tuple), args, reverse, err) if targs == nil { - // error was already reported + if !err.empty() { + err.report() + } x.mode = invalid - x.expr = inst return nil, nil } got = len(targs) @@ -120,15 +122,10 @@ func (check *Checker) funcInst(tsig *Signature, pos syntax.Pos, x *operand, inst assert(got == want) // instantiate function signature - expr := x.expr // if we don't have an index expression, keep the existing expression of x - if inst != nil { - expr = inst - } - sig = check.instantiateSignature(x.Pos(), expr, sig, targs, xlist) + sig = check.instantiateSignature(x.Pos(), x.expr, sig, targs, xlist) x.typ = sig x.mode = value - x.expr = expr return nil, nil } @@ -211,7 +208,7 @@ func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind { break } } - if call.HasDots { + if hasDots(call) { check.errorf(call.ArgList[0], BadDotDotDotSyntax, "invalid use of ... in conversion to %s", T) break } @@ -470,7 +467,7 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T nargs := len(args) npars := sig.params.Len() - ddd := call.HasDots + ddd := hasDots(call) // set up parameters sigParams := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!) 
@@ -530,12 +527,11 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T if sig.params != nil { params = sig.params.vars } - var err error_ - err.code = WrongArgCount - err.errorf(at, "%s arguments in call to %s", qualifier, call.Fun) - err.errorf(nopos, "have %s", check.typesSummary(operandTypes(args), false)) - err.errorf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic)) - check.report(&err) + err := check.newError(WrongArgCount) + err.addf(at, "%s arguments in call to %s", qualifier, call.Fun) + err.addf(nopos, "have %s", check.typesSummary(operandTypes(args), false)) + err.addf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic)) + err.report() return } @@ -608,13 +604,17 @@ func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []T // infer missing type arguments of callee and function arguments if len(tparams) > 0 { - targs = check.infer(call.Pos(), tparams, targs, sigParams, args) + err := check.newError(CannotInferTypeArgs) + targs = check.infer(call.Pos(), tparams, targs, sigParams, args, false, err) if targs == nil { // TODO(gri) If infer inferred the first targs[:n], consider instantiating // the call signature for better error messages/gopls behavior. // Perhaps instantiate as much as we can, also for arguments. // This will require changes to how infer returns its results. 
- return // error already reported + if !err.empty() { + check.errorf(err.pos(), CannotInferTypeArgs, "in call to %s, %s", call.Fun, err.msg()) + } + return } // update result signature: instantiate if needed @@ -755,7 +755,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName x.id = exp.id default: check.dump("%v: unexpected object %v", atPos(e.Sel), exp) - unreachable() + panic("unreachable") } x.expr = e return @@ -767,7 +767,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName case typexpr: // don't crash for "type T T.x" (was go.dev/issue/51509) if def != nil && def.typ == x.typ { - check.cycleError([]Object{def}) + check.cycleError([]Object{def}, 0) goto Error } case builtin: @@ -796,7 +796,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName goto Error } - obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel) + obj, index, indirect = lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, false) if obj == nil { // Don't report another error if the underlying type was invalid (go.dev/issue/49541). if !isValid(under(x.typ)) { @@ -822,21 +822,8 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName if isInterfacePtr(x.typ) { why = check.interfacePtrError(x.typ) } else { - why = check.sprintf("type %s has no field or method %s", x.typ, sel) - // Check if capitalization of sel matters and provide better error message in that case. - // TODO(gri) This code only looks at the first character but LookupFieldOrMethod has an - // (internal) mechanism for case-insensitive lookup. Should use that instead. 
- if len(sel) > 0 { - var changeCase string - if r := rune(sel[0]); unicode.IsUpper(r) { - changeCase = string(unicode.ToLower(r)) + sel[1:] - } else { - changeCase = string(unicode.ToUpper(r)) + sel[1:] - } - if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil { - why += ", but does have " + changeCase - } - } + alt, _, _ := lookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel, true) + why = check.lookupError(x.typ, sel, alt, false) } check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (%s)", x.expr, sel, why) goto Error @@ -851,7 +838,6 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName // method expression m, _ := obj.(*Func) if m == nil { - // TODO(gri) should check if capitalization of sel matters and provide better error message in that case check.errorf(e.Sel, MissingFieldOrMethod, "%s.%s undefined (type %s has no method %s)", x.expr, sel, x.typ, sel) goto Error } @@ -922,7 +908,7 @@ func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *TypeName check.addDeclDep(obj) default: - unreachable() + panic("unreachable") } } diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go index 0582367083..f36dff3d4a 100644 --- a/src/cmd/compile/internal/types2/check.go +++ b/src/cmd/compile/internal/types2/check.go @@ -110,7 +110,8 @@ type Checker struct { nextID uint64 // unique Id for type parameters (first valid Id is 1) objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package - valids instanceLookup // valid *Named (incl. instantiated) types per the validType check + // see TODO in validtype.go + // valids instanceLookup // valid *Named (incl. 
instantiated) types per the validType check // pkgPathMap maps package names to the set of distinct import paths we've // seen for that name, anywhere in the import graph. It is used for @@ -513,7 +514,7 @@ func (check *Checker) recordUntyped() { for x, info := range check.untyped { if debug && isTyped(info.typ) { check.dump("%v: %s (type %s) is typed", atPos(x), x, info.typ) - unreachable() + panic("unreachable") } check.recordTypeAndValue(x, info.mode, info.typ, info.val) } @@ -578,7 +579,7 @@ func (check *Checker) recordBuiltinType(f syntax.Expr, sig *Signature) { case *syntax.ParenExpr: f = p.X default: - unreachable() + panic("unreachable") } } } diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go index a9d6202a33..8b309898d2 100644 --- a/src/cmd/compile/internal/types2/check_test.go +++ b/src/cmd/compile/internal/types2/check_test.go @@ -398,7 +398,7 @@ func TestCheck(t *testing.T) { DefPredeclaredTestFuncs() testDirFiles(t, "../../../../internal/types/testdata/check", 50, false) // TODO(gri) narrow column tolerance } -func TestSpec(t *testing.T) { testDirFiles(t, "../../../../internal/types/testdata/spec", 0, false) } +func TestSpec(t *testing.T) { testDirFiles(t, "../../../../internal/types/testdata/spec", 20, false) } // TODO(gri) narrow column tolerance func TestExamples(t *testing.T) { testDirFiles(t, "../../../../internal/types/testdata/examples", 125, false) } // TODO(gri) narrow column tolerance diff --git a/src/cmd/compile/internal/types2/const.go b/src/cmd/compile/internal/types2/const.go index af27c727dd..5e5bc74ba3 100644 --- a/src/cmd/compile/internal/types2/const.go +++ b/src/cmd/compile/internal/types2/const.go @@ -118,7 +118,7 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c case Uint64: return 0 <= x default: - unreachable() + panic("unreachable") } } // x does not fit into int64 @@ -159,7 +159,7 @@ func representableConst(x constant.Value, check 
*Checker, typ *Basic, rounded *c case UntypedFloat: return true default: - unreachable() + panic("unreachable") } case isComplex(typ): @@ -191,7 +191,7 @@ func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *c case UntypedComplex: return true default: - unreachable() + panic("unreachable") } case isString(typ): diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go index 8027092c6c..d9ed0b3c1b 100644 --- a/src/cmd/compile/internal/types2/conversions.go +++ b/src/cmd/compile/internal/types2/conversions.go @@ -98,17 +98,18 @@ func (check *Checker) conversion(x *operand, T Type) { // given a type explicitly by a constant declaration or conversion,...". if isUntyped(x.typ) { final := T - // - For conversions to interfaces, except for untyped nil arguments, - // use the argument's default type. + // - For conversions to interfaces, except for untyped nil arguments + // and isTypes2, use the argument's default type. // - For conversions of untyped constants to non-constant types, also // use the default type (e.g., []byte("foo") should report string // not []byte as type for the constant "foo"). + // - If !isTypes2, keep untyped nil for untyped nil arguments. // - For constant integer to string conversions, keep the argument type. // (See also the TODO below.) 
- if x.typ == Typ[UntypedNil] { + if isTypes2 && x.typ == Typ[UntypedNil] { // ok - } else if isNonTypeParamInterface(T) || constArg && !isConstType(T) { - final = Default(x.typ) + } else if isNonTypeParamInterface(T) || constArg && !isConstType(T) || !isTypes2 && x.isNil() { + final = Default(x.typ) // default type of untyped nil is untyped nil } else if x.mode == constant_ && isInteger(x.typ) && allString(T) { final = x.typ } @@ -202,7 +203,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { // check != nil if cause != nil { // TODO(gri) consider restructuring versionErrorf so we can use it here and below - *cause = "conversion of slices to arrays requires go1.20 or later" + *cause = "conversion of slice to array requires go1.20 or later" } return false } @@ -214,7 +215,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { } // check != nil if cause != nil { - *cause = "conversion of slices to array pointers requires go1.17 or later" + *cause = "conversion of slice to array pointer requires go1.17 or later" } return false } @@ -227,7 +228,7 @@ func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool { return false } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { if check != nil && cause != nil { msg := check.sprintf(format, args...) if *cause != "" { diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go index 3abde44c71..c07a6b4dee 100644 --- a/src/cmd/compile/internal/types2/decl.go +++ b/src/cmd/compile/internal/types2/decl.go @@ -11,15 +11,6 @@ import ( . "internal/types/errors" ) -func (err *error_) recordAltDecl(obj Object) { - if pos := obj.Pos(); pos.IsKnown() { - // We use "other" rather than "previous" here because - // the first declaration seen may not be textually - // earlier in the source. 
- err.errorf(pos, "other declaration of %s", obj.Name()) - } -} - func (check *Checker) declare(scope *Scope, id *syntax.Name, obj Object, pos syntax.Pos) { // spec: "The blank identifier, represented by the underscore // character _, may be used in a declaration like any other @@ -27,11 +18,10 @@ func (check *Checker) declare(scope *Scope, id *syntax.Name, obj Object, pos syn // binding." if obj.Name() != "_" { if alt := scope.Insert(obj); alt != nil { - var err error_ - err.code = DuplicateDecl - err.errorf(obj, "%s redeclared in this block", obj.Name()) - err.recordAltDecl(alt) - check.report(&err) + err := check.newError(DuplicateDecl) + err.addf(obj, "%s redeclared in this block", obj.Name()) + err.addAltDecl(alt) + err.report() return } obj.setScopePos(pos) @@ -162,7 +152,7 @@ func (check *Checker) objDecl(obj Object, def *TypeName) { } default: - unreachable() + panic("unreachable") } assert(obj.Type() != nil) return @@ -171,7 +161,7 @@ func (check *Checker) objDecl(obj Object, def *TypeName) { d := check.objMap[obj] if d == nil { check.dump("%v: %s should have been declared", obj.Pos(), obj) - unreachable() + panic("unreachable") } // save/restore current environment and set up object environment @@ -202,7 +192,7 @@ func (check *Checker) objDecl(obj Object, def *TypeName) { // functions may be recursive - no need to track dependencies check.funcDecl(obj, d) default: - unreachable() + panic("unreachable") } } @@ -216,7 +206,7 @@ func (check *Checker) validCycle(obj Object) (valid bool) { isPkgObj := obj.Parent() == check.pkg.scope if isPkgObj != inObjMap { check.dump("%v: inconsistent object map for %s (isPkgObj = %v, inObjMap = %v)", obj.Pos(), obj, isPkgObj, inObjMap) - unreachable() + panic("unreachable") } } @@ -266,7 +256,7 @@ loop: case *Func: // ignored for now default: - unreachable() + panic("unreachable") } } @@ -302,13 +292,12 @@ loop: } } - check.cycleError(cycle) + check.cycleError(cycle, firstInSrc(cycle)) return false } -// cycleError reports 
a declaration cycle starting with -// the object in cycle that is "first" in the source. -func (check *Checker) cycleError(cycle []Object) { +// cycleError reports a declaration cycle starting with the object at cycle[start]. +func (check *Checker) cycleError(cycle []Object, start int) { // name returns the (possibly qualified) object name. // This is needed because with generic types, cycles // may refer to imported types. See go.dev/issue/50788. @@ -317,11 +306,7 @@ func (check *Checker) cycleError(cycle []Object) { return packagePrefix(obj.Pkg(), check.qualifier) + obj.Name() } - // TODO(gri) Should we start with the last (rather than the first) object in the cycle - // since that is the earliest point in the source where we start seeing the - // cycle? That would be more consistent with other error messages. - i := firstInSrc(cycle) - obj := cycle[i] + obj := cycle[start] objName := name(obj) // If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors. 
tname, _ := obj.(*TypeName) @@ -343,15 +328,15 @@ func (check *Checker) cycleError(cycle []Object) { return } - var err error_ - err.code = InvalidDeclCycle + err := check.newError(InvalidDeclCycle) if tname != nil { - err.errorf(obj, "invalid recursive type %s", objName) + err.addf(obj, "invalid recursive type %s", objName) } else { - err.errorf(obj, "invalid cycle in declaration of %s", objName) + err.addf(obj, "invalid cycle in declaration of %s", objName) } + i := start for range cycle { - err.errorf(obj, "%s refers to", objName) + err.addf(obj, "%s refers to", objName) i++ if i >= len(cycle) { i = 0 @@ -359,8 +344,8 @@ func (check *Checker) cycleError(cycle []Object) { obj = cycle[i] objName = name(obj) } - err.errorf(obj, "%s", objName) - check.report(&err) + err.addf(obj, "%s", objName) + err.report() } // firstInSrc reports the index of the object with the "smallest" @@ -433,7 +418,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) { // if any, would not be checked. // // TODO(gri) If we have no init expr, we should distribute - // a given type otherwise we need to re-evalate the type + // a given type otherwise we need to re-evaluate the type // expr for each lhs variable, leading to duplicate work. } @@ -449,7 +434,7 @@ func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) { if lhs == nil || len(lhs) == 1 { assert(lhs == nil || lhs[0] == obj) var x operand - check.expr(obj.typ, &x, init) + check.expr(newTarget(obj.typ, obj.name), &x, init) check.initVar(obj, &x, "variable declaration") return } @@ -494,37 +479,59 @@ func (check *Checker) isImportedConstraint(typ Type) bool { func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeName) { assert(obj.typ == nil) + // Only report a version error if we have not reported one already. 
+ versionErr := false + var rhs Type check.later(func() { if t := asNamed(obj.typ); t != nil { // type may be invalid check.validType(t) } // If typ is local, an error was already reported where typ is specified/defined. - _ = check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs) + _ = !versionErr && check.isImportedConstraint(rhs) && check.verifyVersionf(tdecl.Type, go1_18, "using type constraint %s", rhs) }).describef(obj, "validType(%s)", obj.Name()) - aliasDecl := tdecl.Alias - if aliasDecl && tdecl.TParamList != nil { - // The parser will ensure this but we may still get an invalid AST. - // Complain and continue as regular type definition. - check.error(tdecl, BadDecl, "generic type cannot be alias") - aliasDecl = false + // First type parameter, or nil. + var tparam0 *syntax.Field + if len(tdecl.TParamList) > 0 { + tparam0 = tdecl.TParamList[0] } // alias declaration - if aliasDecl { - check.verifyVersionf(tdecl, go1_9, "type aliases") + if tdecl.Alias { + // Report highest version requirement first so that fixing a version issue + // avoids possibly two -lang changes (first to Go 1.9 and then to Go 1.23). + if !versionErr && tparam0 != nil && !check.verifyVersionf(tparam0, go1_23, "generic type alias") { + versionErr = true + } + if !versionErr && !check.verifyVersionf(tdecl, go1_9, "type alias") { + versionErr = true + } + if check.enableAlias { // TODO(gri) Should be able to use nil instead of Typ[Invalid] to mark // the alias as incomplete. Currently this causes problems // with certain cycles. Investigate. 
alias := check.newAlias(obj, Typ[Invalid]) setDefType(def, alias) + + // handle type parameters even if not allowed (Alias type is supported) + if tparam0 != nil { + check.openScope(tdecl, "type parameters") + defer check.closeScope() + check.collectTypeParams(&alias.tparams, tdecl.TParamList) + } + rhs = check.definedType(tdecl.Type, obj) assert(rhs != nil) alias.fromRHS = rhs Unalias(alias) // resolve alias.actual } else { + if !versionErr && tparam0 != nil { + check.error(tdecl, UnsupportedFeature, "generic type alias requires GODEBUG=gotypesalias=1") + versionErr = true + } + check.brokenAlias(obj) rhs = check.typ(tdecl.Type) check.validAlias(obj, rhs) @@ -533,6 +540,10 @@ func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *TypeN } // type definition or generic type declaration + if !versionErr && tparam0 != nil && !check.verifyVersionf(tparam0, go1_18, "type parameter") { + versionErr = true + } + named := check.newNamed(obj, nil, nil) setDefType(def, named) @@ -570,8 +581,11 @@ func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Fiel // Declare type parameters up-front. // The scope of type parameters starts at the beginning of the type parameter // list (so we can have mutually recursive parameterized type bounds). - for i, f := range list { - tparams[i] = check.declareTypeParam(f.Name) + if len(list) > 0 { + scopePos := list[0].Pos() + for i, f := range list { + tparams[i] = check.declareTypeParam(f.Name, scopePos) + } } // Set the type parameters before collecting the type constraints because @@ -628,7 +642,7 @@ func (check *Checker) bound(x syntax.Expr) Type { return check.typ(x) } -func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam { +func (check *Checker) declareTypeParam(name *syntax.Name, scopePos syntax.Pos) *TypeParam { // Use Typ[Invalid] for the type constraint to ensure that a type // is present even if the actual constraint has not been assigned // yet. 
@@ -636,8 +650,8 @@ func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam { // constraints to make sure we don't rely on them if they // are not properly set yet. tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil) - tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect - check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position + tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect + check.declare(check.scope, name, tname, scopePos) return tpar } @@ -685,7 +699,7 @@ func (check *Checker) collectMethods(obj *TypeName) { assert(m.name != "_") if alt := mset.insert(m); alt != nil { if alt.Pos().IsKnown() { - check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared at %s", obj.Name(), m.name, alt.Pos()) + check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared at %v", obj.Name(), m.name, alt.Pos()) } else { check.errorf(m.pos, DuplicateMethod, "method %s.%s already declared", obj.Name(), m.name) } @@ -718,11 +732,10 @@ func (check *Checker) checkFieldUniqueness(base *Named) { // For historical consistency, we report the primary error on the // method, and the alt decl on the field. - var err error_ - err.code = DuplicateFieldAndMethod - err.errorf(alt, "field and method with the same name %s", fld.name) - err.recordAltDecl(fld) - check.report(&err) + err := check.newError(DuplicateFieldAndMethod) + err.addf(alt, "field and method with the same name %s", fld.name) + err.addAltDecl(fld) + err.report() } } } @@ -750,6 +763,11 @@ func (check *Checker) funcDecl(obj *Func, decl *declInfo) { check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type) obj.color_ = saved + // Set the scope's extent to the complete "func (...) { ... }" + // so that Scope.Innermost works correctly. 
+ sig.scope.pos = fdecl.Pos() + sig.scope.end = syntax.EndPos(fdecl) + if len(fdecl.TParamList) > 0 && fdecl.Body == nil { check.softErrorf(fdecl, BadDecl, "generic function is missing function body") } diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go index 6153b42a34..ba4dc87b6a 100644 --- a/src/cmd/compile/internal/types2/errorcalls_test.go +++ b/src/cmd/compile/internal/types2/errorcalls_test.go @@ -1,6 +1,6 @@ // Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE ast. +// license that can be found in the LICENSE file. package types2_test diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go index b8414b4849..44f2adc7b7 100644 --- a/src/cmd/compile/internal/types2/errors.go +++ b/src/cmd/compile/internal/types2/errors.go @@ -2,17 +2,15 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file implements various error reporters. +// This file implements error reporting. package types2 import ( - "bytes" "cmd/compile/internal/syntax" "fmt" . "internal/types/errors" "runtime" - "strconv" "strings" ) @@ -28,23 +26,48 @@ func assert(p bool) { } } -func unreachable() { - panic("unreachable") +// An errorDesc describes part of a type-checking error. +type errorDesc struct { + pos syntax.Pos + msg string } // An error_ represents a type-checking error. -// To report an error_, call Checker.report. +// A new error_ is created with Checker.newError. +// To report an error_, call error_.report. type error_ struct { - desc []errorDesc - code Code - soft bool // TODO(gri) eventually determine this from an error code + check *Checker + desc []errorDesc + code Code + soft bool // TODO(gri) eventually determine this from an error code } -// An errorDesc describes part of a type-checking error. 
-type errorDesc struct { - pos syntax.Pos - format string - args []interface{} +// newError returns a new error_ with the given error code. +func (check *Checker) newError(code Code) *error_ { + if code == 0 { + panic("error code must not be 0") + } + return &error_{check: check, code: code} +} + +// addf adds formatted error information to err. +// It may be called multiple times to provide additional information. +// The position of the first call to addf determines the position of the reported Error. +// Subsequent calls to addf provide additional information in the form of additional lines +// in the error message (types2) or continuation errors identified by a tab-indented error +// message (go/types). +func (err *error_) addf(at poser, format string, args ...interface{}) { + err.desc = append(err.desc, errorDesc{atPos(at), err.check.sprintf(format, args...)}) +} + +// addAltDecl is a specialized form of addf reporting another declaration of obj. +func (err *error_) addAltDecl(obj Object) { + if pos := obj.Pos(); pos.IsKnown() { + // We use "other" rather than "previous" here because + // the first declaration seen may not be textually + // earlier in the source. + err.addf(obj, "other declaration of %s", obj.Name()) + } } func (err *error_) empty() bool { @@ -58,10 +81,12 @@ func (err *error_) pos() syntax.Pos { return err.desc[0].pos } -func (err *error_) msg(qf Qualifier) string { +// msg returns the formatted error message without the primary error position pos(). +func (err *error_) msg() string { if err.empty() { return "no error" } + var buf strings.Builder for i := range err.desc { p := &err.desc[i] @@ -71,161 +96,15 @@ func (err *error_) msg(qf Qualifier) string { fmt.Fprintf(&buf, "%s: ", p.pos) } } - buf.WriteString(sprintf(qf, false, p.format, p.args...)) + buf.WriteString(p.msg) } return buf.String() } -// String is for testing. -func (err *error_) String() string { +// report reports the error err, setting check.firstError if necessary. 
+func (err *error_) report() { if err.empty() { - return "no error" - } - return fmt.Sprintf("%s: %s", err.pos(), err.msg(nil)) -} - -// errorf adds formatted error information to err. -// It may be called multiple times to provide additional information. -func (err *error_) errorf(at poser, format string, args ...interface{}) { - err.desc = append(err.desc, errorDesc{atPos(at), format, args}) -} - -func sprintf(qf Qualifier, tpSubscripts bool, format string, args ...interface{}) string { - for i, arg := range args { - switch a := arg.(type) { - case nil: - arg = "" - case operand: - panic("got operand instead of *operand") - case *operand: - arg = operandString(a, qf) - case syntax.Pos: - arg = a.String() - case syntax.Expr: - arg = syntax.String(a) - case []syntax.Expr: - var buf strings.Builder - buf.WriteByte('[') - for i, x := range a { - if i > 0 { - buf.WriteString(", ") - } - buf.WriteString(syntax.String(x)) - } - buf.WriteByte(']') - arg = buf.String() - case Object: - arg = ObjectString(a, qf) - case Type: - var buf bytes.Buffer - w := newTypeWriter(&buf, qf) - w.tpSubscripts = tpSubscripts - w.typ(a) - arg = buf.String() - case []Type: - var buf bytes.Buffer - w := newTypeWriter(&buf, qf) - w.tpSubscripts = tpSubscripts - buf.WriteByte('[') - for i, x := range a { - if i > 0 { - buf.WriteString(", ") - } - w.typ(x) - } - buf.WriteByte(']') - arg = buf.String() - case []*TypeParam: - var buf bytes.Buffer - w := newTypeWriter(&buf, qf) - w.tpSubscripts = tpSubscripts - buf.WriteByte('[') - for i, x := range a { - if i > 0 { - buf.WriteString(", ") - } - w.typ(x) - } - buf.WriteByte(']') - arg = buf.String() - } - args[i] = arg - } - return fmt.Sprintf(format, args...) -} - -func (check *Checker) qualifier(pkg *Package) string { - // Qualify the package unless it's the package being type-checked. 
- if pkg != check.pkg { - if check.pkgPathMap == nil { - check.pkgPathMap = make(map[string]map[string]bool) - check.seenPkgMap = make(map[*Package]bool) - check.markImports(check.pkg) - } - // If the same package name was used by multiple packages, display the full path. - if len(check.pkgPathMap[pkg.name]) > 1 { - return strconv.Quote(pkg.path) - } - return pkg.name - } - return "" -} - -// markImports recursively walks pkg and its imports, to record unique import -// paths in pkgPathMap. -func (check *Checker) markImports(pkg *Package) { - if check.seenPkgMap[pkg] { - return - } - check.seenPkgMap[pkg] = true - - forName, ok := check.pkgPathMap[pkg.name] - if !ok { - forName = make(map[string]bool) - check.pkgPathMap[pkg.name] = forName - } - forName[pkg.path] = true - - for _, imp := range pkg.imports { - check.markImports(imp) - } -} - -// check may be nil. -func (check *Checker) sprintf(format string, args ...interface{}) string { - var qf Qualifier - if check != nil { - qf = check.qualifier - } - return sprintf(qf, false, format, args...) -} - -func (check *Checker) report(err *error_) { - if err.empty() { - panic("no error to report") - } - check.err(err.pos(), err.code, err.msg(check.qualifier), err.soft) -} - -func (check *Checker) trace(pos syntax.Pos, format string, args ...interface{}) { - fmt.Printf("%s:\t%s%s\n", - pos, - strings.Repeat(". 
", check.indent), - sprintf(check.qualifier, true, format, args...), - ) -} - -// dump is only needed for debugging -func (check *Checker) dump(format string, args ...interface{}) { - fmt.Println(sprintf(check.qualifier, true, format, args...)) -} - -func (check *Checker) err(at poser, code Code, msg string, soft bool) { - switch code { - case InvalidSyntaxTree: - msg = "invalid syntax tree: " + msg - case 0: - panic("no error code provided") + panic("no error") } // Cheap trick: Don't report errors with messages containing @@ -233,47 +112,98 @@ func (check *Checker) err(at poser, code Code, msg string, soft bool) { // follow-on errors which don't add useful information. Only // exclude them if these strings are not at the beginning, // and only if we have at least one error already reported. - if check.firstErr != nil && (strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0) { - return - } - - pos := atPos(at) - - // If we are encountering an error while evaluating an inherited - // constant initialization expression, pos is the position of in - // the original expression, and not of the currently declared - // constant identifier. Use the provided errpos instead. - // TODO(gri) We may also want to augment the error message and - // refer to the position (pos) in the original expression. - if check.errpos.IsKnown() { - assert(check.iota != nil) - pos = check.errpos - } - - // If we have a URL for error codes, add a link to the first line. - if code != 0 && check.conf.ErrorURL != "" { - u := fmt.Sprintf(check.conf.ErrorURL, code) - if i := strings.Index(msg, "\n"); i >= 0 { - msg = msg[:i] + u + msg[i:] - } else { - msg += u + check := err.check + if check.firstErr != nil { + // It is sufficient to look at the first sub-error only. 
+ msg := err.desc[0].msg + if strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0 { + return } } - err := Error{pos, stripAnnotations(msg), msg, soft, code} - if check.firstErr == nil { - check.firstErr = err + if check.conf.Trace { + check.trace(err.pos(), "ERROR: %s (code = %d)", err.desc[0].msg, err.code) } - if check.conf.Trace { - check.trace(pos, "ERROR: %s", msg) + // In go/types, if there is a sub-error with a valid position, + // call the typechecker error handler for each sub-error. + // Otherwise, call it once, with a single combined message. + multiError := false + if !isTypes2 { + for i := 1; i < len(err.desc); i++ { + if err.desc[i].pos.IsKnown() { + multiError = true + break + } + } + } + + if multiError { + for i := range err.desc { + p := &err.desc[i] + check.handleError(i, p.pos, err.code, p.msg, err.soft) + } + } else { + check.handleError(0, err.pos(), err.code, err.msg(), err.soft) + } + + // make sure the error is not reported twice + err.desc = nil +} + +// handleError should only be called by error_.report. +func (check *Checker) handleError(index int, pos syntax.Pos, code Code, msg string, soft bool) { + assert(code != 0) + + if index == 0 { + // If we are encountering an error while evaluating an inherited + // constant initialization expression, pos is the position of + // the original expression, and not of the currently declared + // constant identifier. Use the provided errpos instead. + // TODO(gri) We may also want to augment the error message and + // refer to the position (pos) in the original expression. + if check.errpos.Pos().IsKnown() { + assert(check.iota != nil) + pos = check.errpos + } + + // Report invalid syntax trees explicitly. + if code == InvalidSyntaxTree { + msg = "invalid syntax tree: " + msg + } + + // If we have a URL for error codes, add a link to the first line. 
+ if check.conf.ErrorURL != "" { + url := fmt.Sprintf(check.conf.ErrorURL, code) + if i := strings.Index(msg, "\n"); i >= 0 { + msg = msg[:i] + url + msg[i:] + } else { + msg += url + } + } + } else { + // Indent sub-error. + // Position information is passed explicitly to Error, below. + msg = "\t" + msg + } + + e := Error{ + Pos: pos, + Msg: stripAnnotations(msg), + Full: msg, + Soft: soft, + Code: code, + } + + if check.firstErr == nil { + check.firstErr = e } f := check.conf.Error if f == nil { - panic(bailout{}) // report only first error + panic(bailout{}) // record first error and exit } - f(err) + f(e) } const ( @@ -281,26 +211,35 @@ const ( invalidOp = "invalid operation: " ) +// The poser interface is used to extract the position of type-checker errors. type poser interface { Pos() syntax.Pos } func (check *Checker) error(at poser, code Code, msg string) { - check.err(at, code, msg, false) + err := check.newError(code) + err.addf(at, "%s", msg) + err.report() } -func (check *Checker) errorf(at poser, code Code, format string, args ...interface{}) { - check.err(at, code, check.sprintf(format, args...), false) +func (check *Checker) errorf(at poser, code Code, format string, args ...any) { + err := check.newError(code) + err.addf(at, format, args...) + err.report() } -func (check *Checker) softErrorf(at poser, code Code, format string, args ...interface{}) { - check.err(at, code, check.sprintf(format, args...), true) +func (check *Checker) softErrorf(at poser, code Code, format string, args ...any) { + err := check.newError(code) + err.addf(at, format, args...) + err.soft = true + err.report() } -func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...interface{}) { +func (check *Checker) versionErrorf(at poser, v goVersion, format string, args ...any) { msg := check.sprintf(format, args...) 
- msg = fmt.Sprintf("%s requires %s or later", msg, v) - check.err(at, UnsupportedFeature, msg, true) + err := check.newError(UnsupportedFeature) + err.addf(at, "%s requires %s or later", msg, v) + err.report() } // atPos reports the left (= start) position of at. @@ -315,18 +254,3 @@ func atPos(at poser) syntax.Pos { } return at.Pos() } - -// stripAnnotations removes internal (type) annotations from s. -func stripAnnotations(s string) string { - var buf strings.Builder - for _, r := range s { - // strip #'s and subscript digits - if r < '₀' || '₀'+10 <= r { // '₀' == U+2080 - buf.WriteRune(r) - } - } - if buf.Len() < len(s) { - return buf.String() - } - return s -} diff --git a/src/cmd/compile/internal/types2/errors_test.go b/src/cmd/compile/internal/types2/errors_test.go index ac73ca4650..cfa52472b2 100644 --- a/src/cmd/compile/internal/types2/errors_test.go +++ b/src/cmd/compile/internal/types2/errors_test.go @@ -9,19 +9,19 @@ import "testing" func TestError(t *testing.T) { var err error_ want := "no error" - if got := err.String(); got != want { + if got := err.msg(); got != want { t.Errorf("empty error: got %q, want %q", got, want) } - want = ": foo 42" - err.errorf(nopos, "foo %d", 42) - if got := err.String(); got != want { + want = "foo 42" + err.addf(nopos, "foo %d", 42) + if got := err.msg(); got != want { t.Errorf("simple error: got %q, want %q", got, want) } - want = ": foo 42\n\tbar 43" - err.errorf(nopos, "bar %d", 43) - if got := err.String(); got != want { + want = "foo 42\n\tbar 43" + err.addf(nopos, "bar %d", 43) + if got := err.msg(); got != want { t.Errorf("simple error: got %q, want %q", got, want) } } diff --git a/src/cmd/compile/internal/types2/errsupport.go b/src/cmd/compile/internal/types2/errsupport.go new file mode 100644 index 0000000000..168150f679 --- /dev/null +++ b/src/cmd/compile/internal/types2/errsupport.go @@ -0,0 +1,113 @@ +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file implements support functions for error messages. + +package types2 + +// lookupError returns a case-specific error when a lookup of selector sel in the +// given type fails but an object with alternative spelling (case folding) is found. +// If structLit is set, the error message is specifically for struct literal fields. +func (check *Checker) lookupError(typ Type, sel string, obj Object, structLit bool) string { + // Provide more detail if there is an unexported object, or one with different capitalization. + // If selector and object are in the same package (==), export doesn't matter, otherwise (!=) it does. + // Messages depend on whether it's a general lookup or a field lookup in a struct literal. + // + // case sel pkg have message (examples for general lookup) + // --------------------------------------------------------------------------------------------------------- + // ok x.Foo == Foo + // misspelled x.Foo == FoO type X has no field or method Foo, but does have field FoO + // misspelled x.Foo == foo type X has no field or method Foo, but does have field foo + // misspelled x.Foo == foO type X has no field or method Foo, but does have field foO + // + // misspelled x.foo == Foo type X has no field or method foo, but does have field Foo + // misspelled x.foo == FoO type X has no field or method foo, but does have field FoO + // ok x.foo == foo + // misspelled x.foo == foO type X has no field or method foo, but does have field foO + // + // ok x.Foo != Foo + // misspelled x.Foo != FoO type X has no field or method Foo, but does have field FoO + // unexported x.Foo != foo type X has no field or method Foo, but does have unexported field foo + // missing x.Foo != foO type X has no field or method Foo + // + // misspelled x.foo != Foo type X has no field or method foo, but does have field Foo + // missing x.foo != FoO type X has no field 
or method foo + // inaccessible x.foo != foo cannot refer to unexported field foo + // missing x.foo != foO type X has no field or method foo + + const ( + ok = iota + missing // no object found + misspelled // found object with different spelling + unexported // found object with name differing only in first letter + inaccessible // found object with matching name but inaccessible from the current package + ) + + // determine case + e := missing + var alt string // alternative spelling of selector; if any + if obj != nil { + alt = obj.Name() + if obj.Pkg() == check.pkg { + assert(alt != sel) // otherwise there is no lookup error + e = misspelled + } else if isExported(sel) { + if isExported(alt) { + e = misspelled + } else if tail(sel) == tail(alt) { + e = unexported + } + } else if isExported(alt) { + if tail(sel) == tail(alt) { + e = misspelled + } + } else if sel == alt { + e = inaccessible + } + } + + if structLit { + switch e { + case missing: + return check.sprintf("unknown field %s in struct literal of type %s", sel, typ) + case misspelled: + return check.sprintf("unknown field %s in struct literal of type %s, but does have %s", sel, typ, alt) + case unexported: + return check.sprintf("unknown field %s in struct literal of type %s, but does have unexported %s", sel, typ, alt) + case inaccessible: + return check.sprintf("cannot refer to unexported field %s in struct literal of type %s", alt, typ) + } + } else { + what := "object" + switch obj.(type) { + case *Var: + what = "field" + case *Func: + what = "method" + } + switch e { + case missing: + return check.sprintf("type %s has no field or method %s", typ, sel) + case misspelled: + return check.sprintf("type %s has no field or method %s, but does have %s %s", typ, sel, what, alt) + case unexported: + return check.sprintf("type %s has no field or method %s, but does have unexported %s %s", typ, sel, what, alt) + case inaccessible: + return check.sprintf("cannot refer to unexported %s %s", what, alt) + } + } 
+ + panic("unreachable") +} + +// tail returns the string s without its first (UTF-8) character. +// If len(s) == 0, the result is s. +func tail(s string) string { + for i, _ := range s { + if i > 0 { + return s[i:] + } + } + return s +} diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go index 321b0c4762..2f9d544a4b 100644 --- a/src/cmd/compile/internal/types2/expr.go +++ b/src/cmd/compile/internal/types2/expr.go @@ -12,6 +12,7 @@ import ( "go/constant" "go/token" . "internal/types/errors" + "strings" ) /* @@ -268,7 +269,7 @@ func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final boo // upon assignment or use. if debug { check.dump("%v: found old type(%s): %s (new: %s)", atPos(x), x, old.typ, typ) - unreachable() + panic("unreachable") } return @@ -337,7 +338,7 @@ func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final boo } default: - unreachable() + panic("unreachable") } // If the new type is not final and still untyped, just @@ -546,7 +547,7 @@ func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase b } default: - unreachable() + panic("unreachable") } // comparison is ok @@ -956,18 +957,32 @@ const ( statement ) -// TODO(gri) In rawExpr below, consider using T instead of hint and -// some sort of "operation mode" instead of allowGeneric. -// May be clearer and less error-prone. +// target represent the (signature) type and description of the LHS +// variable of an assignment, or of a function result variable. +type target struct { + sig *Signature + desc string +} + +// newTarget creates a new target for the given type and description. +// The result is nil if typ is not a signature. +func newTarget(typ Type, desc string) *target { + if typ != nil { + if sig, _ := under(typ).(*Signature); sig != nil { + return &target{sig, desc} + } + } + return nil +} // rawExpr typechecks expression e and initializes x with the expression // value or type. 
If an error occurred, x.mode is set to invalid. -// If a non-nil target type T is given and e is a generic function -// or function call, T is used to infer the type arguments for e. +// If a non-nil target T is given and e is a generic function, +// T is used to infer the type arguments for e. // If hint != nil, it is the type of a composite literal element. // If allowGeneric is set, the operand type may be an uninstantiated // parameterized type or function value. -func (check *Checker) rawExpr(T Type, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind { +func (check *Checker) rawExpr(T *target, x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind { if check.conf.Trace { check.trace(e.Pos(), "-- expr %s", e) check.indent++ @@ -989,9 +1004,9 @@ func (check *Checker) rawExpr(T Type, x *operand, e syntax.Expr, hint Type, allo } // If x is a generic type, or a generic function whose type arguments cannot be inferred -// from a non-nil target type T, nonGeneric reports an error and invalidates x.mode and x.typ. +// from a non-nil target T, nonGeneric reports an error and invalidates x.mode and x.typ. // Otherwise it leaves x alone. -func (check *Checker) nonGeneric(T Type, x *operand) { +func (check *Checker) nonGeneric(T *target, x *operand) { if x.mode == invalid || x.mode == novalue { return } @@ -1004,10 +1019,8 @@ func (check *Checker) nonGeneric(T Type, x *operand) { case *Signature: if t.tparams != nil { if enableReverseTypeInference && T != nil { - if tsig, _ := under(T).(*Signature); tsig != nil { - check.funcInst(tsig, x.Pos(), x, nil, true) - return - } + check.funcInst(T, x.Pos(), x, nil, true) + return } what = "function" } @@ -1019,10 +1032,39 @@ func (check *Checker) nonGeneric(T Type, x *operand) { } } +// langCompat reports an error if the representation of a numeric +// literal is not compatible with the current language version. 
+func (check *Checker) langCompat(lit *syntax.BasicLit) { + s := lit.Value + if len(s) <= 2 || check.allowVersion(check.pkg, lit, go1_13) { + return + } + // len(s) > 2 + if strings.Contains(s, "_") { + check.versionErrorf(lit, go1_13, "underscore in numeric literal") + return + } + if s[0] != '0' { + return + } + radix := s[1] + if radix == 'b' || radix == 'B' { + check.versionErrorf(lit, go1_13, "binary literal") + return + } + if radix == 'o' || radix == 'O' { + check.versionErrorf(lit, go1_13, "0o/0O-style octal literal") + return + } + if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { + check.versionErrorf(lit, go1_13, "hexadecimal floating-point literal") + } +} + // exprInternal contains the core of type checking of expressions. // Must only be called by rawExpr. // (See rawExpr for an explanation of the parameters.) -func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) exprKind { +func (check *Checker) exprInternal(T *target, x *operand, e syntax.Expr, hint Type) exprKind { // make sure x has a valid state in case of bailout // (was go.dev/issue/5770) x.mode = invalid @@ -1030,7 +1072,7 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) switch e := e.(type) { case nil: - unreachable() + panic("unreachable") case *syntax.BadExpr: goto Error // error was reported before @@ -1081,6 +1123,10 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) case *syntax.FuncLit: if sig, ok := check.typ(e.Type).(*Signature); ok { + // Set the Scope's extent to the complete "func (...) {...}" + // so that Scope.Innermost works correctly. 
+ sig.scope.pos = e.Pos() + sig.scope.end = syntax.EndPos(e) if !check.conf.IgnoreFuncBodies && e.Body != nil { // Anonymous functions are considered part of the // init expression/func declaration which contains @@ -1168,9 +1214,14 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) check.errorf(kv, InvalidLitField, "invalid field name %s in struct literal", kv.Key) continue } - i := fieldIndex(utyp.fields, check.pkg, key.Value) + i := fieldIndex(fields, check.pkg, key.Value, false) if i < 0 { - check.errorf(kv.Key, MissingLitField, "unknown field %s in struct literal of type %s", key.Value, base) + var alt Object + if j := fieldIndex(fields, check.pkg, key.Value, true); j >= 0 { + alt = fields[j] + } + msg := check.lookupError(base, key.Value, alt, true) + check.error(kv.Key, MissingLitField, msg) continue } fld := fields[i] @@ -1328,11 +1379,10 @@ func (check *Checker) exprInternal(T Type, x *operand, e syntax.Expr, hint Type) case *syntax.IndexExpr: if check.indexExpr(x, e) { - var tsig *Signature - if enableReverseTypeInference && T != nil { - tsig, _ = under(T).(*Signature) + if !enableReverseTypeInference { + T = nil } - check.funcInst(tsig, e.Pos(), x, e, true) + check.funcInst(T, e.Pos(), x, e, true) } if x.mode == invalid { goto Error @@ -1543,11 +1593,11 @@ func (check *Checker) typeAssertion(e syntax.Expr, x *operand, T Type, typeSwitc } // expr typechecks expression e and initializes x with the expression value. -// If a non-nil target type T is given and e is a generic function -// or function call, T is used to infer the type arguments for e. +// If a non-nil target T is given and e is a generic function or +// a function call, T is used to infer the type arguments for e. // The result must be a single value. // If an error occurred, x.mode is set to invalid. 
-func (check *Checker) expr(T Type, x *operand, e syntax.Expr) { +func (check *Checker) expr(T *target, x *operand, e syntax.Expr) { check.rawExpr(T, x, e, nil, false) check.exclude(x, 1< 0 { + buf.WriteString(", ") + } + buf.WriteString(ExprString(x)) + } + buf.WriteByte(']') + arg = buf.String() + case Object: + arg = ObjectString(a, qf) + case Type: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + w.typ(a) + arg = buf.String() + case []Type: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteString(", ") + } + w.typ(x) + } + buf.WriteByte(']') + arg = buf.String() + case []*TypeParam: + var buf bytes.Buffer + w := newTypeWriter(&buf, qf) + w.tpSubscripts = tpSubscripts + buf.WriteByte('[') + for i, x := range a { + if i > 0 { + buf.WriteString(", ") + } + w.typ(x) + } + buf.WriteByte(']') + arg = buf.String() + } + args[i] = arg + } + return fmt.Sprintf(format, args...) +} + +// check may be nil. +func (check *Checker) sprintf(format string, args ...any) string { + var qf Qualifier + if check != nil { + qf = check.qualifier + } + return sprintf(qf, false, format, args...) +} + +func (check *Checker) trace(pos syntax.Pos, format string, args ...any) { + fmt.Printf("%s:\t%s%s\n", + pos, + strings.Repeat(". ", check.indent), + sprintf(check.qualifier, true, format, args...), + ) +} + +// dump is only needed for debugging +func (check *Checker) dump(format string, args ...any) { + fmt.Println(sprintf(check.qualifier, true, format, args...)) +} + +func (check *Checker) qualifier(pkg *Package) string { + // Qualify the package unless it's the package being type-checked. 
+ if pkg != check.pkg { + if check.pkgPathMap == nil { + check.pkgPathMap = make(map[string]map[string]bool) + check.seenPkgMap = make(map[*Package]bool) + check.markImports(check.pkg) + } + // If the same package name was used by multiple packages, display the full path. + if len(check.pkgPathMap[pkg.name]) > 1 { + return strconv.Quote(pkg.path) + } + return pkg.name + } + return "" +} + +// markImports recursively walks pkg and its imports, to record unique import +// paths in pkgPathMap. +func (check *Checker) markImports(pkg *Package) { + if check.seenPkgMap[pkg] { + return + } + check.seenPkgMap[pkg] = true + + forName, ok := check.pkgPathMap[pkg.name] + if !ok { + forName = make(map[string]bool) + check.pkgPathMap[pkg.name] = forName + } + forName[pkg.path] = true + + for _, imp := range pkg.imports { + check.markImports(imp) + } +} + +// stripAnnotations removes internal (type) annotations from s. +func stripAnnotations(s string) string { + var buf strings.Builder + for _, r := range s { + // strip #'s and subscript digits + if r < '₀' || '₀'+10 <= r { // '₀' == U+2080 + buf.WriteRune(r) + } + } + if buf.Len() < len(s) { + return buf.String() + } + return s +} diff --git a/src/cmd/compile/internal/types2/gcsizes.go b/src/cmd/compile/internal/types2/gcsizes.go index d204d9feef..15f3e00642 100644 --- a/src/cmd/compile/internal/types2/gcsizes.go +++ b/src/cmd/compile/internal/types2/gcsizes.go @@ -56,7 +56,7 @@ func (s *gcSizes) Alignof(T Type) (result int64) { return s.WordSize } case *TypeParam, *Union: - unreachable() + panic("unreachable") } a := s.Sizeof(T) // may be 0 or negative // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." 
@@ -154,7 +154,7 @@ func (s *gcSizes) Sizeof(T Type) int64 { assert(!isTypeParam(T)) return s.WordSize * 2 case *TypeParam, *Union: - unreachable() + panic("unreachable") } return s.WordSize // catch-all } diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go index 3a7c74dc82..b3f0f47c22 100644 --- a/src/cmd/compile/internal/types2/infer.go +++ b/src/cmd/compile/internal/types2/infer.go @@ -9,7 +9,6 @@ package types2 import ( "cmd/compile/internal/syntax" "fmt" - . "internal/types/errors" "strings" ) @@ -24,9 +23,12 @@ const enableReverseTypeInference = true // disable for debugging // based on the given type parameters tparams, type arguments targs, function parameters params, and // function arguments args, if any. There must be at least one type parameter, no more type arguments // than type parameters, and params and args must match in number (incl. zero). +// If reverse is set, an error message's contents are reversed for a better error message for some +// errors related to reverse type inference (where the function call is synthetic). // If successful, infer returns the complete list of given and inferred type arguments, one for each -// type parameter. Otherwise the result is nil and appropriate errors will be reported. -func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (inferred []Type) { +// type parameter. Otherwise the result is nil. Errors are reported through the err parameter. +// Note: infer may fail (return nil) due to invalid args operands without reporting additional errors. +func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand, reverse bool, err *error_) (inferred []Type) { // Don't verify result conditions if there's no error handler installed: // in that case, an error leads to an exit panic and the result value may // be incorrect. 
But in that case it doesn't matter because callers won't @@ -110,7 +112,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, // Terminology: generic parameter = function parameter with a type-parameterized type u := newUnifier(tparams, targs, check.allowVersion(check.pkg, pos, go1_21)) - errorf := func(kind string, tpar, targ Type, arg *operand) { + errorf := func(tpar, targ Type, arg *operand) { // provide a better error message if we can targs := u.inferred(tparams) if targs[0] == nil { @@ -125,7 +127,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, } } if allFailed { - check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeParamsString(tparams)) + err.addf(arg, "type %s of %s does not match %s (cannot infer %s)", targ, arg.expr, tpar, typeParamsString(tparams)) return } } @@ -137,9 +139,13 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, // InvalidTypeArg). We can't differentiate these cases, so fall back on // the more general CannotInferTypeArgs. if inferred != tpar { - check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar) + if reverse { + err.addf(arg, "inferred type %s for %s does not match type %s of %s", inferred, tpar, targ, arg.expr) + } else { + err.addf(arg, "type %s of %s does not match inferred type %s for %s", targ, arg.expr, inferred, tpar) + } } else { - check.errorf(arg, CannotInferTypeArgs, "%s %s of %s does not match %s", kind, targ, arg.expr, tpar) + err.addf(arg, "type %s of %s does not match %s", targ, arg.expr, tpar) } } @@ -168,7 +174,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, // Collect the indices of untyped arguments and handle them later. 
if isTyped(arg.typ) { if !u.unify(par.typ, arg.typ, assign) { - errorf("type", par.typ, arg.typ, arg) + errorf(par.typ, arg.typ, arg) return nil } } else if _, ok := par.typ.(*TypeParam); ok && !arg.isNil() { @@ -246,7 +252,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, // TODO(gri) Type parameters that appear in the constraint and // for which we have type arguments inferred should // use those type arguments for a better error message. - check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s", tpar, tx, tpar.Constraint()) + err.addf(pos, "%s (type %s) does not satisfy %s", tpar, tx, tpar.Constraint()) return nil } case single && !core.tilde: @@ -271,7 +277,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, constraint := tpar.iface() if m, _ := check.missingMethod(tx, constraint, true, func(x, y Type) bool { return u.unify(x, y, exact) }, &cause); m != nil { // TODO(gri) better error message (see TODO above) - check.errorf(pos, CannotInferTypeArgs, "%s (type %s) does not satisfy %s %s", tpar, tx, tpar.Constraint(), cause) + err.addf(pos, "%s (type %s) does not satisfy %s %s", tpar, tx, tpar.Constraint(), cause) return nil } } @@ -312,7 +318,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, } else { m := maxType(max, arg.typ) if m == nil { - check.errorf(arg, CannotInferTypeArgs, "mismatched types %s and %s (cannot infer %s)", max, arg.typ, tpar) + err.addf(arg, "mismatched types %s and %s (cannot infer %s)", max, arg.typ, tpar) return nil } max = m @@ -421,7 +427,7 @@ func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, for i, typ := range inferred { if typ == nil || isParameterized(tparams, typ) { obj := tparams[i].obj - check.errorf(pos, CannotInferTypeArgs, "cannot infer %s (%s)", obj.name, obj.pos) + err.addf(pos, "cannot infer %s (%v)", obj.name, obj.pos) return nil } } diff --git 
a/src/cmd/compile/internal/types2/initorder.go b/src/cmd/compile/internal/types2/initorder.go index 6e041721e8..0d28495542 100644 --- a/src/cmd/compile/internal/types2/initorder.go +++ b/src/cmd/compile/internal/types2/initorder.go @@ -160,17 +160,16 @@ func (check *Checker) reportCycle(cycle []Object) { return } - var err error_ - err.code = InvalidInitCycle - err.errorf(obj, "initialization cycle for %s", obj.Name()) + err := check.newError(InvalidInitCycle) + err.addf(obj, "initialization cycle for %s", obj.Name()) // subtle loop: print cycle[i] for i = 0, n-1, n-2, ... 1 for len(cycle) = n for i := len(cycle) - 1; i >= 0; i-- { - err.errorf(obj, "%s refers to", obj.Name()) + err.addf(obj, "%s refers to", obj.Name()) obj = cycle[i] } // print cycle[0] again to close the cycle - err.errorf(obj, "%s", obj.Name()) - check.report(&err) + err.addf(obj, "%s", obj.Name()) + err.report() } // ---------------------------------------------------------------------------- @@ -315,11 +314,11 @@ func (a nodeQueue) Less(i, j int) bool { return x.ndeps < y.ndeps || x.ndeps == y.ndeps && x.obj.order() < y.obj.order() } -func (a *nodeQueue) Push(x interface{}) { +func (a *nodeQueue) Push(x any) { panic("unreachable") } -func (a *nodeQueue) Pop() interface{} { +func (a *nodeQueue) Pop() any { n := len(*a) x := (*a)[n-1] x.index = -1 // for safety diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go index 0c6b202ff9..e33d4b41c2 100644 --- a/src/cmd/compile/internal/types2/instantiate.go +++ b/src/cmd/compile/internal/types2/instantiate.go @@ -122,7 +122,8 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin assert(expanding == nil) // function instances cannot be reached from Named types tparams := orig.TypeParams() - if !check.validateTArgLen(pos, tparams.Len(), len(targs)) { + // TODO(gri) investigate if this is needed (type argument and parameter count seem to be correct here) + if 
!check.validateTArgLen(pos, orig.String(), tparams.Len(), len(targs)) { return Typ[Invalid] } if tparams.Len() == 0 { @@ -150,19 +151,27 @@ func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, expandin return updateContexts(res) } -// validateTArgLen verifies that the length of targs and tparams matches, -// reporting an error if not. If validation fails and check is nil, -// validateTArgLen panics. -func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool { - if ntargs != ntparams { - // TODO(gri) provide better error message - if check != nil { - check.errorf(pos, WrongTypeArgCount, "got %d arguments but %d type parameters", ntargs, ntparams) - return false - } - panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, ntargs, ntparams)) +// validateTArgLen checks that the number of type arguments (got) matches the +// number of type parameters (want); if they don't match an error is reported. +// If validation fails and check is nil, validateTArgLen panics. 
+func (check *Checker) validateTArgLen(pos syntax.Pos, name string, want, got int) bool { + var qual string + switch { + case got < want: + qual = "not enough" + case got > want: + qual = "too many" + default: + return true } - return true + + msg := check.sprintf("%s type arguments for type %s: have %d, want %d", qual, name, got, want) + if check != nil { + check.error(atPos(pos), WrongTypeArgCount, msg) + return false + } + + panic(fmt.Sprintf("%v: %s", pos, msg)) } func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type, ctxt *Context) (int, error) { diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go index 95b9f94078..0275fe70d7 100644 --- a/src/cmd/compile/internal/types2/issues_test.go +++ b/src/cmd/compile/internal/types2/issues_test.go @@ -698,14 +698,14 @@ func TestIssue51093(t *testing.T) { n++ tpar, _ := tv.Type.(*TypeParam) if tpar == nil { - t.Fatalf("%s: got type %s, want type parameter", syntax.String(x), tv.Type) + t.Fatalf("%s: got type %s, want type parameter", ExprString(x), tv.Type) } if name := tpar.Obj().Name(); name != "P" { - t.Fatalf("%s: got type parameter name %s, want P", syntax.String(x), name) + t.Fatalf("%s: got type parameter name %s, want P", ExprString(x), name) } // P(val) must not be constant if tv.Value != nil { - t.Errorf("%s: got constant value %s (%s), want no constant", syntax.String(x), tv.Value, tv.Value.String()) + t.Errorf("%s: got constant value %s (%s), want no constant", ExprString(x), tv.Value, tv.Value.String()) } } } @@ -998,8 +998,98 @@ type S struct{ A } } got := S.String() - const want = "type p.S struct{p.A /* = []int */}" + const want = "type p.S struct{p.A}" if got != want { t.Fatalf("got %q; want %q", got, want) } } + +func TestIssue59831(t *testing.T) { + // Package a exports a type S with an unexported method m; + // the tests check the error messages when m is not found. 
+ const asrc = `package a; type S struct{}; func (S) m() {}` + apkg := mustTypecheck(asrc, nil, nil) + + // Package b exports a type S with an exported method m; + // the tests check the error messages when M is not found. + const bsrc = `package b; type S struct{}; func (S) M() {}` + bpkg := mustTypecheck(bsrc, nil, nil) + + tests := []struct { + imported *Package + src, err string + }{ + // tests importing a (or nothing) + {apkg, `package a1; import "a"; var _ interface { M() } = a.S{}`, + "a.S does not implement interface{M()} (missing method M) have m() want M()"}, + + {apkg, `package a2; import "a"; var _ interface { m() } = a.S{}`, + "a.S does not implement interface{m()} (unexported method m)"}, // test for issue + + {nil, `package a3; type S struct{}; func (S) m(); var _ interface { M() } = S{}`, + "S does not implement interface{M()} (missing method M) have m() want M()"}, + + {nil, `package a4; type S struct{}; func (S) m(); var _ interface { m() } = S{}`, + ""}, // no error expected + + {nil, `package a5; type S struct{}; func (S) m(); var _ interface { n() } = S{}`, + "S does not implement interface{n()} (missing method n)"}, + + // tests importing b (or nothing) + {bpkg, `package b1; import "b"; var _ interface { m() } = b.S{}`, + "b.S does not implement interface{m()} (missing method m) have M() want m()"}, + + {bpkg, `package b2; import "b"; var _ interface { M() } = b.S{}`, + ""}, // no error expected + + {nil, `package b3; type S struct{}; func (S) M(); var _ interface { M() } = S{}`, + ""}, // no error expected + + {nil, `package b4; type S struct{}; func (S) M(); var _ interface { m() } = S{}`, + "S does not implement interface{m()} (missing method m) have M() want m()"}, + + {nil, `package b5; type S struct{}; func (S) M(); var _ interface { n() } = S{}`, + "S does not implement interface{n()} (missing method n)"}, + } + + for _, test := range tests { + // typecheck test source + conf := Config{Importer: importHelper{pkg: test.imported}} + pkg, 
err := typecheck(test.src, &conf, nil) + if err == nil { + if test.err != "" { + t.Errorf("package %s: got no error, want %q", pkg.Name(), test.err) + } + continue + } + if test.err == "" { + t.Errorf("package %s: got %q, want not error", pkg.Name(), err.Error()) + } + + // flatten reported error message + errmsg := strings.ReplaceAll(err.Error(), "\n", " ") + errmsg = strings.ReplaceAll(errmsg, "\t", "") + + // verify error message + if !strings.Contains(errmsg, test.err) { + t.Errorf("package %s: got %q, want %q", pkg.Name(), errmsg, test.err) + } + } +} + +func TestIssue64759(t *testing.T) { + const src = ` +//go:build go1.18 +package p + +func f[S ~[]E, E any](S) {} + +func _() { + f([]string{}) +} +` + // Per the go:build directive, the source must typecheck + // even though the (module) Go version is set to go1.17. + conf := Config{GoVersion: "go1.17"} + mustTypecheck(src, &conf, nil) +} diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go index ffb37004ce..8ea58ad0aa 100644 --- a/src/cmd/compile/internal/types2/labels.go +++ b/src/cmd/compile/internal/types2/labels.go @@ -133,12 +133,11 @@ func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.Lab if name := s.Label.Value; name != "_" { lbl := NewLabel(s.Label.Pos(), check.pkg, name) if alt := all.Insert(lbl); alt != nil { - var err error_ - err.code = DuplicateLabel + err := check.newError(DuplicateLabel) err.soft = true - err.errorf(lbl.pos, "label %s already declared", name) - err.recordAltDecl(alt) - check.report(&err) + err.addf(lbl.pos, "label %s already declared", name) + err.addAltDecl(alt) + err.report() // ok to continue } else { b.insert(s) diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go index 893cdb157d..3583a48407 100644 --- a/src/cmd/compile/internal/types2/lookup.go +++ b/src/cmd/compile/internal/types2/lookup.go @@ -9,7 +9,6 @@ package types2 import ( "bytes" 
"cmd/compile/internal/syntax" - "strings" ) // Internal use of LookupFieldOrMethod: If the obj result is a method @@ -46,7 +45,12 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o if T == nil { panic("LookupFieldOrMethod on nil type") } + return lookupFieldOrMethod(T, addressable, pkg, name, false) +} +// lookupFieldOrMethod is like LookupFieldOrMethod but with the additional foldCase parameter +// (see Object.sameId for the meaning of foldCase). +func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) { // Methods cannot be associated to a named pointer type. // (spec: "The type denoted by T is called the receiver base type; // it must not be a pointer or interface type and it must be declared @@ -56,7 +60,7 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o // not have found it for T (see also go.dev/issue/8590). if t := asNamed(T); t != nil { if p, _ := t.Underlying().(*Pointer); p != nil { - obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, false) + obj, index, indirect = lookupFieldOrMethodImpl(p, false, pkg, name, foldCase) if _, ok := obj.(*Func); ok { return nil, nil, false } @@ -64,7 +68,7 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o } } - obj, index, indirect = lookupFieldOrMethodImpl(T, addressable, pkg, name, false) + obj, index, indirect = lookupFieldOrMethodImpl(T, addressable, pkg, name, foldCase) // If we didn't find anything and if we have a type parameter with a core type, // see if there is a matching field (but not a method, those need to be declared @@ -73,7 +77,7 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o const enableTParamFieldLookup = false // see go.dev/issue/51576 if enableTParamFieldLookup && obj == nil && isTypeParam(T) { if t := coreType(T); t != nil { - obj, index, indirect = 
lookupFieldOrMethodImpl(t, addressable, pkg, name, false) + obj, index, indirect = lookupFieldOrMethodImpl(t, addressable, pkg, name, foldCase) if _, ok := obj.(*Var); !ok { obj, index, indirect = nil, nil, false // accept fields (variables) only } @@ -82,8 +86,8 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o return } -// lookupFieldOrMethodImpl is the implementation of LookupFieldOrMethod. -// Notably, in contrast to LookupFieldOrMethod, it won't find struct fields +// lookupFieldOrMethodImpl is the implementation of lookupFieldOrMethod. +// Notably, in contrast to lookupFieldOrMethod, it won't find struct fields // in base types of defined (*Named) pointer types T. For instance, given // the declaration: // @@ -92,12 +96,9 @@ func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (o // lookupFieldOrMethodImpl won't find the field f in the defined (*Named) type T // (methods on T are not permitted in the first place). // -// Thus, lookupFieldOrMethodImpl should only be called by LookupFieldOrMethod +// Thus, lookupFieldOrMethodImpl should only be called by lookupFieldOrMethod // and missingMethod (the latter doesn't care about struct fields). // -// If foldCase is true, method names are considered equal if they are equal -// with case folding. -// // The resulting object may not be fully type-checked. func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) { // WARNING: The code in this function is extremely subtle - do not modify casually! 
@@ -167,7 +168,7 @@ func lookupFieldOrMethodImpl(T Type, addressable bool, pkg *Package, name string case *Struct: // look for a matching field and collect embedded types for i, f := range t.fields { - if f.sameId(pkg, name) { + if f.sameId(pkg, name, foldCase) { assert(f.typ != nil) index = concat(e.index, i) if obj != nil || e.multiples { @@ -343,6 +344,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y ok = iota notFound wrongName + unexported wrongSig ambigSel ptrRecv @@ -388,6 +390,11 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y f, _ = obj.(*Func) if f != nil { state = wrongName + if f.name == m.name { + // If the names are equal, f must be unexported + // (otherwise the package wouldn't matter). + state = unexported + } } } break @@ -436,8 +443,9 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y } case wrongName: fs, ms := check.funcString(f, false), check.funcString(m, false) - *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", - m.Name(), fs, ms) + *cause = check.sprintf("(missing method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms) + case unexported: + *cause = check.sprintf("(unexported method %s)", m.Name()) case wrongSig: fs, ms := check.funcString(f, false), check.funcString(m, false) if fs == ms { @@ -445,8 +453,18 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y // Add package information to disambiguate (go.dev/issue/54258). fs, ms = check.funcString(f, true), check.funcString(m, true) } - *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", - m.Name(), fs, ms) + if fs == ms { + // We still have "want Foo, have Foo". + // This is most likely due to different type parameters with + // the same name appearing in the instantiated signatures + // (go.dev/issue/61685). 
+ // Rather than reporting this misleading error cause, for now + // just point out that the method signature is incorrect. + // TODO(gri) should find a good way to report the root cause + *cause = check.sprintf("(wrong type for method %s)", m.Name()) + break + } + *cause = check.sprintf("(wrong type for method %s)\n\t\thave %s\n\t\twant %s", m.Name(), fs, ms) case ambigSel: *cause = check.sprintf("(ambiguous selector %s.%s)", V, m.Name()) case ptrRecv: @@ -454,7 +472,7 @@ func (check *Checker) missingMethod(V, T Type, static bool, equivalent func(x, y case field: *cause = check.sprintf("(%s.%s is a field, not a method)", V, m.Name()) default: - unreachable() + panic("unreachable") } } @@ -560,10 +578,11 @@ func concat(list []int, i int) []int { } // fieldIndex returns the index for the field with matching package and name, or a value < 0. -func fieldIndex(fields []*Var, pkg *Package, name string) int { +// See Object.sameId for the meaning of foldCase. +func fieldIndex(fields []*Var, pkg *Package, name string, foldCase bool) int { if name != "_" { for i, f := range fields { - if f.sameId(pkg, name) { + if f.sameId(pkg, name, foldCase) { return i } } @@ -571,12 +590,12 @@ func fieldIndex(fields []*Var, pkg *Package, name string) int { return -1 } -// lookupMethod returns the index of and method with matching package and name, or (-1, nil). -// If foldCase is true, method names are considered equal if they are equal with case folding. -func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) { +// methodIndex returns the index of and method with matching package and name, or (-1, nil). +// See Object.sameId for the meaning of foldCase. 
+func methodIndex(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) { if name != "_" { for i, m := range methods { - if (m.name == name || foldCase && strings.EqualFold(m.name, name)) && m.sameId(pkg, m.name) { + if m.sameId(pkg, name, foldCase) { return i, m } } diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go index dae9230252..1263507421 100644 --- a/src/cmd/compile/internal/types2/mono.go +++ b/src/cmd/compile/internal/types2/mono.go @@ -137,10 +137,9 @@ func (check *Checker) reportInstanceLoop(v int) { // TODO(mdempsky): Pivot stack so we report the cycle from the top? - var err error_ - err.code = InvalidInstanceCycle + err := check.newError(InvalidInstanceCycle) obj0 := check.mono.vertices[v].obj - err.errorf(obj0, "instantiation cycle:") + err.addf(obj0, "instantiation cycle:") qf := RelativeTo(check.pkg) for _, v := range stack { @@ -151,12 +150,12 @@ func (check *Checker) reportInstanceLoop(v int) { default: panic("unexpected type") case *Named: - err.errorf(edge.pos, "%s implicitly parameterized by %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented + err.addf(atPos(edge.pos), "%s implicitly parameterized by %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented case *TypeParam: - err.errorf(edge.pos, "%s instantiated as %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented + err.addf(atPos(edge.pos), "%s instantiated as %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented } } - check.report(&err) + err.report() } // recordCanon records that tpar is the canonical type parameter @@ -174,7 +173,7 @@ func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*Type for i, tpar := range tparams { pos := pos if i < len(xlist) { - pos = syntax.StartPos(xlist[i]) + pos = startPos(xlist[i]) } w.assign(pkg, pos, tpar, targs[i]) } diff --git a/src/cmd/compile/internal/types2/named.go 
b/src/cmd/compile/internal/types2/named.go index 893247de35..aa7ab00c33 100644 --- a/src/cmd/compile/internal/types2/named.go +++ b/src/cmd/compile/internal/types2/named.go @@ -6,6 +6,7 @@ package types2 import ( "cmd/compile/internal/syntax" + "strings" "sync" "sync/atomic" ) @@ -334,6 +335,12 @@ func (t *Named) NumMethods() int { // For an ordinary or instantiated type t, the receiver base type of this // method is the named type t. For an uninstantiated generic type t, each // method receiver is instantiated with its receiver type parameters. +// +// Methods are numbered deterministically: given the same list of source files +// presented to the type checker, or the same sequence of NewMethod and AddMethod +// calls, the mapping from method index to corresponding method remains the same. +// But the specific ordering is not specified and must not be relied on as it may +// change in the future. func (t *Named) Method(i int) *Func { t.resolve() @@ -444,15 +451,40 @@ func (t *Named) SetUnderlying(underlying Type) { } // AddMethod adds method m unless it is already in the method list. -// t must not have type arguments. +// The method must be in the same package as t, and t must not have +// type arguments. func (t *Named) AddMethod(m *Func) { + assert(samePkg(t.obj.pkg, m.pkg)) assert(t.inst == nil) t.resolve() - if i, _ := lookupMethod(t.methods, m.pkg, m.name, false); i < 0 { + if t.methodIndex(m.name, false) < 0 { t.methods = append(t.methods, m) } } +// methodIndex returns the index of the method with the given name. +// If foldCase is set, capitalization in the name is ignored. +// The result is negative if no such method exists. 
+func (t *Named) methodIndex(name string, foldCase bool) int { + if name == "_" { + return -1 + } + if foldCase { + for i, m := range t.methods { + if strings.EqualFold(m.name, name) { + return i + } + } + } else { + for i, m := range t.methods { + if m.name == name { + return i + } + } + } + return -1 +} + // TODO(gri) Investigate if Unalias can be moved to where underlying is set. func (t *Named) Underlying() Type { return Unalias(t.resolve().underlying) } func (t *Named) String() string { return TypeString(t, nil) } @@ -520,7 +552,7 @@ loop: n = n1 if i, ok := seen[n]; ok { // cycle - check.cycleError(path[i:]) + check.cycleError(path[i:], firstInSrc(path[i:])) u = Typ[Invalid] break } @@ -553,15 +585,16 @@ loop: func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) { n.resolve() - // If n is an instance, we may not have yet instantiated all of its methods. - // Look up the method index in orig, and only instantiate method at the - // matching index (if any). - i, _ := lookupMethod(n.Origin().methods, pkg, name, foldCase) - if i < 0 { - return -1, nil + if samePkg(n.obj.pkg, pkg) || isExported(name) || foldCase { + // If n is an instance, we may not have yet instantiated all of its methods. + // Look up the method index in orig, and only instantiate method at the + // matching index (if any). + if i := n.Origin().methodIndex(name, foldCase); i >= 0 { + // For instances, m.Method(i) will be different from the orig method. + return i, n.Method(i) + } } - // For instances, m.Method(i) will be different from the orig method. - return i, n.Method(i) + return -1, nil } // context returns the type-checker context. 
diff --git a/src/cmd/compile/internal/types2/named_test.go b/src/cmd/compile/internal/types2/named_test.go index 705dcaee27..25aea26792 100644 --- a/src/cmd/compile/internal/types2/named_test.go +++ b/src/cmd/compile/internal/types2/named_test.go @@ -112,3 +112,51 @@ type Inst = *Tree[int] t.Errorf("Duplicate instances in cycle: %s (%p) -> %s (%p) -> %s (%p)", Inst, Inst, Node, Node, Tree, Tree) } } + +// TestMethodOrdering is a simple test verifying that the indices of methods of +// a named type remain the same as long as the same source and AddMethod calls +// are presented to the type checker in the same order (go.dev/issue/61298). +func TestMethodOrdering(t *testing.T) { + const src = ` +package p + +type T struct{} + +func (T) a() {} +func (T) c() {} +func (T) b() {} +` + // should get the same method order each time + var methods []string + for i := 0; i < 5; i++ { + // collect T methods as provided in src + pkg := mustTypecheck(src, nil, nil) + T := pkg.Scope().Lookup("T").Type().(*Named) + + // add a few more methods manually + for _, name := range []string{"foo", "bar", "bal"} { + m := NewFunc(nopos, pkg, name, nil /* don't care about signature */) + T.AddMethod(m) + } + + // check method order + if i == 0 { + // first round: collect methods in given order + methods = make([]string, T.NumMethods()) + for j := range methods { + methods[j] = T.Method(j).Name() + } + } else { + // successive rounds: methods must appear in the same order + if got := T.NumMethods(); got != len(methods) { + t.Errorf("got %d methods, want %d", got, len(methods)) + continue + } + for j, m := range methods { + if got := T.Method(j).Name(); got != m { + t.Errorf("got method %s, want %s", got, m) + } + } + } + } +} diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go index 251587224b..e48a4895a7 100644 --- a/src/cmd/compile/internal/types2/object.go +++ b/src/cmd/compile/internal/types2/object.go @@ -9,6 +9,7 @@ import ( 
"cmd/compile/internal/syntax" "fmt" "go/constant" + "strings" "unicode" "unicode/utf8" ) @@ -50,7 +51,9 @@ type Object interface { setParent(*Scope) // sameId reports whether obj.Id() and Id(pkg, name) are the same. - sameId(pkg *Package, name string) bool + // If foldCase is true, names are considered equal if they are equal with case folding + // and their packages are ignored (e.g., pkg1.m, pkg1.M, pkg2.m, and pkg2.M are all equal). + sameId(pkg *Package, name string, foldCase bool) bool // scopePos returns the start position of the scope of this Object scopePos() syntax.Pos @@ -163,26 +166,24 @@ func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color } func (obj *object) setScopePos(pos syntax.Pos) { obj.scopePos_ = pos } -func (obj *object) sameId(pkg *Package, name string) bool { +func (obj *object) sameId(pkg *Package, name string, foldCase bool) bool { + // If we don't care about capitalization, we also ignore packages. + if foldCase && strings.EqualFold(obj.name, name) { + return true + } // spec: // "Two identifiers are different if they are spelled differently, // or if they appear in different packages and are not exported. // Otherwise, they are the same." - if name != obj.name { + if obj.name != name { return false } // obj.Name == name if obj.Exported() { return true } - // not exported, so packages must be the same (pkg == nil for - // fields in Universe scope; this can only happen for types - // introduced via Eval) - if pkg == nil || obj.pkg == nil { - return pkg == obj.pkg - } - // pkg != nil && obj.pkg != nil - return pkg.path == obj.pkg.path + // not exported, so packages must be the same + return samePkg(obj.pkg, pkg) } // less reports whether object a is ordered before object b. 
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go index 3f151007e5..15ec86fb5e 100644 --- a/src/cmd/compile/internal/types2/operand.go +++ b/src/cmd/compile/internal/types2/operand.go @@ -11,7 +11,6 @@ import ( "cmd/compile/internal/syntax" "fmt" "go/constant" - "go/token" . "internal/types/errors" ) @@ -27,7 +26,7 @@ const ( variable // operand is an addressable variable mapindex // operand is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment) value // operand is a computed value - nilvalue // operand is the nil value + nilvalue // operand is the nil value - only used by types2 commaok // like value, but operand may be used in a comma,ok expression commaerr // like commaok, but second value is error, not boolean cgofunc // operand is a cgo function @@ -42,7 +41,7 @@ var operandModeString = [...]string{ variable: "variable", mapindex: "map index expression", value: "value", - nilvalue: "nil", + nilvalue: "nil", // only used by types2 commaok: "comma, ok expression", commaerr: "comma, error expression", cgofunc: "cgo function", @@ -109,14 +108,20 @@ func (x *operand) Pos() syntax.Pos { // cgofunc ( of type ) func operandString(x *operand, qf Qualifier) string { // special-case nil - if x.mode == nilvalue { - switch x.typ { - case nil, Typ[Invalid]: - return "nil (with invalid type)" - case Typ[UntypedNil]: + if isTypes2 { + if x.mode == nilvalue { + switch x.typ { + case nil, Typ[Invalid]: + return "nil (with invalid type)" + case Typ[UntypedNil]: + return "nil" + default: + return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf)) + } + } + } else { // go/types + if x.mode == value && x.typ == Typ[UntypedNil] { return "nil" - default: - return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf)) } } @@ -124,7 +129,7 @@ func operandString(x *operand, qf Qualifier) string { var expr string if x.expr != nil { - expr = syntax.String(x.expr) + expr = ExprString(x.expr) } else { 
switch x.mode { case builtin: @@ -221,10 +226,10 @@ func (x *operand) setConst(k syntax.LitKind, lit string) { case syntax.StringLit: kind = UntypedString default: - unreachable() + panic("unreachable") } - val := constant.MakeFromLiteral(lit, kind2tok[k], 0) + val := makeFromLiteral(lit, k) if val.Kind() == constant.Unknown { x.mode = invalid x.typ = Typ[Invalid] @@ -236,7 +241,13 @@ func (x *operand) setConst(k syntax.LitKind, lit string) { } // isNil reports whether x is the (untyped) nil value. -func (x *operand) isNil() bool { return x.mode == nilvalue } +func (x *operand) isNil() bool { + if isTypes2 { + return x.mode == nilvalue + } else { // go/types + return x.mode == value && x.typ == Typ[UntypedNil] + } +} // assignableTo reports whether x is assignable to a variable of type T. If the // result is false and a non-nil cause is provided, it may be set to a more @@ -332,7 +343,7 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return false, IncompatibleAssign } - errorf := func(format string, args ...interface{}) { + errorf := func(format string, args ...any) { if check != nil && cause != nil { msg := check.sprintf(format, args...) if *cause != "" { @@ -385,12 +396,3 @@ func (x *operand) assignableTo(check *Checker, T Type, cause *string) (bool, Cod return false, IncompatibleAssign } - -// kind2tok translates syntax.LitKinds into token.Tokens. -var kind2tok = [...]token.Token{ - syntax.IntLit: token.INT, - syntax.FloatLit: token.FLOAT, - syntax.ImagLit: token.IMAG, - syntax.RuneLit: token.CHAR, - syntax.StringLit: token.STRING, -} diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go index 7a096e3d97..938f8dcf8b 100644 --- a/src/cmd/compile/internal/types2/predicates.go +++ b/src/cmd/compile/internal/types2/predicates.go @@ -205,6 +205,16 @@ func hasNil(t Type) bool { return false } +// samePkg reports whether packages a and b are the same. 
+func samePkg(a, b *Package) bool { + // package is nil for objects in universe scope + if a == nil || b == nil { + return a == b + } + // a != nil && b != nil + return a.path == b.path +} + // An ifacePair is a node in a stack of interface type pairs compared for identity. type ifacePair struct { x, y *Interface @@ -269,7 +279,7 @@ func (c *comparer) identical(x, y Type, p *ifacePair) bool { g := y.fields[i] if f.embedded != g.embedded || !c.ignoreTags && x.Tag(i) != y.Tag(i) || - !f.sameId(g.pkg, g.name) || + !f.sameId(g.pkg, g.name, false) || !c.identical(f.typ, g.typ, p) { return false } @@ -467,7 +477,7 @@ func (c *comparer) identical(x, y Type, p *ifacePair) bool { // avoid a crash in case of nil type default: - unreachable() + panic("unreachable") } return false diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go index 0cf7c9142e..f57234806e 100644 --- a/src/cmd/compile/internal/types2/resolver.go +++ b/src/cmd/compile/internal/types2/resolver.go @@ -314,11 +314,10 @@ func (check *Checker) collectObjects() { // the object may be imported into more than one file scope // concurrently. See go.dev/issue/32154.) 
if alt := fileScope.Lookup(name); alt != nil { - var err error_ - err.code = DuplicateDecl - err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name()) - err.recordAltDecl(alt) - check.report(&err) + err := check.newError(DuplicateDecl) + err.addf(s.LocalPkgName, "%s redeclared in this block", alt.Name()) + err.addAltDecl(alt) + err.report() } else { fileScope.insert(name, obj) check.dotImportMap[dotImportKey{fileScope, name}] = pkgName @@ -406,7 +405,6 @@ func (check *Checker) collectObjects() { } case *syntax.TypeDecl: - _ = len(s.TParamList) != 0 && check.verifyVersionf(s.TParamList[0], go1_18, "type parameter") obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil) check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, tdecl: s}) @@ -473,17 +471,16 @@ func (check *Checker) collectObjects() { for name, obj := range scope.elems { if alt := pkg.scope.Lookup(name); alt != nil { obj = resolve(name, obj) - var err error_ - err.code = DuplicateDecl + err := check.newError(DuplicateDecl) if pkg, ok := obj.(*PkgName); ok { - err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) - err.recordAltDecl(pkg) + err.addf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported()) + err.addAltDecl(pkg) } else { - err.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) - // TODO(gri) dot-imported objects don't have a position; recordAltDecl won't print anything - err.recordAltDecl(obj) + err.addf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg()) + // TODO(gri) dot-imported objects don't have a position; addAltDecl won't print anything + err.addAltDecl(obj) } - check.report(&err) + err.report() } } } diff --git a/src/cmd/compile/internal/types2/return.go b/src/cmd/compile/internal/types2/return.go index 01988b012e..4e9988755c 100644 --- a/src/cmd/compile/internal/types2/return.go +++ b/src/cmd/compile/internal/types2/return.go @@ -16,7 +16,7 @@ import ( 
func (check *Checker) isTerminating(s syntax.Stmt, label string) bool { switch s := s.(type) { default: - unreachable() + panic("unreachable") case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.SendStmt, *syntax.AssignStmt, *syntax.CallStmt: @@ -108,7 +108,7 @@ func (check *Checker) isTerminatingSwitch(body []*syntax.CaseClause, label strin func hasBreak(s syntax.Stmt, label string, implicit bool) bool { switch s := s.(type) { default: - unreachable() + panic("unreachable") case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.ExprStmt, *syntax.SendStmt, *syntax.AssignStmt, *syntax.CallStmt, diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go index 25bde6a794..b75e5cbaf7 100644 --- a/src/cmd/compile/internal/types2/scope.go +++ b/src/cmd/compile/internal/types2/scope.go @@ -273,20 +273,20 @@ func resolve(name string, obj Object) Object { // stub implementations so *lazyObject implements Object and we can // store them directly into Scope.elems. -func (*lazyObject) Parent() *Scope { panic("unreachable") } -func (*lazyObject) Pos() syntax.Pos { panic("unreachable") } -func (*lazyObject) Pkg() *Package { panic("unreachable") } -func (*lazyObject) Name() string { panic("unreachable") } -func (*lazyObject) Type() Type { panic("unreachable") } -func (*lazyObject) Exported() bool { panic("unreachable") } -func (*lazyObject) Id() string { panic("unreachable") } -func (*lazyObject) String() string { panic("unreachable") } -func (*lazyObject) order() uint32 { panic("unreachable") } -func (*lazyObject) color() color { panic("unreachable") } -func (*lazyObject) setType(Type) { panic("unreachable") } -func (*lazyObject) setOrder(uint32) { panic("unreachable") } -func (*lazyObject) setColor(color color) { panic("unreachable") } -func (*lazyObject) setParent(*Scope) { panic("unreachable") } -func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") } -func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") } 
-func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") } +func (*lazyObject) Parent() *Scope { panic("unreachable") } +func (*lazyObject) Pos() syntax.Pos { panic("unreachable") } +func (*lazyObject) Pkg() *Package { panic("unreachable") } +func (*lazyObject) Name() string { panic("unreachable") } +func (*lazyObject) Type() Type { panic("unreachable") } +func (*lazyObject) Exported() bool { panic("unreachable") } +func (*lazyObject) Id() string { panic("unreachable") } +func (*lazyObject) String() string { panic("unreachable") } +func (*lazyObject) order() uint32 { panic("unreachable") } +func (*lazyObject) color() color { panic("unreachable") } +func (*lazyObject) setType(Type) { panic("unreachable") } +func (*lazyObject) setOrder(uint32) { panic("unreachable") } +func (*lazyObject) setColor(color color) { panic("unreachable") } +func (*lazyObject) setParent(*Scope) { panic("unreachable") } +func (*lazyObject) sameId(*Package, string, bool) bool { panic("unreachable") } +func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") } +func (*lazyObject) setScopePos(syntax.Pos) { panic("unreachable") } diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go index dfbf3a0191..2d882b2ee4 100644 --- a/src/cmd/compile/internal/types2/selection.go +++ b/src/cmd/compile/internal/types2/selection.go @@ -163,7 +163,7 @@ func SelectionString(s *Selection, qf Qualifier) string { case MethodExpr: k = "method expr " default: - unreachable() + panic("unreachable") } var buf bytes.Buffer buf.WriteString(k) diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go index 8b896f7a90..bb4d32b016 100644 --- a/src/cmd/compile/internal/types2/signature.go +++ b/src/cmd/compile/internal/types2/signature.go @@ -108,9 +108,12 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] // - the receiver specification acts as local declaration for its 
type parameters, which may be blank _, rname, rparams := check.unpackRecv(recvPar.Type, true) if len(rparams) > 0 { + // The scope of the type parameter T in "func (r T[T]) f()" + // starts after f, not at "r"; see #52038. + scopePos := ftyp.Pos() tparams := make([]*TypeParam, len(rparams)) for i, rparam := range rparams { - tparams[i] = check.declareTypeParam(rparam) + tparams[i] = check.declareTypeParam(rparam, scopePos) } sig.rparams = bindTParams(tparams) // Blank identifiers don't get declared, so naive type-checking of the @@ -167,22 +170,26 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] check.collectTypeParams(&sig.tparams, tparams) } - // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their - // declarations and then squash that scope into the parent scope (and report any redeclarations at - // that time). + // Use a temporary scope for all parameter declarations and then + // squash that scope into the parent scope (and report any + // redeclarations at that time). + // + // TODO(adonovan): now that each declaration has the correct + // scopePos, there should be no need for scope squashing. + // Audit to ensure all lookups honor scopePos and simplify. scope := NewScope(check.scope, nopos, nopos, "function body (temp. 
scope)") - var recvList []*Var // TODO(gri) remove the need for making a list here + scopePos := syntax.EndPos(ftyp) // all parameters' scopes start after the signature + var recvList []*Var // TODO(gri) remove the need for making a list here if recvPar != nil { - recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false) // use rewritten receiver type, if any + recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false, scopePos) // use rewritten receiver type, if any } - params, variadic := check.collectParams(scope, ftyp.ParamList, true) - results, _ := check.collectParams(scope, ftyp.ResultList, false) + params, variadic := check.collectParams(scope, ftyp.ParamList, true, scopePos) + results, _ := check.collectParams(scope, ftyp.ResultList, false, scopePos) scope.Squash(func(obj, alt Object) { - var err error_ - err.code = DuplicateDecl - err.errorf(obj, "%s redeclared in this block", obj.Name()) - err.recordAltDecl(alt) - check.report(&err) + err := check.newError(DuplicateDecl) + err.addf(obj, "%s redeclared in this block", obj.Name()) + err.addAltDecl(alt) + err.report() }) if recvPar != nil { @@ -239,7 +246,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] case *TypeParam: // The underlying type of a receiver base type cannot be a // type parameter: "type T[P any] P" is not a valid declaration. - unreachable() + panic("unreachable") } if cause != "" { check.errorf(recv, InvalidRecv, "invalid receiver type %s (%s)", rtyp, cause) @@ -259,7 +266,7 @@ func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams [] // collectParams declares the parameters of list in scope and returns the corresponding // variable list. 
-func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool) (params []*Var, variadic bool) { +func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool, scopePos syntax.Pos) (params []*Var, variadic bool) { if list == nil { return } @@ -294,7 +301,7 @@ func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadic // ok to continue } par := NewParam(field.Name.Pos(), check.pkg, name, typ) - check.declare(scope, field.Name, par, scope.pos) + check.declare(scope, field.Name, par, scopePos) params = append(params, par) named = true } else { diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go index 486c05c61c..7d20c97010 100644 --- a/src/cmd/compile/internal/types2/sizes.go +++ b/src/cmd/compile/internal/types2/sizes.go @@ -94,7 +94,7 @@ func (s *StdSizes) Alignof(T Type) (result int64) { return s.WordSize } case *TypeParam, *Union: - unreachable() + panic("unreachable") } a := s.Sizeof(T) // may be 0 or negative // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1." 
@@ -221,7 +221,7 @@ func (s *StdSizes) Sizeof(T Type) int64 { assert(!isTypeParam(T)) return s.WordSize * 2 case *TypeParam, *Union: - unreachable() + panic("unreachable") } return s.WordSize // catch-all } diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go index 7c14e3476e..405af78572 100644 --- a/src/cmd/compile/internal/types2/stdlib_test.go +++ b/src/cmd/compile/internal/types2/stdlib_test.go @@ -311,6 +311,7 @@ func TestStdFixed(t *testing.T) { testTestDir(t, filepath.Join(testenv.GOROOT(t), "test", "fixedbugs"), "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore + "bug398.go", // types2 doesn't check for anonymous interface cycles (go.dev/issue/56103) "issue6889.go", // gc-specific test "issue11362.go", // canonical import path check "issue16369.go", // types2 handles this correctly - not an issue diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go index e4bda49c52..e79e4cd586 100644 --- a/src/cmd/compile/internal/types2/stmt.go +++ b/src/cmd/compile/internal/types2/stmt.go @@ -23,10 +23,6 @@ func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body check.trace(body.Pos(), "-- %s: %s", name, sig) } - // set function scope extent - sig.scope.pos = body.Pos() - sig.scope.end = syntax.EndPos(body) - // save/restore current environment and set up function environment // (and use 0 indentation at function start) defer func(env environment, indent int) { @@ -190,7 +186,7 @@ func (check *Checker) suspendedCall(keyword string, call syntax.Expr) { case statement: return default: - unreachable() + panic("unreachable") } check.errorf(&x, code, "%s %s %s", keyword, msg, &x) } @@ -264,11 +260,10 @@ L: // (quadratic algorithm, but these lists tend to be very short) for _, vt := range seen[val] { if Identical(v.typ, vt.typ) { - var err error_ - err.code = DuplicateCase - err.errorf(&v, "duplicate case %s in expression switch", 
&v) - err.errorf(vt.pos, "previous case") - check.report(&err) + err := check.newError(DuplicateCase) + err.addf(&v, "duplicate case %s in expression switch", &v) + err.addf(vt.pos, "previous case") + err.report() continue L } } @@ -311,11 +306,10 @@ L: if T != nil { Ts = TypeString(T, check.qualifier) } - var err error_ - err.code = DuplicateCase - err.errorf(e, "duplicate case %s in type switch", Ts) - err.errorf(other, "previous case") - check.report(&err) + err := check.newError(DuplicateCase) + err.addf(e, "duplicate case %s in type switch", Ts) + err.addf(other, "previous case") + err.report() continue L } } @@ -354,11 +348,10 @@ L: // if T != nil { // Ts = TypeString(T, check.qualifier) // } -// var err error_ -// err.code = _DuplicateCase -// err.errorf(e, "duplicate case %s in type switch", Ts) -// err.errorf(other, "previous case") -// check.report(&err) +// err := check.newError(_DuplicateCase) +// err.addf(e, "duplicate case %s in type switch", Ts) +// err.addf(other, "previous case") +// err.report() // continue L // } // seen[hash] = e @@ -459,7 +452,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { check.errorf(s.Lhs, NonNumericIncDec, invalidOp+"%s%s%s (non-numeric type %s)", s.Lhs, s.Op, s.Op, x.typ) return } - check.assignVar(s.Lhs, nil, &x) + check.assignVar(s.Lhs, nil, &x, "assignment") return } @@ -482,7 +475,7 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { var x operand check.binary(&x, nil, lhs[0], rhs[0], s.Op) - check.assignVar(lhs[0], nil, &x) + check.assignVar(lhs[0], nil, &x, "assignment") case *syntax.CallStmt: kind := "go" @@ -502,11 +495,10 @@ func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) { // with the same name as a result parameter is in scope at the place of the return." 
for _, obj := range res.vars { if alt := check.lookup(obj.name); alt != nil && alt != obj { - var err error_ - err.code = OutOfScopeResult - err.errorf(s, "result parameter %s not in scope at return", obj.name) - err.errorf(alt, "inner declaration of %s", obj) - check.report(&err) + err := check.newError(OutOfScopeResult) + err.addf(s, "result parameter %s not in scope at return", obj.name) + err.addf(alt, "inner declaration of %s", obj) + err.report() // ok to continue } } @@ -830,7 +822,7 @@ func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, gu func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) { // Convert syntax form to local variables. - type expr = syntax.Expr + type Expr = syntax.Expr type identType = syntax.Name identName := func(n *identType) string { return n.Value } sKey := rclause.Lhs // possibly nil @@ -865,7 +857,9 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s var key, val Type if x.mode != invalid { // Ranging over a type parameter is permitted if it has a core type. 
- k, v, cause, isFunc, ok := rangeKeyVal(x.typ) + k, v, cause, isFunc, ok := rangeKeyVal(x.typ, func(v goVersion) bool { + return check.allowVersion(check.pkg, x.expr, v) + }) switch { case !ok && cause != "": check.softErrorf(&x, InvalidRangeExpr, "cannot range over %s: %s", &x, cause) @@ -901,8 +895,10 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s // (irregular assignment, cannot easily map to existing assignment checks) // lhs expressions and initialization value (rhs) types - lhs := [2]expr{sKey, sValue} - rhs := [2]Type{key, val} // key, val may be nil + lhs := [2]Expr{sKey, sValue} // sKey, sValue may be nil + rhs := [2]Type{key, val} // key, val may be nil + + constIntRange := x.mode == constant_ && isInteger(x.typ) if isDef { // short variable declaration @@ -929,11 +925,13 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s } // initialize lhs variable - if typ := rhs[i]; typ != nil { + if constIntRange { + check.initVar(obj, &x, "range clause") + } else if typ := rhs[i]; typ != nil { x.mode = value x.expr = lhs // we don't have a better rhs expression to use here x.typ = typ - check.initVar(obj, &x, "range clause") + check.initVar(obj, &x, "assignment") // error is on variable, use "assignment" not "range clause" } else { obj.typ = Typ[Invalid] obj.used = true // don't complain about unused variable @@ -949,35 +947,47 @@ func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *s } else { check.error(noNewVarPos, NoNewVar, "no new variables on left side of :=") } - } else { + } else if sKey != nil /* lhs[0] != nil */ { // ordinary assignment for i, lhs := range lhs { if lhs == nil { continue } - if typ := rhs[i]; typ != nil { + + if constIntRange { + check.assignVar(lhs, nil, &x, "range clause") + } else if typ := rhs[i]; typ != nil { x.mode = value x.expr = lhs // we don't have a better rhs expression to use here x.typ = typ - check.assignVar(lhs, nil, &x) + 
check.assignVar(lhs, nil, &x, "assignment") // error is on variable, use "assignment" not "range clause" } } + } else if constIntRange { + // If we don't have any iteration variables, we still need to + // check that a (possibly untyped) integer range expression x + // is valid. + // We do this by checking the assignment _ = x. This ensures + // that an untyped x can be converted to a value of type int. + check.assignment(&x, nil, "range clause") } check.stmt(inner, s.Body) } // RangeKeyVal returns the key and value types for a range over typ. +// Exported for use by the compiler (does not exist in go/types). func RangeKeyVal(typ Type) (Type, Type) { - key, val, _, _, _ := rangeKeyVal(typ) + key, val, _, _, _ := rangeKeyVal(typ, nil) return key, val } // rangeKeyVal returns the key and value type produced by a range clause -// over an expression of type typ. If the range clause is not permitted, -// rangeKeyVal returns ok = false. When ok = false, rangeKeyVal may also -// return a reason in cause. -func rangeKeyVal(typ Type) (key, val Type, cause string, isFunc, ok bool) { +// over an expression of type typ. +// If allowVersion != nil, it is used to check the required language version. +// If the range clause is not permitted, rangeKeyVal returns ok = false. +// When ok = false, rangeKeyVal may also return a reason in cause. 
+func rangeKeyVal(typ Type, allowVersion func(goVersion) bool) (key, val Type, cause string, isFunc, ok bool) { bad := func(cause string) (Type, Type, string, bool, bool) { return Typ[Invalid], Typ[Invalid], cause, false, false } @@ -995,6 +1005,9 @@ func rangeKeyVal(typ Type) (key, val Type, cause string, isFunc, ok bool) { return Typ[Int], universeRune, "", false, true // use 'rune' name } if isInteger(typ) { + if allowVersion != nil && !allowVersion(go1_22) { + return bad("requires go1.22 or later") + } return orig, nil, "", false, true } case *Array: @@ -1009,8 +1022,8 @@ func rangeKeyVal(typ Type) (key, val Type, cause string, isFunc, ok bool) { } return typ.elem, nil, "", false, true case *Signature: - if !buildcfg.Experiment.RangeFunc { - break + if !buildcfg.Experiment.RangeFunc && allowVersion != nil && !allowVersion(go1_23) { + return bad("requires go1.23 or later") } assert(typ.Recv() == nil) switch { diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go index 9e46b349a3..f5cdc472f7 100644 --- a/src/cmd/compile/internal/types2/struct.go +++ b/src/cmd/compile/internal/types2/struct.go @@ -80,7 +80,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // current field typ and tag var typ Type var tag string - add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) { + add := func(ident *syntax.Name, embedded bool) { if tag != "" && tags == nil { tags = make([]string, len(fields)) } @@ -88,6 +88,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { tags = append(tags, tag) } + pos := ident.Pos() name := ident.Value fld := NewField(pos, check.pkg, name, typ, embedded) // spec: "Within a struct, non-blank field names must be unique." 
@@ -101,10 +102,10 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { // fields with errors; this keeps the number of struct fields in sync // with the source as long as the fields are _ or have different names // (go.dev/issue/25627). - addInvalid := func(ident *syntax.Name, pos syntax.Pos) { + addInvalid := func(ident *syntax.Name) { typ = Typ[Invalid] tag = "" - add(ident, true, pos) + add(ident, true) } var prev syntax.Expr @@ -121,7 +122,7 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { } if f.Name != nil { // named field - add(f.Name, false, f.Name.Pos()) + add(f.Name, false) } else { // embedded field // spec: "An embedded type must be specified as a type name T or as a @@ -131,11 +132,11 @@ func (check *Checker) structType(styp *Struct, e *syntax.StructType) { name := embeddedFieldIdent(f.Type) if name == nil { check.errorf(pos, InvalidSyntaxTree, "invalid embedded field type %s", f.Type) - name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos - addInvalid(name, pos) + name = syntax.NewName(pos, "_") + addInvalid(name) continue } - add(name, true, name.Pos()) // struct{p.T} field has position of T + add(name, true) // struct{p.T} field has position of T // Because we have a name, typ must be of the form T or *T, where T is the name // of a (named or alias) type, and t (= deref(typ)) must be the type of T. 
@@ -199,11 +200,10 @@ func embeddedFieldIdent(e syntax.Expr) *syntax.Name { func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool { if alt := oset.insert(obj); alt != nil { - var err error_ - err.code = DuplicateDecl - err.errorf(pos, "%s redeclared", obj.Name()) - err.recordAltDecl(alt) - check.report(&err) + err := check.newError(DuplicateDecl) + err.addf(pos, "%s redeclared", obj.Name()) + err.addAltDecl(alt) + err.report() return false } return true diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go index aefa53603f..fa636a1e1e 100644 --- a/src/cmd/compile/internal/types2/subst.go +++ b/src/cmd/compile/internal/types2/subst.go @@ -95,6 +95,18 @@ func (subst *subster) typ(typ Type) Type { case *Basic: // nothing to do + case *Alias: + rhs := subst.typ(t.fromRHS) + if rhs != t.fromRHS { + // This branch cannot be reached because the RHS of an alias + // may only contain type parameters of an enclosing function. + // Such function bodies are never "instantiated" and thus + // substitution is not called on locally declared alias types. 
+ // TODO(gri) adjust once parameterized aliases are supported + panic("unreachable for unparameterized aliases") + // return subst.check.newAlias(t.obj, rhs) + } + case *Array: elem := subst.typOrNil(t.elem) if elem != t.elem { @@ -169,6 +181,7 @@ func (subst *subster) typ(typ Type) Type { if mcopied || ecopied { iface := subst.check.newInterface() iface.embeddeds = embeddeds + iface.embedPos = t.embedPos iface.implicit = t.implicit assert(t.complete) // otherwise we are copying incomplete data iface.complete = t.complete @@ -270,7 +283,7 @@ func (subst *subster) typ(typ Type) Type { return subst.smap.lookup(t) default: - unreachable() + panic("unreachable") } return typ diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go index 719041657c..778809e42e 100644 --- a/src/cmd/compile/internal/types2/typeset.go +++ b/src/cmd/compile/internal/types2/typeset.go @@ -57,7 +57,7 @@ func (s *_TypeSet) Method(i int) *Func { return s.methods[i] } // LookupMethod returns the index of and method with matching package and name, or (-1, nil). func (s *_TypeSet) LookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) { - return lookupMethod(s.methods, pkg, name, foldCase) + return methodIndex(s.methods, pkg, name, foldCase) } func (s *_TypeSet) String() string { @@ -161,6 +161,10 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ // set (and don't store it!), so that we still compute the full // type set eventually. Instead, return the top type set and // let any follow-on errors play out. + // + // TODO(gri) Consider recording when this happens and reporting + // it as an error (but only if there were no other errors so to + // to not have unnecessary follow-on errors). 
if !ityp.complete { return &topTypeSet } @@ -221,11 +225,10 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ mpos[m] = pos case explicit: if check != nil { - var err error_ - err.code = DuplicateDecl - err.errorf(pos, "duplicate method %s", m.name) - err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) - check.report(&err) + err := check.newError(DuplicateDecl) + err.addf(atPos(pos), "duplicate method %s", m.name) + err.addf(atPos(mpos[other.(*Func)]), "other declaration of %s", m.name) + err.report() } default: // We have a duplicate method name in an embedded (not explicitly declared) method. @@ -235,14 +238,13 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ // error message. if check != nil { check.later(func() { - if !check.allowVersion(m.pkg, pos, go1_14) || !Identical(m.typ, other.Type()) { - var err error_ - err.code = DuplicateDecl - err.errorf(pos, "duplicate method %s", m.name) - err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name) - check.report(&err) + if !check.allowVersion(m.pkg, atPos(pos), go1_14) || !Identical(m.typ, other.Type()) { + err := check.newError(DuplicateDecl) + err.addf(atPos(pos), "duplicate method %s", m.name) + err.addf(atPos(mpos[other.(*Func)]), "other declaration of %s", m.name) + err.report() } - }).describef(pos, "duplicate method check for %s", m.name) + }).describef(atPos(pos), "duplicate method check for %s", m.name) } } } @@ -270,7 +272,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ assert(!isTypeParam(typ)) tset := computeInterfaceTypeSet(check, pos, u) // If typ is local, an error was already reported where typ is specified/defined. 
- if check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(pos, go1_18, "embedding constraint interface %s", typ) { + if check != nil && check.isImportedConstraint(typ) && !check.verifyVersionf(atPos(pos), go1_18, "embedding constraint interface %s", typ) { continue } comparable = tset.comparable @@ -279,7 +281,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ } terms = tset.terms case *Union: - if check != nil && !check.verifyVersionf(pos, go1_18, "embedding interface element %s", u) { + if check != nil && !check.verifyVersionf(atPos(pos), go1_18, "embedding interface element %s", u) { continue } tset := computeUnionTypeSet(check, unionSets, pos, u) @@ -293,7 +295,7 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ if !isValid(u) { continue } - if check != nil && !check.verifyVersionf(pos, go1_18, "embedding non-interface type %s", typ) { + if check != nil && !check.verifyVersionf(atPos(pos), go1_18, "embedding non-interface type %s", typ) { continue } terms = termlist{{false, typ}} @@ -304,7 +306,6 @@ func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_ // separately. Here we only need to intersect the term lists and comparable bits. 
allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable) } - ityp.embedPos = nil // not needed anymore (errors have been reported) ityp.tset.comparable = allComparable if len(allMethods) != 0 { @@ -404,7 +405,7 @@ func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syn allTerms = allTerms.union(terms) if len(allTerms) > maxTermCount { if check != nil { - check.errorf(pos, InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount) + check.errorf(atPos(pos), InvalidUnion, "cannot handle more than %d union terms (implementation limitation)", maxTermCount) } unionSets[utyp] = &invalidTypeSet return unionSets[utyp] diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go index 0e0da0f7f6..723c074e60 100644 --- a/src/cmd/compile/internal/types2/typestring.go +++ b/src/cmd/compile/internal/types2/typestring.go @@ -16,18 +16,18 @@ import ( ) // A Qualifier controls how named package-level objects are printed in -// calls to TypeString, ObjectString, and SelectionString. +// calls to [TypeString], [ObjectString], and [SelectionString]. // // These three formatting routines call the Qualifier for each // package-level object O, and if the Qualifier returns a non-empty // string p, the object is printed in the form p.O. // If it returns an empty string, only the object name O is printed. // -// Using a nil Qualifier is equivalent to using (*Package).Path: the +// Using a nil Qualifier is equivalent to using (*[Package]).Path: the // object is qualified by the import path, e.g., "encoding/json.Marshal". type Qualifier func(*Package) string -// RelativeTo returns a Qualifier that fully qualifies members of +// RelativeTo returns a [Qualifier] that fully qualifies members of // all packages other than pkg. 
func RelativeTo(pkg *Package) Qualifier { if pkg == nil { @@ -42,7 +42,7 @@ func RelativeTo(pkg *Package) Qualifier { } // TypeString returns the string representation of typ. -// The Qualifier controls the printing of +// The [Qualifier] controls the printing of // package-level objects, and may be nil. func TypeString(typ Type, qf Qualifier) string { var buf bytes.Buffer @@ -51,14 +51,14 @@ func TypeString(typ Type, qf Qualifier) string { } // WriteType writes the string representation of typ to buf. -// The Qualifier controls the printing of +// The [Qualifier] controls the printing of // package-level objects, and may be nil. func WriteType(buf *bytes.Buffer, typ Type, qf Qualifier) { newTypeWriter(buf, qf).typ(typ) } // WriteSignature writes the representation of the signature sig to buf, -// without a leading "func" keyword. The Qualifier controls the printing +// without a leading "func" keyword. The [Qualifier] controls the printing // of package-level objects, and may be nil. func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) { newTypeWriter(buf, qf).signature(sig) @@ -322,7 +322,13 @@ func (w *typeWriter) typ(typ Type) { // error messages. This doesn't need to be super-elegant; we just // need a clear indication that this is not a predeclared name. if w.ctxt == nil && Universe.Lookup(t.obj.name) != nil { - w.string(fmt.Sprintf(" /* with %s declared at %s */", t.obj.name, t.obj.Pos())) + if isTypes2 { + w.string(fmt.Sprintf(" /* with %s declared at %v */", t.obj.name, t.obj.Pos())) + } else { + // Can't print position information because + // we don't have a token.FileSet accessible. + w.string("/* type parameter */") + } } } @@ -331,8 +337,6 @@ func (w *typeWriter) typ(typ Type) { if w.ctxt != nil { // TODO(gri) do we need to print the alias type name, too? 
w.typ(Unalias(t.obj.typ)) - } else { - w.string(fmt.Sprintf(" /* = %s */", Unalias(t.obj.typ))) } default: diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go index 0ee92be6ee..be7c306a8d 100644 --- a/src/cmd/compile/internal/types2/typexpr.go +++ b/src/cmd/compile/internal/types2/typexpr.go @@ -48,6 +48,20 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType } check.recordUse(e, obj) + // If we want a type but don't have one, stop right here and avoid potential problems + // with missing underlying types. This also gives better error messages in some cases + // (see go.dev/issue/65344). + _, gotType := obj.(*TypeName) + if !gotType && wantType { + check.errorf(e, NotAType, "%s is not a type", obj.Name()) + // avoid "declared but not used" errors + // (don't use Checker.use - we don't want to evaluate too much) + if v, _ := obj.(*Var); v != nil && v.pkg == check.pkg /* see Checker.use1 */ { + v.used = true + } + return + } + // Type-check the object. // Only call Checker.objDecl if the object doesn't have a type yet // (in which case we must actually determine it) or the object is a @@ -57,7 +71,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType // informative "not a type/value" error that this function's caller // will issue (see go.dev/issue/25790). typ := obj.Type() - if _, gotType := obj.(*TypeName); typ == nil || gotType && wantType { + if typ == nil || gotType && wantType { check.objDecl(obj, def) typ = obj.Type() // type must have been assigned by Checker.objDecl } @@ -125,7 +139,7 @@ func (check *Checker) ident(x *operand, e *syntax.Name, def *TypeName, wantType x.mode = nilvalue default: - unreachable() + panic("unreachable") } x.typ = typ @@ -463,7 +477,7 @@ func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def * // errors. 
check.recordInstance(x, inst.TypeArgs().list(), inst) - if check.validateTArgLen(x.Pos(), inst.TypeParams().Len(), inst.TypeArgs().Len()) { + if check.validateTArgLen(x.Pos(), inst.obj.name, inst.TypeParams().Len(), inst.TypeArgs().Len()) { if i, err := check.verify(x.Pos(), inst.TypeParams().list(), inst.TypeArgs().list(), check.context()); err != nil { // best position for error reporting pos := x.Pos() diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go index 8218939b68..6838f270c1 100644 --- a/src/cmd/compile/internal/types2/unify.go +++ b/src/cmd/compile/internal/types2/unify.go @@ -608,7 +608,7 @@ func (u *unifier) nify(x, y Type, mode unifyMode, p *ifacePair) (result bool) { g := y.fields[i] if f.embedded != g.embedded || x.Tag(i) != y.Tag(i) || - !f.sameId(g.pkg, g.name) || + !f.sameId(g.pkg, g.name, false) || !u.nify(f.typ, g.typ, emode, p) { return false } diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go index c8be81b9eb..8e1e4a2bb7 100644 --- a/src/cmd/compile/internal/types2/universe.go +++ b/src/cmd/compile/internal/types2/universe.go @@ -279,7 +279,7 @@ func def(obj Object) { case *Builtin: obj.pkg = Unsafe default: - unreachable() + panic("unreachable") } } if scope.Insert(obj) != nil { diff --git a/src/cmd/compile/internal/types2/util.go b/src/cmd/compile/internal/types2/util.go index 01da1c12ca..0422c03346 100644 --- a/src/cmd/compile/internal/types2/util.go +++ b/src/cmd/compile/internal/types2/util.go @@ -9,7 +9,13 @@ package types2 -import "cmd/compile/internal/syntax" +import ( + "cmd/compile/internal/syntax" + "go/constant" + "go/token" +) + +const isTypes2 = true // cmpPos compares the positions p and q and returns a result r as follows: // @@ -20,3 +26,37 @@ import "cmd/compile/internal/syntax" // If p and q are in different files, p is before q if the filename // of p sorts lexicographically before the filename of q. 
func cmpPos(p, q syntax.Pos) int { return p.Cmp(q) } + +// hasDots reports whether the last argument in the call is followed by ... +func hasDots(call *syntax.CallExpr) bool { return call.HasDots } + +// dddErrPos returns the node (poser) for reporting an invalid ... use in a call. +func dddErrPos(call *syntax.CallExpr) *syntax.CallExpr { + // TODO(gri) should use "..." instead of call position + return call +} + +// argErrPos returns the node (poser) for reportign an invalid argument count. +func argErrPos(call *syntax.CallExpr) *syntax.CallExpr { return call } + +// ExprString returns a string representation of x. +func ExprString(x syntax.Node) string { return syntax.String(x) } + +// startPos returns the start position of node n. +func startPos(n syntax.Node) syntax.Pos { return syntax.StartPos(n) } + +// endPos returns the position of the first character immediately after node n. +func endPos(n syntax.Node) syntax.Pos { return syntax.EndPos(n) } + +// makeFromLiteral returns the constant value for the given literal string and kind. 
+func makeFromLiteral(lit string, kind syntax.LitKind) constant.Value { + return constant.MakeFromLiteral(lit, kind2tok[kind], 0) +} + +var kind2tok = [...]token.Token{ + syntax.IntLit: token.INT, + syntax.FloatLit: token.FLOAT, + syntax.ImagLit: token.IMAG, + syntax.RuneLit: token.CHAR, + syntax.StringLit: token.STRING, +} diff --git a/src/cmd/compile/internal/types2/util_test.go b/src/cmd/compile/internal/types2/util_test.go index 4cbd002355..70058aad84 100644 --- a/src/cmd/compile/internal/types2/util_test.go +++ b/src/cmd/compile/internal/types2/util_test.go @@ -7,6 +7,11 @@ package types2 -import "cmd/compile/internal/syntax" +import ( + "cmd/compile/internal/syntax" +) func CmpPos(p, q syntax.Pos) int { return cmpPos(p, q) } + +func ScopeComment(s *Scope) string { return s.comment } +func ObjectScopePos(obj Object) syntax.Pos { return obj.scopePos() } diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go index a880a3d933..7397318511 100644 --- a/src/cmd/compile/internal/types2/validtype.go +++ b/src/cmd/compile/internal/types2/validtype.go @@ -4,12 +4,14 @@ package types2 +import "cmd/compile/internal/syntax" + // validType verifies that the given type does not "expand" indefinitely // producing a cycle in the type graph. // (Cycles involving alias types, as in "type A = [10]A" are detected // earlier, via the objDecl cycle detection mechanism.) func (check *Checker) validType(typ *Named) { - check.validType0(typ, nil, nil) + check.validType0(nopos, typ, nil, nil) } // validType0 checks if the given type is valid. If typ is a type parameter @@ -22,8 +24,21 @@ func (check *Checker) validType(typ *Named) { // of) F in S, leading to the nest S->F. If a type appears in its own nest // (say S->F->S) we have an invalid recursive type. The path list is the full // path of named types in a cycle, it is only needed for error reporting. 
-func (check *Checker) validType0(typ Type, nest, path []*Named) bool { - switch t := Unalias(typ).(type) { +func (check *Checker) validType0(pos syntax.Pos, typ Type, nest, path []*Named) bool { + typ = Unalias(typ) + + if check.conf.Trace { + if t, _ := typ.(*Named); t != nil && t.obj != nil /* obj should always exist but be conservative */ { + pos = t.obj.pos + } + check.indent++ + check.trace(pos, "validType(%s) nest %v, path %v", typ, pathString(makeObjList(nest)), pathString(makeObjList(path))) + defer func() { + check.indent-- + }() + } + + switch t := typ.(type) { case nil: // We should never see a nil type but be conservative and panic // only in debug mode. @@ -32,38 +47,49 @@ func (check *Checker) validType0(typ Type, nest, path []*Named) bool { } case *Array: - return check.validType0(t.elem, nest, path) + return check.validType0(pos, t.elem, nest, path) case *Struct: for _, f := range t.fields { - if !check.validType0(f.typ, nest, path) { + if !check.validType0(pos, f.typ, nest, path) { return false } } case *Union: for _, t := range t.terms { - if !check.validType0(t.typ, nest, path) { + if !check.validType0(pos, t.typ, nest, path) { return false } } case *Interface: for _, etyp := range t.embeddeds { - if !check.validType0(etyp, nest, path) { + if !check.validType0(pos, etyp, nest, path) { return false } } case *Named: - // Exit early if we already know t is valid. - // This is purely an optimization but it prevents excessive computation - // times in pathological cases such as testdata/fixedbugs/issue6977.go. - // (Note: The valids map could also be allocated locally, once for each - // validType call.) - if check.valids.lookup(t) != nil { - break - } + // TODO(gri) The optimization below is incorrect (see go.dev/issue/65711): + // in that issue `type A[P any] [1]P` is a valid type on its own + // and the (uninstantiated) A is recorded in check.valids. 
As a + // consequence, when checking the remaining declarations, which + // are not valid, the validity check ends prematurely because A + // is considered valid, even though its validity depends on the + // type argument provided to it. + // + // A correct optimization is important for pathological cases. + // Keep code around for reference until we found an optimization. + // + // // Exit early if we already know t is valid. + // // This is purely an optimization but it prevents excessive computation + // // times in pathological cases such as testdata/fixedbugs/issue6977.go. + // // (Note: The valids map could also be allocated locally, once for each + // // validType call.) + // if check.valids.lookup(t) != nil { + // break + // } // Don't report a 2nd error if we already know the type is invalid // (e.g., if a cycle was detected earlier, via under). @@ -109,7 +135,7 @@ func (check *Checker) validType0(typ Type, nest, path []*Named) bool { // index of t in nest. Search again. for start, p := range path { if Identical(p, t) { - check.cycleError(makeObjList(path[start:])) + check.cycleError(makeObjList(path[start:]), 0) return false } } @@ -121,11 +147,12 @@ func (check *Checker) validType0(typ Type, nest, path []*Named) bool { // Every type added to nest is also added to path; thus every type that is in nest // must also be in path (invariant). But not every type in path is in nest, since // nest may be pruned (see below, *TypeParam case). - if !check.validType0(t.Origin().fromRHS, append(nest, t), append(path, t)) { + if !check.validType0(pos, t.Origin().fromRHS, append(nest, t), append(path, t)) { return false } - check.valids.add(t) // t is valid + // see TODO above + // check.valids.add(t) // t is valid case *TypeParam: // A type parameter stands for the type (argument) it was instantiated with. 
@@ -146,7 +173,7 @@ func (check *Checker) validType0(typ Type, nest, path []*Named) bool { // the current (instantiated) type (see the example // at the end of this file). // For error reporting we keep the full path. - return check.validType0(targ, nest[:len(nest)-1], path) + return check.validType0(pos, targ, nest[:len(nest)-1], path) } } } diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go index 12c86ef9fe..bcd47fbb7e 100644 --- a/src/cmd/compile/internal/types2/version.go +++ b/src/cmd/compile/internal/types2/version.go @@ -9,7 +9,6 @@ import ( "fmt" "go/version" "internal/goversion" - "strings" ) // A goVersion is a Go language version string of the form "go1.%d" @@ -43,40 +42,13 @@ var ( go1_18 = asGoVersion("go1.18") go1_20 = asGoVersion("go1.20") go1_21 = asGoVersion("go1.21") + go1_22 = asGoVersion("go1.22") + go1_23 = asGoVersion("go1.23") // current (deployed) Go version go_current = asGoVersion(fmt.Sprintf("go1.%d", goversion.Version)) ) -// langCompat reports an error if the representation of a numeric -// literal is not compatible with the current language version. -func (check *Checker) langCompat(lit *syntax.BasicLit) { - s := lit.Value - if len(s) <= 2 || check.allowVersion(check.pkg, lit, go1_13) { - return - } - // len(s) > 2 - if strings.Contains(s, "_") { - check.versionErrorf(lit, go1_13, "underscores in numeric literals") - return - } - if s[0] != '0' { - return - } - radix := s[1] - if radix == 'b' || radix == 'B' { - check.versionErrorf(lit, go1_13, "binary literals") - return - } - if radix == 'o' || radix == 'O' { - check.versionErrorf(lit, go1_13, "0o/0O-style octal literals") - return - } - if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') { - check.versionErrorf(lit, go1_13, "hexadecimal floating-point literals") - } -} - // allowVersion reports whether the given package is allowed to use version v. 
func (check *Checker) allowVersion(pkg *Package, at poser, v goVersion) bool { // We assume that imported packages have all been checked, diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go index afd1a326d3..fc3b858a80 100644 --- a/src/cmd/compile/internal/walk/assign.go +++ b/src/cmd/compile/internal/walk/assign.go @@ -6,6 +6,7 @@ package walk import ( "go/constant" + "internal/abi" "cmd/compile/internal/base" "cmd/compile/internal/ir" @@ -168,7 +169,7 @@ func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node { a := n.Lhs[0] var call *ir.CallExpr - if w := t.Elem().Size(); w <= zeroValSize { + if w := t.Elem().Size(); w <= abi.ZeroValSize { fn := mapfn(mapaccess2[fast], t, false) call = mkcall1(fn, fn.Type().ResultsTuple(), init, reflectdata.IndexMapRType(base.Pos, r), r.X, key) } else { diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go index 90c32154b9..9a2c1353bb 100644 --- a/src/cmd/compile/internal/walk/builtin.go +++ b/src/cmd/compile/internal/walk/builtin.go @@ -8,6 +8,7 @@ import ( "fmt" "go/constant" "go/token" + "internal/abi" "strings" "cmd/compile/internal/base" @@ -152,9 +153,7 @@ func walkClear(n *ir.UnaryExpr) ir.Node { // walkClose walks an OCLOSE node. func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { - // cannot use chanfn - closechan takes any, not chan any - fn := typecheck.LookupRuntime("closechan", n.X.Type()) - return mkcall1(fn, nil, init, n.X) + return mkcall1(chanfn("closechan", 1, n.X.Type()), nil, init, n.X) } // Lower copy(a, b) to a memmove call or a runtime call. @@ -262,6 +261,12 @@ func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node { _, len := backingArrayPtrLen(cheapExpr(conv.X, init)) return len } + if isChanLen(n) { + // cannot use chanfn - closechan takes any, not chan any, + // because it accepts both send-only and recv-only channels. 
+ fn := typecheck.LookupRuntime("chanlen", n.X.Type()) + return mkcall1(fn, n.Type(), init, n.X) + } n.X = walkExpr(n.X, init) @@ -321,7 +326,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // Maximum key and elem size is 128 bytes, larger objects // are stored with an indirection. So max bucket size is 2048+eps. if !ir.IsConst(hint, constant.Int) || - constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapBucketCount)) { // In case hint is larger than BUCKETSIZE runtime.makemap // will allocate the buckets on the heap, see #20184 @@ -332,7 +337,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { // h.buckets = b // } - nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, reflectdata.BUCKETSIZE)), nil, nil) + nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.MapBucketCount)), nil, nil) nif.Likely = true // var bv bmap @@ -347,7 +352,7 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { } } - if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) { + if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.MapBucketCount)) { // Handling make(map[any]any) and // make(map[any]any, hint) where hint <= BUCKETSIZE // special allows for faster map initialization and @@ -358,8 +363,8 @@ func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node { if n.Esc() == ir.EscNone { // Only need to initialize h.hash0 since // hmap h has been allocated on the stack already. 
- // h.hash0 = fastrand() - rand := mkcall("fastrand", types.Types[types.TUINT32], init) + // h.hash0 = rand32() + rand := mkcall("rand32", types.Types[types.TUINT32], init) hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand)) return typecheck.ConvNop(h, t) @@ -886,3 +891,10 @@ func isByteCount(n ir.Node) bool { return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && (n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STR || n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STRTMP) } + +// isChanLen reports whether n is of the form len(c) for a channel c. +// Note that this does not check for -n or instrumenting because this +// is a correctness rewrite, not an optimization. +func isChanLen(n ir.Node) bool { + return n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Type().IsChan() +} diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go index 6fc2317afb..38c6c03dc4 100644 --- a/src/cmd/compile/internal/walk/closure.go +++ b/src/cmd/compile/internal/walk/closure.go @@ -144,7 +144,7 @@ func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node { return walkExpr(cfn, init) } -// closureArgs returns a slice of expressions that an be used to +// closureArgs returns a slice of expressions that can be used to // initialize the given closure's free variables. 
These correspond // one-to-one with the variables in clo.Func.ClosureVars, and will be // either an ONAME node (if the variable is captured by value) or an diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go index 64d20b555e..f73b5d9503 100644 --- a/src/cmd/compile/internal/walk/expr.go +++ b/src/cmd/compile/internal/walk/expr.go @@ -7,6 +7,7 @@ package walk import ( "fmt" "go/constant" + "internal/abi" "internal/buildcfg" "strings" @@ -722,7 +723,7 @@ func makeTypeAssertDescriptor(target *types.Type, canFail bool) *obj.LSym { typeAssertGen++ c := rttype.NewCursor(lsym, 0, rttype.TypeAssert) c.Field("Cache").WritePtr(typecheck.LookupRuntimeVar("emptyTypeAssertCache")) - c.Field("Inter").WritePtr(reflectdata.TypeSym(target).Linksym()) + c.Field("Inter").WritePtr(reflectdata.TypeLinksym(target)) c.Field("CanFail").WriteBool(canFail) objw.Global(lsym, int32(rttype.TypeAssert.Size()), obj.LOCAL) lsym.Gotype = reflectdata.TypeLinksym(rttype.TypeAssert) @@ -825,7 +826,7 @@ func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node { switch { case n.Assigned: mapFn = mapfn(mapassign[fast], t, false) - case t.Elem().Size() > zeroValSize: + case t.Elem().Size() > abi.ZeroValSize: args = append(args, reflectdata.ZeroAddr(t.Elem().Size())) mapFn = mapfn("mapaccess1_fat", t, true) default: diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go index 4d9b2fbee5..179fbdb99e 100644 --- a/src/cmd/compile/internal/walk/order.go +++ b/src/cmd/compile/internal/walk/order.go @@ -11,6 +11,7 @@ import ( "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/ssa" "cmd/compile/internal/staticinit" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -231,14 +232,29 @@ func (o *orderState) addrTemp(n ir.Node) ir.Node { vstat = typecheck.Expr(vstat).(*ir.Name) return vstat } + + // Prevent taking the address of an SSA-able local variable 
(#63332). + // + // TODO(mdempsky): Note that OuterValue unwraps OCONVNOPs, but + // IsAddressable does not. It should be possible to skip copying for + // at least some of these OCONVNOPs (e.g., reinsert them after the + // OADDR operation), but at least walkCompare needs to be fixed to + // support that (see trybot failures on go.dev/cl/541715, PS1). if ir.IsAddressable(n) { + if name, ok := ir.OuterValue(n).(*ir.Name); ok && name.Op() == ir.ONAME { + if name.Class == ir.PAUTO && !name.Addrtaken() && ssa.CanSSA(name.Type()) { + goto Copy + } + } + return n } + +Copy: return o.copyExpr(n) } // mapKeyTemp prepares n to be a key in a map runtime call and returns n. -// It should only be used for map runtime calls which have *_fast* versions. // The first parameter is the position of n's containing node, for use in case // that n's position is not unique (e.g., if n is an ONAME). func (o *orderState) mapKeyTemp(outerPos src.XPos, t *types.Type, n ir.Node) ir.Node { @@ -603,8 +619,38 @@ func (o *orderState) stmt(n ir.Node) { case ir.OAS: n := n.(*ir.AssignStmt) t := o.markTemp() + + // There's a delicate interaction here between two OINDEXMAP + // optimizations. + // + // First, we want to handle m[k] = append(m[k], ...) with a single + // runtime call to mapassign. This requires the m[k] expressions to + // satisfy ir.SameSafeExpr in walkAssign. + // + // But if k is a slow map key type that's passed by reference (e.g., + // byte), then we want to avoid marking user variables as addrtaken, + // if that might prevent the compiler from keeping k in a register. + // + // TODO(mdempsky): It would be better if walk was responsible for + // inserting temporaries as needed. 
+ mapAppend := n.X.Op() == ir.OINDEXMAP && n.Y.Op() == ir.OAPPEND && + ir.SameSafeExpr(n.X, n.Y.(*ir.CallExpr).Args[0]) + n.X = o.expr(n.X, nil) - n.Y = o.expr(n.Y, n.X) + if mapAppend { + indexLHS := n.X.(*ir.IndexExpr) + indexLHS.X = o.cheapExpr(indexLHS.X) + indexLHS.Index = o.cheapExpr(indexLHS.Index) + + call := n.Y.(*ir.CallExpr) + indexRHS := call.Args[0].(*ir.IndexExpr) + indexRHS.X = indexLHS.X + indexRHS.Index = indexLHS.Index + + o.exprList(call.Args[1:]) + } else { + n.Y = o.expr(n.Y, n.X) + } o.mapAssign(n) o.popTemp(t) @@ -1158,7 +1204,7 @@ func (o *orderState) expr1(n, lhs ir.Node) ir.Node { } } - // key must be addressable + // key may need to be be addressable n.Index = o.mapKeyTemp(n.Pos(), n.X.Type(), n.Index) if needCopy { return o.copyExpr(n) diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go index b67d0114c7..119647912b 100644 --- a/src/cmd/compile/internal/walk/switch.go +++ b/src/cmd/compile/internal/walk/switch.go @@ -532,7 +532,7 @@ func walkSwitchType(sw *ir.SwitchStmt) { c.Field("NCases").WriteInt(int64(len(interfaceCases))) array, sizeDelta := c.Field("Cases").ModifyArray(len(interfaceCases)) for i, c := range interfaceCases { - array.Elem(i).WritePtr(reflectdata.TypeSym(c.typ.Type()).Linksym()) + array.Elem(i).WritePtr(reflectdata.TypeLinksym(c.typ.Type())) } objw.Global(lsym, int32(rttype.InterfaceSwitch.Size()+sizeDelta), obj.LOCAL) // The GC only needs to see the first pointer in the structure (all the others @@ -700,7 +700,7 @@ func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr { } else { // runtime.itab's hash field if itabHashField == nil { - itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32]) + itabHashField = runtimeField("hash", rttype.ITab.OffsetOf("Hash"), types.Types[types.TUINT32]) } hashField = itabHashField } diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go index 
8be5804616..439f3ac71b 100644 --- a/src/cmd/compile/internal/walk/walk.go +++ b/src/cmd/compile/internal/walk/walk.go @@ -6,10 +6,12 @@ package walk import ( "fmt" + "internal/abi" "cmd/compile/internal/base" "cmd/compile/internal/ir" "cmd/compile/internal/reflectdata" + "cmd/compile/internal/rttype" "cmd/compile/internal/ssagen" "cmd/compile/internal/typecheck" "cmd/compile/internal/types" @@ -18,7 +20,6 @@ import ( // The constant is known to runtime. const tmpstringbufsize = 32 -const zeroValSize = 1024 // must match value of runtime/map.go:maxZero func Walk(fn *ir.Func) { ir.CurFunc = fn @@ -33,12 +34,6 @@ func Walk(fn *ir.Func) { ir.DumpList(s, ir.CurFunc.Body) } - lno := base.Pos - - base.Pos = lno - if base.Errors() > errorsBefore { - return - } walkStmtList(ir.CurFunc.Body) if base.Flag.W != 0 { s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym()) @@ -189,8 +184,7 @@ var mapassign = mkmapnames("mapassign", "ptr") var mapdelete = mkmapnames("mapdelete", "") func mapfast(t *types.Type) int { - // Check runtime/map.go:maxElemSize before changing. - if t.Elem().Size() > 128 { + if t.Elem().Size() > abi.MapMaxElemBytes { return mapslow } switch reflectdata.AlgType(t.Key()) { @@ -346,8 +340,8 @@ func mayCall(n ir.Node) bool { // itabType loads the _type field from a runtime.itab struct. 
func itabType(itab ir.Node) ir.Node { if itabTypeField == nil { - // runtime.itab's _type field - itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8])) + // internal/abi.ITab's Type field + itabTypeField = runtimeField("Type", rttype.ITab.OffsetOf("Type"), types.NewPtr(types.Types[types.TUINT8])) } return boundedDotPtr(base.Pos, itab, itabTypeField) } diff --git a/src/cmd/dist/build.go b/src/cmd/dist/build.go index 32e59b446a..64f8f53054 100644 --- a/src/cmd/dist/build.go +++ b/src/cmd/dist/build.go @@ -33,13 +33,14 @@ var ( gohostos string goos string goarm string + goarm64 string go386 string goamd64 string gomips string gomips64 string goppc64 string + goriscv64 string goroot string - goroot_final string goextlinkenabled string gogcflags string // For running built compiler goldflags string @@ -126,12 +127,6 @@ func xinit() { // All exec calls rewrite "go" into gorootBinGo. gorootBinGo = pathf("%s/bin/go", goroot) - b = os.Getenv("GOROOT_FINAL") - if b == "" { - b = goroot - } - goroot_final = b - b = os.Getenv("GOOS") if b == "" { b = gohostos @@ -147,6 +142,12 @@ func xinit() { } goarm = b + b = os.Getenv("GOARM64") + if b == "" { + b = "v8.0" + } + goarm64 = b + b = os.Getenv("GO386") if b == "" { b = "sse2" @@ -177,6 +178,12 @@ func xinit() { } goppc64 = b + b = os.Getenv("GORISCV64") + if b == "" { + b = "rva20u64" + } + goriscv64 = b + if p := pathf("%s/src/all.bash", goroot); !isfile(p) { fatalf("$GOROOT is not set correctly or not exported\n"+ "\tGOROOT=%s\n"+ @@ -230,14 +237,15 @@ func xinit() { os.Setenv("GOAMD64", goamd64) os.Setenv("GOARCH", goarch) os.Setenv("GOARM", goarm) + os.Setenv("GOARM64", goarm64) os.Setenv("GOHOSTARCH", gohostarch) os.Setenv("GOHOSTOS", gohostos) os.Setenv("GOOS", goos) os.Setenv("GOMIPS", gomips) os.Setenv("GOMIPS64", gomips64) os.Setenv("GOPPC64", goppc64) + os.Setenv("GORISCV64", goriscv64) os.Setenv("GOROOT", goroot) - os.Setenv("GOROOT_FINAL", goroot_final) // Set GOBIN to 
GOROOT/bin. The meaning of GOBIN has drifted over time // (see https://go.dev/issue/3269, https://go.dev/cl/183058, @@ -891,6 +899,24 @@ func runInstall(pkg string, ch chan struct{}) { asmArgs = append(asmArgs, "-D", "GOPPC64_power8") } } + if goarch == "riscv64" { + // Define GORISCV64_value from goriscv64 + asmArgs = append(asmArgs, "-D", "GORISCV64_"+goriscv64) + } + if goarch == "arm" { + // Define GOARM_value from goarm, which can be either a version + // like "6", or a version and a FP mode, like "7,hardfloat". + switch { + case strings.Contains(goarm, "7"): + asmArgs = append(asmArgs, "-D", "GOARM_7") + fallthrough + case strings.Contains(goarm, "6"): + asmArgs = append(asmArgs, "-D", "GOARM_6") + fallthrough + default: + asmArgs = append(asmArgs, "-D", "GOARM_5") + } + } goasmh := pathf("%s/go_asm.h", workdir) // Collect symabis from assembly code. @@ -1221,6 +1247,9 @@ func cmdenv() { if goarch == "arm" { xprintf(format, "GOARM", goarm) } + if goarch == "arm64" { + xprintf(format, "GOARM64", goarm64) + } if goarch == "386" { xprintf(format, "GO386", go386) } @@ -1236,6 +1265,9 @@ func cmdenv() { if goarch == "ppc64" || goarch == "ppc64le" { xprintf(format, "GOPPC64", goppc64) } + if goarch == "riscv64" { + xprintf(format, "GORISCV64", goriscv64) + } xprintf(format, "GOWORK", "off") if *path { @@ -1729,7 +1761,7 @@ var cgoEnabled = map[string]bool{ "openbsd/arm64": true, "openbsd/mips64": true, "openbsd/ppc64": false, - "openbsd/riscv64": false, + "openbsd/riscv64": true, "plan9/386": false, "plan9/amd64": false, "plan9/arm": false, @@ -1745,9 +1777,8 @@ var cgoEnabled = map[string]bool{ // get filtered out of cgoEnabled for 'dist list'. // See go.dev/issue/56679. var broken = map[string]bool{ - "linux/sparc64": true, // An incomplete port. See CL 132155. - "openbsd/mips64": true, // Broken: go.dev/issue/58110. - "openbsd/riscv64": true, // An incomplete port: go.dev/issue/55999. + "linux/sparc64": true, // An incomplete port. See CL 132155. 
+ "openbsd/mips64": true, // Broken: go.dev/issue/58110. } // List of platforms which are first class ports. See go.dev/issue/38874. @@ -1851,10 +1882,7 @@ func banner() { xprintf("Installed Go for %s/%s in %s\n", goos, goarch, goroot) xprintf("Installed commands in %s\n", gorootBin) - if !xsamefile(goroot_final, goroot) { - // If the files are to be moved, don't check that gobin - // is on PATH; assume they know what they are doing. - } else if gohostos == "plan9" { + if gohostos == "plan9" { // Check that GOROOT/bin is bound before /bin. pid := strings.Replace(readfile("#c/pid"), " ", "", -1) ns := fmt.Sprintf("/proc/%s/ns", pid) @@ -1879,12 +1907,6 @@ func banner() { xprintf("*** You need to add %s to your PATH.\n", gorootBin) } } - - if !xsamefile(goroot_final, goroot) { - xprintf("\n"+ - "The binaries expect %s to be copied or moved to %s\n", - goroot, goroot_final) - } } // Version prints the Go version. diff --git a/src/cmd/dist/buildruntime.go b/src/cmd/dist/buildruntime.go index 1de78f0fdb..7095f43772 100644 --- a/src/cmd/dist/buildruntime.go +++ b/src/cmd/dist/buildruntime.go @@ -54,9 +54,11 @@ func mkbuildcfg(file string) { fmt.Fprintf(&buf, "const defaultGO386 = `%s`\n", go386) fmt.Fprintf(&buf, "const defaultGOAMD64 = `%s`\n", goamd64) fmt.Fprintf(&buf, "const defaultGOARM = `%s`\n", goarm) + fmt.Fprintf(&buf, "const defaultGOARM64 = `%s`\n", goarm64) fmt.Fprintf(&buf, "const defaultGOMIPS = `%s`\n", gomips) fmt.Fprintf(&buf, "const defaultGOMIPS64 = `%s`\n", gomips64) fmt.Fprintf(&buf, "const defaultGOPPC64 = `%s`\n", goppc64) + fmt.Fprintf(&buf, "const defaultGORISCV64 = `%s`\n", goriscv64) fmt.Fprintf(&buf, "const defaultGOEXPERIMENT = `%s`\n", goexperiment) fmt.Fprintf(&buf, "const defaultGO_EXTLINK_ENABLED = `%s`\n", goextlinkenabled) fmt.Fprintf(&buf, "const defaultGO_LDSO = `%s`\n", defaultldso) diff --git a/src/cmd/dist/test.go b/src/cmd/dist/test.go index 9635c4fb61..8f4b081ad1 100644 --- a/src/cmd/dist/test.go +++ b/src/cmd/dist/test.go @@ 
-691,6 +691,14 @@ func (t *tester) registerTests() { }) } + // Check that all crypto packages compile with the purego build tag. + t.registerTest("crypto with tag purego", &goTest{ + variant: "purego", + tags: []string{"purego"}, + pkg: "crypto/...", + runTests: "^$", // only ensure they compile + }) + // Test ios/amd64 for the iOS simulator. if goos == "darwin" && goarch == "amd64" && t.cgoEnabled { t.registerTest("GOOS=ios on darwin/amd64", @@ -705,17 +713,30 @@ func (t *tester) registerTests() { // Runtime CPU tests. if !t.compileOnly && t.hasParallelism() { - t.registerTest("GOMAXPROCS=2 runtime -cpu=1,2,4 -quick", + for i := 1; i <= 4; i *= 2 { + t.registerTest(fmt.Sprintf("GOMAXPROCS=2 runtime -cpu=%d -quick", i), + &goTest{ + variant: "cpu" + strconv.Itoa(i), + timeout: 300 * time.Second, + cpu: strconv.Itoa(i), + short: true, + testFlags: []string{"-quick"}, + // We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code, + // creation of first goroutines and first garbage collections in the parallel setting. + env: []string{"GOMAXPROCS=2"}, + pkg: "runtime", + }) + } + } + + // GOEXPERIMENT=rangefunc tests + if !t.compileOnly { + t.registerTest("GOEXPERIMENT=rangefunc go test iter", &goTest{ - variant: "cpu124", - timeout: 300 * time.Second, - cpu: "1,2,4", - short: true, - testFlags: []string{"-quick"}, - // We set GOMAXPROCS=2 in addition to -cpu=1,2,4 in order to test runtime bootstrap code, - // creation of first goroutines and first garbage collections in the parallel setting. - env: []string{"GOMAXPROCS=2"}, - pkg: "runtime", + variant: "iter", + short: t.short, + env: []string{"GOEXPERIMENT=rangefunc"}, + pkg: "iter", }) } @@ -891,8 +912,11 @@ func (t *tester) registerTests() { // so we really only need to run this check once anywhere to get adequate coverage. // To help developers avoid trybot-only failures, we try to run on typical developer machines // which is darwin,linux,windows/amd64 and darwin/arm64. 
+ // + // The same logic applies to the release notes that correspond to each api/next file. if goos == "darwin" || ((goos == "linux" || goos == "windows") && goarch == "amd64") { t.registerTest("API check", &goTest{variant: "check", pkg: "cmd/api", timeout: 5 * time.Minute, testFlags: []string{"-check"}}) + t.registerTest("API release note check", &goTest{variant: "check", pkg: "cmd/relnote", testFlags: []string{"-check"}}) } } @@ -1414,7 +1438,7 @@ func (t *tester) registerRaceTests() { // Building cmd/cgo/internal/test takes a long time. // There are already cgo-enabled packages being tested with the race detector. // We shouldn't need to redo all of cmd/cgo/internal/test too. - // The race buildler will take care of this. + // The race builder will take care of this. // t.registerTest(hdr, &goTest{variant: "race", race: true, env: []string{"GOTRACEBACK=2"}, pkg: "cmd/cgo/internal/test"}) } if t.extLink() { @@ -1627,7 +1651,7 @@ func buildModeSupported(compiler, buildmode, goos, goarch string) bool { case "plugin": switch platform { - case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le", + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le", "android/amd64", "android/386", "darwin/amd64", "darwin/arm64", "freebsd/amd64": diff --git a/src/cmd/doc/pkg.go b/src/cmd/doc/pkg.go index dfdc5674e9..a21d8a4688 100644 --- a/src/cmd/doc/pkg.go +++ b/src/cmd/doc/pkg.go @@ -43,9 +43,9 @@ type Package struct { buf pkgBuffer } -func (p *Package) ToText(w io.Writer, text, prefix, codePrefix string) { - d := p.doc.Parser().Parse(text) - pr := p.doc.Printer() +func (pkg *Package) ToText(w io.Writer, text, prefix, codePrefix string) { + d := pkg.doc.Parser().Parse(text) + pr := pkg.doc.Printer() pr.TextPrefix = prefix pr.TextCodePrefix = codePrefix w.Write(pr.Text(d)) diff --git a/src/cmd/fix/buildtag.go b/src/cmd/fix/buildtag.go index 5f4fbfef16..6b706c4cb5 100644 --- 
a/src/cmd/fix/buildtag.go +++ b/src/cmd/fix/buildtag.go @@ -6,6 +6,7 @@ package main import ( "go/ast" + "go/version" "strings" ) @@ -13,7 +14,7 @@ func init() { register(buildtagFix) } -const buildtagGoVersionCutoff = 1_18 +const buildtagGoVersionCutoff = "go1.18" var buildtagFix = fix{ name: "buildtag", @@ -23,7 +24,7 @@ var buildtagFix = fix{ } func buildtag(f *ast.File) bool { - if goVersion < buildtagGoVersionCutoff { + if version.Compare(*goVersion, buildtagGoVersionCutoff) < 0 { return false } diff --git a/src/cmd/fix/buildtag_test.go b/src/cmd/fix/buildtag_test.go index 1c6efbe9e0..e5997043c2 100644 --- a/src/cmd/fix/buildtag_test.go +++ b/src/cmd/fix/buildtag_test.go @@ -11,7 +11,7 @@ func init() { var buildtagTests = []testCase{ { Name: "buildtag.oldGo", - Version: 1_10, + Version: "go1.10", In: `//go:build yes // +build yes @@ -20,7 +20,7 @@ package main }, { Name: "buildtag.new", - Version: 1_99, + Version: "go1.99", In: `//go:build yes // +build yes diff --git a/src/cmd/fix/main.go b/src/cmd/fix/main.go index 0f36fcc312..db67b4ba07 100644 --- a/src/cmd/fix/main.go +++ b/src/cmd/fix/main.go @@ -13,13 +13,13 @@ import ( "go/parser" "go/scanner" "go/token" + "go/version" "internal/diff" "io" "io/fs" "os" "path/filepath" "sort" - "strconv" "strings" ) @@ -37,10 +37,8 @@ var forceRewrites = flag.String("force", "", var allowed, force map[string]bool var ( - doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") - goVersionStr = flag.String("go", "", "go language version for files") - - goVersion int // 115 for go1.15 + doDiff = flag.Bool("diff", false, "display diffs instead of rewriting files") + goVersion = flag.String("go", "", "go language version for files") ) // enable for debugging fix failures @@ -68,24 +66,9 @@ func main() { flag.Usage = usage flag.Parse() - if *goVersionStr != "" { - if !strings.HasPrefix(*goVersionStr, "go") { - report(fmt.Errorf("invalid -go=%s", *goVersionStr)) - os.Exit(exitCode) - } - majorStr := 
(*goVersionStr)[len("go"):] - minorStr := "0" - if before, after, found := strings.Cut(majorStr, "."); found { - majorStr, minorStr = before, after - } - major, err1 := strconv.Atoi(majorStr) - minor, err2 := strconv.Atoi(minorStr) - if err1 != nil || err2 != nil || major < 0 || major >= 100 || minor < 0 || minor >= 100 { - report(fmt.Errorf("invalid -go=%s", *goVersionStr)) - os.Exit(exitCode) - } - - goVersion = major*100 + minor + if !version.IsValid(*goVersion) { + report(fmt.Errorf("invalid -go=%s", *goVersion)) + os.Exit(exitCode) } sort.Sort(byDate(fixes)) diff --git a/src/cmd/fix/main_test.go b/src/cmd/fix/main_test.go index cafd116cfd..8d841b101f 100644 --- a/src/cmd/fix/main_test.go +++ b/src/cmd/fix/main_test.go @@ -17,7 +17,7 @@ import ( type testCase struct { Name string Fn func(*ast.File) bool - Version int + Version string In string Out string } @@ -96,7 +96,7 @@ func TestRewrite(t *testing.T) { for _, tt := range testCases { tt := tt t.Run(tt.Name, func(t *testing.T) { - if tt.Version == 0 { + if tt.Version == "" { if testing.Verbose() { // Don't run in parallel: cmd/fix sometimes writes directly to stderr, // and since -v prints which test is currently running we want that @@ -105,10 +105,10 @@ func TestRewrite(t *testing.T) { t.Parallel() } } else { - old := goVersion - goVersion = tt.Version + old := *goVersion + *goVersion = tt.Version defer func() { - goVersion = old + *goVersion = old }() } diff --git a/src/cmd/go.mod b/src/cmd/go.mod index 1e94f5dab0..346a156a0f 100644 --- a/src/cmd/go.mod +++ b/src/cmd/go.mod @@ -1,15 +1,21 @@ module cmd -go 1.22 +go 1.23 require ( - github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17 - golang.org/x/arch v0.5.1-0.20231011141335-a6bdeed49307 - golang.org/x/mod v0.13.1-0.20231025225536-6e58e47c7bd6 - golang.org/x/sync v0.4.1-0.20231011140417-10739b037d36 - golang.org/x/sys v0.13.1-0.20231011215430-1bfbee0e20e3 - golang.org/x/term v0.13.1-0.20231011140651-6a610bc55bff - golang.org/x/tools 
v0.14.1-0.20231019165902-71f6a46884ab + github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 + golang.org/x/arch v0.7.0 + golang.org/x/build v0.0.0-20240222153247-cf4ed81bb19f + golang.org/x/mod v0.16.0 + golang.org/x/sync v0.6.0 + golang.org/x/sys v0.18.0 + golang.org/x/telemetry v0.0.0-20240306210657-d5a85b27db3e + golang.org/x/term v0.17.0 + golang.org/x/tools v0.18.0 ) -require github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab // indirect +require ( + github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab // indirect + golang.org/x/text v0.14.0 // indirect + rsc.io/markdown v0.0.0-20240117044121-669d2fdf1650 // indirect +) diff --git a/src/cmd/go.sum b/src/cmd/go.sum index ab476f84f9..683d46d169 100644 --- a/src/cmd/go.sum +++ b/src/cmd/go.sum @@ -1,16 +1,44 @@ -github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17 h1:0h35ESZ02+hN/MFZb7XZOXg+Rl9+Rk8fBIf5YLws9gA= -github.com/google/pprof v0.0.0-20230811205829-9131a7e9cc17/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89 h1:aPflPkRFkVwbW6dmcVqfgwp1i+UWGFH6VgR1Jim5Ygc= +github.com/chromedp/cdproto v0.0.0-20230802225258-3cf4e6d46a89/go.mod h1:GKljq0VrfU4D5yc+2qA6OVr8pmO/MBbPEWqWQ/oqGEs= +github.com/chromedp/chromedp v0.9.2 h1:dKtNz4kApb06KuSXoTQIyUC2TrA0fhGDwNZf3bcgfKw= +github.com/chromedp/chromedp v0.9.2/go.mod h1:LkSXJKONWTCHAfQasKFUZI+mxqS4tZqhmtGzzhLsnLs= +github.com/chromedp/sysutil v1.0.0 h1:+ZxhTpfpZlmchB58ih/LBHX52ky7w2VhQVKQMucy3Ic= +github.com/chromedp/sysutil v1.0.0/go.mod h1:kgWmDdq8fTzXYcKIBqIYvRRTnYb9aNS9moAV0xufSww= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= +github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= +github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= +github.com/gobwas/ws v1.2.1 
h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5 h1:E/LAvt58di64hlYjx7AsNS6C/ysHWYo+2qPCZKTQhRo= +github.com/google/pprof v0.0.0-20240207164012-fb44976bdcd5/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab h1:BA4a7pe6ZTd9F8kXETBoijjFJ/ntaa//1wiH9BZu4zU= github.com/ianlancetaylor/demangle v0.0.0-20230524184225-eabc099b10ab/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= -golang.org/x/arch v0.5.1-0.20231011141335-a6bdeed49307 h1:1nIbNxjxQ3+dss3xYMxayoIZONazUTg8/BENwc19sAQ= -golang.org/x/arch v0.5.1-0.20231011141335-a6bdeed49307/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= -golang.org/x/mod v0.13.1-0.20231025225536-6e58e47c7bd6 h1:YSyE+/SK6vfYAxf27iVtUZ/tTZOHGN6epnMgE1al/+M= -golang.org/x/mod v0.13.1-0.20231025225536-6e58e47c7bd6/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/sync v0.4.1-0.20231011140417-10739b037d36 h1:+lDu3sHZVY5Qqb7ynMbjaT4IsYicvoxypEOIE4aYlYE= -golang.org/x/sync v0.4.1-0.20231011140417-10739b037d36/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.13.1-0.20231011215430-1bfbee0e20e3 h1:G9se7UpoI67yWrFY0IIFGf6H3nwLLUZFDBCyOJwWeSc= -golang.org/x/sys v0.13.1-0.20231011215430-1bfbee0e20e3/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.13.1-0.20231011140651-6a610bc55bff h1:4lCCwEX5qbLiqBk8cqIlwrDbmkzfggoqDXYLTU+jr30= -golang.org/x/term v0.13.1-0.20231011140651-6a610bc55bff/go.mod h1:tfGnZ3a6ww9diaioGSzdPRgIfpk6Odt1UPXNhRcgfag= -golang.org/x/tools v0.14.1-0.20231019165902-71f6a46884ab h1:wNyFWaRZ6iFNvDL/8TK0HF5x3mGttsqpXPuHeKN88G8= -golang.org/x/tools 
v0.14.1-0.20231019165902-71f6a46884ab/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/yuin/goldmark v1.6.0 h1:boZcn2GTjpsynOsC0iJHnBWa4Bi0qzfJjthwauItG68= +github.com/yuin/goldmark v1.6.0/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/arch v0.7.0 h1:pskyeJh/3AmoQ8CPE95vxHLqp1G1GfGNXTmcl9NEKTc= +golang.org/x/arch v0.7.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys= +golang.org/x/build v0.0.0-20240222153247-cf4ed81bb19f h1:XQ2eu0I26WsNCKQkRehp+5mwjjChw94trD9LT8LLSq0= +golang.org/x/build v0.0.0-20240222153247-cf4ed81bb19f/go.mod h1:HTqTCkubWT8epEK9hDWWGkoOOB7LGSrU1qvWZCSwO50= +golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic= +golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= +golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/telemetry v0.0.0-20240306210657-d5a85b27db3e h1:PLWTnhLSeWLoHHuUDdzlJeYqRntM+xTyojGjTrFg01c= +golang.org/x/telemetry v0.0.0-20240306210657-d5a85b27db3e/go.mod h1:wQS78u8AjB4H3mN7DPniFYwsXnV9lPziq+He/eA7JIw= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/tools 
v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +rsc.io/markdown v0.0.0-20240117044121-669d2fdf1650 h1:fuOABZYWclLVNotDsHVaFixLdtoC7+UQZJ0KSC1ocm0= +rsc.io/markdown v0.0.0-20240117044121-669d2fdf1650/go.mod h1:8xcPgWmwlZONN1D9bjxtHEjrUtSEa3fakVF8iaewYKQ= diff --git a/src/cmd/go/alldocs.go b/src/cmd/go/alldocs.go index a5148ad486..dde47ac1b8 100644 --- a/src/cmd/go/alldocs.go +++ b/src/cmd/go/alldocs.go @@ -1004,6 +1004,8 @@ // Retracted []string // retraction information, if any (with -retracted or -u) // Deprecated string // deprecation message, if any (with -u) // Error *ModuleError // error loading module +// Sum string // checksum for path, version (as in go.sum) +// GoModSum string // checksum for go.mod (as in go.sum) // Origin any // provenance of module // Reuse bool // reuse of old module info is safe // } @@ -1324,9 +1326,6 @@ // using import comments in .go files, vendoring tool configuration files (like // Gopkg.lock), and the current directory (if in GOPATH). // -// If a configuration file for a vendoring tool is present, init will attempt to -// import module requirements from it. -// // See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'. // // # Add missing and remove unused modules @@ -1805,7 +1804,7 @@ // The rule for a match in the cache is that the run involves the same // test binary and the flags on the command line come entirely from a // restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, -// -list, -parallel, -run, -short, -timeout, -failfast, and -v. +// -list, -parallel, -run, -short, -timeout, -failfast, -fullpath and -v. // If a run of go test has any test or non-test flags outside this set, // the result is not cached. To disable test caching, use any test flag // or argument other than the cacheable flags. 
The idiomatic way to disable @@ -1991,6 +1990,8 @@ // correspond to the amd64.v1, amd64.v2, and amd64.v3 feature build tags. // - For GOARCH=arm, GOARM=5, 6, and 7 // correspond to the arm.5, arm.6, and arm.7 feature build tags. +// - For GOARCH=arm64, GOARM64=v8.{0-9} and v9.{0-5} +// correspond to the arm64.v8.{0-9} and arm64.v9.{0-5} feature build tags. // - For GOARCH=mips or mipsle, // GOMIPS=hardfloat and softfloat // correspond to the mips.hardfloat and mips.softfloat @@ -2004,10 +2005,13 @@ // ppc64.power8, ppc64.power9, and ppc64.power10 // (or ppc64le.power8, ppc64le.power9, and ppc64le.power10) // feature build tags. +// - For GOARCH=riscv64, +// GORISCV64=rva20u64 and rva22u64 correspond to the riscv64.rva20u64 +// and riscv64.rva22u64 build tags. // - For GOARCH=wasm, GOWASM=satconv and signext // correspond to the wasm.satconv and wasm.signext feature build tags. // -// For GOARCH=amd64, arm, ppc64, and ppc64le, a particular feature level +// For GOARCH=amd64, arm, ppc64, ppc64le, and riscv64, a particular feature level // sets the feature build tags for all previous levels as well. // For example, GOAMD64=v2 sets the amd64.v1 and amd64.v2 feature flags. // This ensures that code making use of v2 features continues to compile @@ -2285,6 +2289,15 @@ // GOARM // For GOARCH=arm, the ARM architecture for which to compile. // Valid values are 5, 6, 7. +// The value can be followed by an option specifying how to implement floating point instructions. +// Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7). +// GOARM64 +// For GOARCH=arm64, the ARM64 architecture for which to compile. +// Valid values are v8.0 (default), v8.{1-9}, v9.{0-5}. +// The value can be followed by an option specifying extensions implemented by target hardware. +// Valid options are ,lse and ,crypto. +// Note that some extensions are enabled by default starting from a certain GOARM64 version; +// for example, lse is enabled by default starting from v8.1. 
// GO386 // For GOARCH=386, how to implement floating point instructions. // Valid values are sse2 (default), softfloat. @@ -2301,6 +2314,10 @@ // GOPPC64 // For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture). // Valid values are power8 (default), power9, power10. +// GORISCV64 +// For GOARCH=riscv64, the RISC-V user-mode application profile for which +// to compile. Valid values are rva20u64 (default), rva22u64. +// See https://github.com/riscv/riscv-profiles/blob/main/profiles.adoc // GOWASM // For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use. // Valid values are satconv, signext. @@ -2323,11 +2340,6 @@ // See src/internal/goexperiment/flags.go for currently valid values. // Warning: This variable is provided for the development and testing // of the Go toolchain itself. Use beyond that purpose is unsupported. -// GOROOT_FINAL -// The root of the installed Go tree, when it is -// installed in a location other than where it is built. -// File names in stack traces are rewritten from GOROOT to -// GOROOT_FINAL. // GO_EXTLINK_ENABLED // Whether the linker should use external linking mode // when using -linkmode=auto with code that uses cgo. diff --git a/src/cmd/go/counters_test.go b/src/cmd/go/counters_test.go new file mode 100644 index 0000000000..0413597924 --- /dev/null +++ b/src/cmd/go/counters_test.go @@ -0,0 +1,89 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main_test + +import ( + "cmd/go/internal/base" + "flag" + "internal/diff" + "os" + "slices" + "strings" + "testing" +) + +var update = flag.Bool("update", false, "if true update testdata/counternames.txt") + +func TestCounterNamesUpToDate(t *testing.T) { + if !*update { + t.Parallel() + } + + var counters []string + // -C is a special case because it's handled by handleChdirFlag rather than + // standard flag processing with FlagSets. + // cmd/go/subcommand:unknown is also a special case: it's used when the subcommand + // doesn't match any of the known commands. + counters = append(counters, "cmd/go/flag:C", "cmd/go/subcommand:unknown") + counters = append(counters, flagscounters("cmd/go/flag:", *flag.CommandLine)...) + + for _, cmd := range base.Go.Commands { + counters = append(counters, cmdcounters(nil, cmd)...) + } + cstr := []byte(strings.Join(counters, "\n") + "\n") + + const counterNamesFile = "testdata/counters.txt" + old, err := os.ReadFile(counterNamesFile) + if err != nil { + t.Fatalf("error reading %s: %v", counterNamesFile, err) + } + diff := diff.Diff(counterNamesFile, old, "generated counter names", cstr) + if diff == nil { + t.Logf("%s is up to date.", counterNamesFile) + return + } + + if *update { + if err := os.WriteFile(counterNamesFile, cstr, 0666); err != nil { + t.Fatal(err) + } + t.Logf("wrote %d bytes to %s", len(cstr), counterNamesFile) + t.Logf("don't forget to file a proposal to update the list of collected counters") + } else { + t.Logf("\n%s", diff) + t.Errorf("%s is stale. 
To update, run 'go generate cmd/go'.", counterNamesFile) + } +} + +func flagscounters(prefix string, flagSet flag.FlagSet) []string { + var counters []string + flagSet.VisitAll(func(f *flag.Flag) { + counters = append(counters, prefix+f.Name) + }) + return counters +} + +func cmdcounters(previous []string, cmd *base.Command) []string { + const subcommandPrefix = "cmd/go/subcommand:" + const flagPrefix = "cmd/go/flag:" + var counters []string + previousComponent := strings.Join(previous, "-") + if len(previousComponent) > 0 { + previousComponent += "-" + } + if cmd.Runnable() { + counters = append(counters, subcommandPrefix+previousComponent+cmd.Name()) + } + counters = append(counters, flagscounters(flagPrefix+previousComponent+cmd.Name()+"-", cmd.Flag)...) + if len(previous) != 0 { + counters = append(counters, subcommandPrefix+previousComponent+"help-"+cmd.Name()) + } + counters = append(counters, subcommandPrefix+"help-"+previousComponent+cmd.Name()) + + for _, subcmd := range cmd.Commands { + counters = append(counters, cmdcounters(append(slices.Clone(previous), cmd.Name()), subcmd)...) + } + return counters +} diff --git a/src/cmd/go/go_test.go b/src/cmd/go/go_test.go index bae83eb92f..34a3cf15c9 100644 --- a/src/cmd/go/go_test.go +++ b/src/cmd/go/go_test.go @@ -44,6 +44,8 @@ import ( "cmd/internal/sys" cmdgo "cmd/go" + + "golang.org/x/telemetry/counter/countertest" ) func init() { @@ -90,10 +92,6 @@ func tooSlow(t *testing.T, reason string) { // (temp) directory. var testGOROOT string -// testGOROOT_FINAL is the GOROOT_FINAL with which the test binary is assumed to -// have been built. 
-var testGOROOT_FINAL = os.Getenv("GOROOT_FINAL") - var testGOCACHE string var testGo string @@ -157,6 +155,15 @@ func TestMain(m *testing.M) { web.EnableTestHooks(interceptors) } + cmdgo.TelemetryStart = func() { + // TODO(matloob): we'll ideally want to call telemetry.Start here + // but it calls counter.Open, which we don't want to do because + // we want to call countertest.Open. + if telemetryDir := os.Getenv("TESTGO_TELEMETRY_DIR"); telemetryDir != "" { + countertest.Open(telemetryDir) + } + } + cmdgo.Main() os.Exit(0) } @@ -223,10 +230,6 @@ func TestMain(m *testing.M) { } testGOROOT = goEnv("GOROOT") os.Setenv("TESTGO_GOROOT", testGOROOT) - // Ensure that GOROOT is set explicitly. - // Otherwise, if the toolchain was built with GOROOT_FINAL set but has not - // yet been moved to its final location, programs that invoke runtime.GOROOT - // may accidentally use the wrong path. os.Setenv("GOROOT", testGOROOT) // The whole GOROOT/pkg tree was installed using the GOHOSTOS/GOHOSTARCH @@ -881,22 +884,17 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { // Copy the runtime packages into a temporary GOROOT // so that we can change files. 
- for _, copydir := range []string{ - "src/runtime", - "src/internal/abi", - "src/internal/bytealg", - "src/internal/coverage/rtcov", - "src/internal/cpu", - "src/internal/goarch", - "src/internal/godebugs", - "src/internal/goexperiment", - "src/internal/goos", - "src/internal/coverage/rtcov", - "src/math/bits", - "src/unsafe", + var dirs []string + tg.run("list", "-deps", "runtime") + pkgs := strings.Split(strings.TrimSpace(tg.getStdout()), "\n") + for _, pkg := range pkgs { + dirs = append(dirs, filepath.Join("src", pkg)) + } + dirs = append(dirs, filepath.Join("pkg/tool", goHostOS+"_"+goHostArch), "pkg/include", - } { + ) + for _, copydir := range dirs { srcdir := filepath.Join(testGOROOT, copydir) tg.tempDir(filepath.Join("goroot", copydir)) err := filepath.WalkDir(srcdir, @@ -912,6 +910,9 @@ func TestNewReleaseRebuildsStalePackagesInGOPATH(t *testing.T) { return err } dest := filepath.Join("goroot", copydir, srcrel) + if _, err := os.Stat(dest); err == nil { + return nil + } data, err := os.ReadFile(path) if err != nil { return err diff --git a/src/cmd/go/internal/cfg/cfg.go b/src/cmd/go/internal/cfg/cfg.go index a8daa2dfc3..e0da810c73 100644 --- a/src/cmd/go/internal/cfg/cfg.go +++ b/src/cmd/go/internal/cfg/cfg.go @@ -64,33 +64,31 @@ func ToolExeSuffix() string { // These are general "build flags" used by build and other commands. 
var ( - BuildA bool // -a flag - BuildBuildmode string // -buildmode flag - BuildBuildvcs = "auto" // -buildvcs flag: "true", "false", or "auto" - BuildContext = defaultContext() - BuildMod string // -mod flag - BuildModExplicit bool // whether -mod was set explicitly - BuildModReason string // reason -mod was set, if set by default - BuildLinkshared bool // -linkshared flag - BuildMSan bool // -msan flag - BuildASan bool // -asan flag - BuildCover bool // -cover flag - BuildCoverMode string // -covermode flag - BuildCoverPkg []string // -coverpkg flag - BuildN bool // -n flag - BuildO string // -o flag - BuildP = runtime.GOMAXPROCS(0) // -p flag - BuildPGO string // -pgo flag - BuildPkgdir string // -pkgdir flag - BuildRace bool // -race flag - BuildToolexec []string // -toolexec flag - BuildToolchainName string - BuildToolchainCompiler func() string - BuildToolchainLinker func() string - BuildTrimpath bool // -trimpath flag - BuildV bool // -v flag - BuildWork bool // -work flag - BuildX bool // -x flag + BuildA bool // -a flag + BuildBuildmode string // -buildmode flag + BuildBuildvcs = "auto" // -buildvcs flag: "true", "false", or "auto" + BuildContext = defaultContext() + BuildMod string // -mod flag + BuildModExplicit bool // whether -mod was set explicitly + BuildModReason string // reason -mod was set, if set by default + BuildLinkshared bool // -linkshared flag + BuildMSan bool // -msan flag + BuildASan bool // -asan flag + BuildCover bool // -cover flag + BuildCoverMode string // -covermode flag + BuildCoverPkg []string // -coverpkg flag + BuildN bool // -n flag + BuildO string // -o flag + BuildP = runtime.GOMAXPROCS(0) // -p flag + BuildPGO string // -pgo flag + BuildPkgdir string // -pkgdir flag + BuildRace bool // -race flag + BuildToolexec []string // -toolexec flag + BuildToolchainName string + BuildTrimpath bool // -trimpath flag + BuildV bool // -v flag + BuildWork bool // -work flag + BuildX bool // -x flag ModCacheRW bool // -modcacherw flag 
ModFile string // -modfile flag @@ -181,8 +179,6 @@ func defaultContext() build.Context { func init() { SetGOROOT(Getenv("GOROOT"), false) - BuildToolchainCompiler = func() string { return "missing-compiler" } - BuildToolchainLinker = func() string { return "missing-linker" } } // SetGOROOT sets GOROOT and associated variables to the given values. @@ -203,7 +199,6 @@ func SetGOROOT(goroot string, isTestGo bool) { GOROOTpkg = filepath.Join(goroot, "pkg") GOROOTsrc = filepath.Join(goroot, "src") } - GOROOT_FINAL = findGOROOT_FINAL(goroot) installedGOOS = runtime.GOOS installedGOARCH = runtime.GOARCH @@ -402,19 +397,18 @@ var ( GOROOTpkg string GOROOTsrc string - GOROOT_FINAL string - GOBIN = Getenv("GOBIN") GOMODCACHE = envOr("GOMODCACHE", gopathDir("pkg/mod")) // Used in envcmd.MkEnv and build ID computations. - GOARM = envOr("GOARM", fmt.Sprint(buildcfg.GOARM)) - GO386 = envOr("GO386", buildcfg.GO386) - GOAMD64 = envOr("GOAMD64", fmt.Sprintf("%s%d", "v", buildcfg.GOAMD64)) - GOMIPS = envOr("GOMIPS", buildcfg.GOMIPS) - GOMIPS64 = envOr("GOMIPS64", buildcfg.GOMIPS64) - GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", buildcfg.GOPPC64)) - GOWASM = envOr("GOWASM", fmt.Sprint(buildcfg.GOWASM)) + GOARM = envOr("GOARM", fmt.Sprint(buildcfg.GOARM)) + GO386 = envOr("GO386", buildcfg.GO386) + GOAMD64 = envOr("GOAMD64", fmt.Sprintf("%s%d", "v", buildcfg.GOAMD64)) + GOMIPS = envOr("GOMIPS", buildcfg.GOMIPS) + GOMIPS64 = envOr("GOMIPS64", buildcfg.GOMIPS64) + GOPPC64 = envOr("GOPPC64", fmt.Sprintf("%s%d", "power", buildcfg.GOPPC64)) + GORISCV64 = envOr("GORISCV64", fmt.Sprintf("rva%du64", buildcfg.GORISCV64)) + GOWASM = envOr("GOWASM", fmt.Sprint(buildcfg.GOWASM)) GOPROXY = envOr("GOPROXY", "") GOSUMDB = envOr("GOSUMDB", "") @@ -445,6 +439,8 @@ func GetArchEnv() (key, val string) { return "GOMIPS64", GOMIPS64 case "ppc64", "ppc64le": return "GOPPC64", GOPPC64 + case "riscv64": + return "GORISCV64", GORISCV64 case "wasm": return "GOWASM", GOWASM } @@ -533,16 +529,6 @@ 
func findGOROOT(env string) string { return def } -func findGOROOT_FINAL(goroot string) string { - // $GOROOT_FINAL is only for use during make.bash - // so it is not settable using go/env, so we use os.Getenv here. - def := goroot - if env := os.Getenv("GOROOT_FINAL"); env != "" { - def = filepath.Clean(env) - } - return def -} - // isSameDir reports whether dir1 and dir2 are the same directory. func isSameDir(dir1, dir2 string) bool { if dir1 == dir2 { diff --git a/src/cmd/go/internal/envcmd/env.go b/src/cmd/go/internal/envcmd/env.go index c7c2e83e0f..1680753b0f 100644 --- a/src/cmd/go/internal/envcmd/env.go +++ b/src/cmd/go/internal/envcmd/env.go @@ -104,6 +104,7 @@ func MkEnv() []cfg.EnvVar { {Name: "GOTOOLDIR", Value: build.ToolDir}, {Name: "GOVCS", Value: cfg.GOVCS}, {Name: "GOVERSION", Value: runtime.Version()}, + {Name: "GODEBUG", Value: os.Getenv("GODEBUG")}, } if work.GccgoBin != "" { @@ -530,7 +531,7 @@ func checkEnvWrite(key, val string) error { switch key { case "GOEXE", "GOGCCFLAGS", "GOHOSTARCH", "GOHOSTOS", "GOMOD", "GOWORK", "GOTOOLDIR", "GOVERSION": return fmt.Errorf("%s cannot be modified", key) - case "GOENV": + case "GOENV", "GODEBUG": return fmt.Errorf("%s can only be set using the OS environment", key) } diff --git a/src/cmd/go/internal/generate/generate.go b/src/cmd/go/internal/generate/generate.go index dbe84d7fd6..6371353e20 100644 --- a/src/cmd/go/internal/generate/generate.go +++ b/src/cmd/go/internal/generate/generate.go @@ -181,6 +181,8 @@ func init() { } func runGenerate(ctx context.Context, cmd *base.Command, args []string) { + modload.InitWorkfile() + if generateRunFlag != "" { var err error generateRunRE, err = regexp.Compile(generateRunFlag) diff --git a/src/cmd/go/internal/help/helpdoc.go b/src/cmd/go/internal/help/helpdoc.go index 93613ac65e..e1d719be4f 100644 --- a/src/cmd/go/internal/help/helpdoc.go +++ b/src/cmd/go/internal/help/helpdoc.go @@ -601,6 +601,15 @@ Architecture-specific environment variables: GOARM For GOARCH=arm, 
the ARM architecture for which to compile. Valid values are 5, 6, 7. + The value can be followed by an option specifying how to implement floating point instructions. + Valid options are ,softfloat (default for 5) and ,hardfloat (default for 6 and 7). + GOARM64 + For GOARCH=arm64, the ARM64 architecture for which to compile. + Valid values are v8.0 (default), v8.{1-9}, v9.{0-5}. + The value can be followed by an option specifying extensions implemented by target hardware. + Valid options are ,lse and ,crypto. + Note that some extensions are enabled by default starting from a certain GOARM64 version; + for example, lse is enabled by default starting from v8.1. GO386 For GOARCH=386, how to implement floating point instructions. Valid values are sse2 (default), softfloat. @@ -617,6 +626,10 @@ Architecture-specific environment variables: GOPPC64 For GOARCH=ppc64{,le}, the target ISA (Instruction Set Architecture). Valid values are power8 (default), power9, power10. + GORISCV64 + For GOARCH=riscv64, the RISC-V user-mode application profile for which + to compile. Valid values are rva20u64 (default), rva22u64. + See https://github.com/riscv/riscv-profiles/blob/main/profiles.adoc GOWASM For GOARCH=wasm, comma-separated list of experimental WebAssembly features to use. Valid values are satconv, signext. @@ -639,11 +652,6 @@ Special-purpose environment variables: See src/internal/goexperiment/flags.go for currently valid values. Warning: This variable is provided for the development and testing of the Go toolchain itself. Use beyond that purpose is unsupported. - GOROOT_FINAL - The root of the installed Go tree, when it is - installed in a location other than where it is built. - File names in stack traces are rewritten from GOROOT to - GOROOT_FINAL. GO_EXTLINK_ENABLED Whether the linker should use external linking mode when using -linkmode=auto with code that uses cgo. 
@@ -892,6 +900,8 @@ The defined architecture feature build tags are: correspond to the amd64.v1, amd64.v2, and amd64.v3 feature build tags. - For GOARCH=arm, GOARM=5, 6, and 7 correspond to the arm.5, arm.6, and arm.7 feature build tags. + - For GOARCH=arm64, GOARM64=v8.{0-9} and v9.{0-5} + correspond to the arm64.v8.{0-9} and arm64.v9.{0-5} feature build tags. - For GOARCH=mips or mipsle, GOMIPS=hardfloat and softfloat correspond to the mips.hardfloat and mips.softfloat @@ -905,10 +915,13 @@ The defined architecture feature build tags are: ppc64.power8, ppc64.power9, and ppc64.power10 (or ppc64le.power8, ppc64le.power9, and ppc64le.power10) feature build tags. + - For GOARCH=riscv64, + GORISCV64=rva20u64 and rva22u64 correspond to the riscv64.rva20u64 + and riscv64.rva22u64 build tags. - For GOARCH=wasm, GOWASM=satconv and signext correspond to the wasm.satconv and wasm.signext feature build tags. -For GOARCH=amd64, arm, ppc64, and ppc64le, a particular feature level +For GOARCH=amd64, arm, ppc64, ppc64le, and riscv64, a particular feature level sets the feature build tags for all previous levels as well. For example, GOAMD64=v2 sets the amd64.v1 and amd64.v2 feature flags. 
This ensures that code making use of v2 features continues to compile diff --git a/src/cmd/go/internal/list/list.go b/src/cmd/go/internal/list/list.go index d9b09077c1..df3639cba7 100644 --- a/src/cmd/go/internal/list/list.go +++ b/src/cmd/go/internal/list/list.go @@ -245,6 +245,8 @@ applied to a Go struct, but now a Module struct: Retracted []string // retraction information, if any (with -retracted or -u) Deprecated string // deprecation message, if any (with -u) Error *ModuleError // error loading module + Sum string // checksum for path, version (as in go.sum) + GoModSum string // checksum for go.mod (as in go.sum) Origin any // provenance of module Reuse bool // reuse of old module info is safe } @@ -449,13 +451,15 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { if listJson { do = func(x any) { if !listJsonFields.needAll() { - v := reflect.ValueOf(x).Elem() // do is always called with a non-nil pointer. - // Clear all non-requested fields. + // Set x to a copy of itself with all non-requested fields cleared. + v := reflect.New(reflect.TypeOf(x).Elem()).Elem() // do is always called with a non-nil pointer. + v.Set(reflect.ValueOf(x).Elem()) for i := 0; i < v.NumField(); i++ { if !listJsonFields.needAny(v.Type().Field(i).Name) { v.Field(i).SetZero() } } + x = v.Interface() } b, err := json.MarshalIndent(x, "", "\t") if err != nil { @@ -723,6 +727,9 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { b.IsCmdList = true b.NeedExport = *listExport b.NeedCompiledGoFiles = *listCompiled + if cfg.Experiment.CoverageRedesign && cfg.BuildCover { + load.PrepareForCoverageBuild(pkgs) + } a := &work.Action{} // TODO: Use pkgsFilter? 
for _, p := range pkgs { @@ -730,9 +737,6 @@ func runList(ctx context.Context, cmd *base.Command, args []string) { a.Deps = append(a.Deps, b.AutoAction(work.ModeInstall, work.ModeInstall, p)) } } - if cfg.Experiment.CoverageRedesign && cfg.BuildCover { - load.PrepareForCoverageBuild(pkgs) - } b.Do(ctx, a) } diff --git a/src/cmd/go/internal/load/pkg.go b/src/cmd/go/internal/load/pkg.go index 1549800afb..0e4b6797c6 100644 --- a/src/cmd/go/internal/load/pkg.go +++ b/src/cmd/go/internal/load/pkg.go @@ -2306,7 +2306,7 @@ func (p *Package) setBuildInfo(ctx context.Context, autoVCS bool) { } if mi.Replace != nil { dm.Replace = debugModFromModinfo(mi.Replace) - } else if mi.Version != "" { + } else if mi.Version != "" && cfg.BuildMod != "vendor" { dm.Sum = modfetch.Sum(ctx, module.Version{Path: mi.Path, Version: mi.Version}) } return dm diff --git a/src/cmd/go/internal/load/test.go b/src/cmd/go/internal/load/test.go index d09ababfdd..3135805905 100644 --- a/src/cmd/go/internal/load/test.go +++ b/src/cmd/go/internal/load/test.go @@ -564,7 +564,7 @@ func recompileForTest(pmain, preal, ptest, pxtest *Package) *PackageError { } // isTestFunc tells whether fn has the type of a testing function. arg -// specifies the parameter type we look for: B, M or T. +// specifies the parameter type we look for: B, F, M or T. func isTestFunc(fn *ast.FuncDecl, arg string) bool { if fn.Type.Results != nil && len(fn.Type.Results.List) > 0 || fn.Type.Params.List == nil || @@ -579,7 +579,7 @@ func isTestFunc(fn *ast.FuncDecl, arg string) bool { // We can't easily check that the type is *testing.M // because we don't know how testing has been imported, // but at least check that it's *M or *something.M. - // Same applies for B and T. + // Same applies for B, F and T. 
if name, ok := ptr.X.(*ast.Ident); ok && name.Name == arg { return true } diff --git a/src/cmd/go/internal/mmap/mmap.go b/src/cmd/go/internal/mmap/mmap.go index 0cad9caf27..fcbd3e08c1 100644 --- a/src/cmd/go/internal/mmap/mmap.go +++ b/src/cmd/go/internal/mmap/mmap.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/go/internal/mmap/mmap_other.go b/src/cmd/go/internal/mmap/mmap_other.go index 22e9395b21..4d2844fc37 100644 --- a/src/cmd/go/internal/mmap/mmap_other.go +++ b/src/cmd/go/internal/mmap/mmap_other.go @@ -1,4 +1,4 @@ -// Copyright 2022 The Go Authors. All rights reserved. +// Copyright 2022 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/go/internal/mmap/mmap_unix.go b/src/cmd/go/internal/mmap/mmap_unix.go index 53bcbb92a8..5dce872368 100644 --- a/src/cmd/go/internal/mmap/mmap_unix.go +++ b/src/cmd/go/internal/mmap/mmap_unix.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/cmd/go/internal/mmap/mmap_windows.go b/src/cmd/go/internal/mmap/mmap_windows.go index 1cf62feca3..d00bef71e5 100644 --- a/src/cmd/go/internal/mmap/mmap_windows.go +++ b/src/cmd/go/internal/mmap/mmap_windows.go @@ -1,4 +1,4 @@ -// Copyright 2011 The Go Authors. All rights reserved. +// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/src/cmd/go/internal/modcmd/init.go b/src/cmd/go/internal/modcmd/init.go index e4be73fab0..facdaa9911 100644 --- a/src/cmd/go/internal/modcmd/init.go +++ b/src/cmd/go/internal/modcmd/init.go @@ -25,9 +25,6 @@ module path argument is omitted, init will attempt to infer the module path using import comments in .go files, vendoring tool configuration files (like Gopkg.lock), and the current directory (if in GOPATH). -If a configuration file for a vendoring tool is present, init will attempt to -import module requirements from it. - See https://golang.org/ref/mod#go-mod-init for more about 'go mod init'. `, Run: runInit, diff --git a/src/cmd/go/internal/modcmd/vendor.go b/src/cmd/go/internal/modcmd/vendor.go index 3db85bda53..44e0439f68 100644 --- a/src/cmd/go/internal/modcmd/vendor.go +++ b/src/cmd/go/internal/modcmd/vendor.go @@ -324,7 +324,13 @@ func vendorPkg(vdir, pkg string) { } embeds, err := load.ResolveEmbed(bp.Dir, embedPatterns) if err != nil { - base.Fatal(err) + format := "go: resolving embeds in %s: %v\n" + if vendorE { + fmt.Fprintf(os.Stderr, format, pkg, err) + } else { + base.Errorf(format, pkg, err) + } + return } for _, embed := range embeds { embedDst := filepath.Join(dst, embed) @@ -333,23 +339,30 @@ func vendorPkg(vdir, pkg string) { } // Copy the file as is done by copyDir below. 
- r, err := os.Open(filepath.Join(src, embed)) + err := func() error { + r, err := os.Open(filepath.Join(src, embed)) + if err != nil { + return err + } + if err := os.MkdirAll(filepath.Dir(embedDst), 0777); err != nil { + return err + } + w, err := os.Create(embedDst) + if err != nil { + return err + } + if _, err := io.Copy(w, r); err != nil { + return err + } + r.Close() + return w.Close() + }() if err != nil { - base.Fatal(err) - } - if err := os.MkdirAll(filepath.Dir(embedDst), 0777); err != nil { - base.Fatal(err) - } - w, err := os.Create(embedDst) - if err != nil { - base.Fatal(err) - } - if _, err := io.Copy(w, r); err != nil { - base.Fatal(err) - } - r.Close() - if err := w.Close(); err != nil { - base.Fatal(err) + if vendorE { + fmt.Fprintf(os.Stderr, "go: %v\n", err) + } else { + base.Error(err) + } } } } diff --git a/src/cmd/go/internal/modcmd/verify.go b/src/cmd/go/internal/modcmd/verify.go index 4552ed1ba2..d07f730c5d 100644 --- a/src/cmd/go/internal/modcmd/verify.go +++ b/src/cmd/go/internal/modcmd/verify.go @@ -61,7 +61,7 @@ func runVerify(ctx context.Context, cmd *base.Command, args []string) { if err != nil { base.Fatal(err) } - mods := mg.BuildList()[modload.MainModules.Len():] + mods := mg.BuildList() // Use a slice of result channels, so that the output is deterministic. errsChans := make([]<-chan []error, len(mods)) @@ -94,6 +94,9 @@ func verifyMod(ctx context.Context, mod module.Version) []error { // "go" and "toolchain" have no disk footprint; nothing to verify. 
return nil } + if modload.MainModules.Contains(mod.Path) { + return nil + } var errs []error zip, zipErr := modfetch.CachePath(ctx, mod, "zip") if zipErr == nil { diff --git a/src/cmd/go/internal/modfetch/codehost/codehost.go b/src/cmd/go/internal/modfetch/codehost/codehost.go index ca57762786..69a3c57e26 100644 --- a/src/cmd/go/internal/modfetch/codehost/codehost.go +++ b/src/cmd/go/internal/modfetch/codehost/codehost.go @@ -95,6 +95,8 @@ type Origin struct { URL string `json:",omitempty"` // URL of repository Subdir string `json:",omitempty"` // subdirectory in repo + Hash string `json:",omitempty"` // commit hash or ID + // If TagSum is non-empty, then the resolution of this module version // depends on the set of tags present in the repo, specifically the tags // of the form TagPrefix + a valid semver version. @@ -111,8 +113,7 @@ type Origin struct { // and the Hash is the Git object hash the ref maps to. // Other VCS might choose differently, but the idea is that Ref is the name // with a mutable meaning while Hash is a name with an immutable meaning. - Ref string `json:",omitempty"` - Hash string `json:",omitempty"` + Ref string `json:",omitempty"` // If RepoSum is non-empty, then the resolution of this module version // failed due to the repo being available but the version not being present. @@ -121,21 +122,6 @@ type Origin struct { RepoSum string `json:",omitempty"` } -// Checkable reports whether the Origin contains anything that can be checked. -// If not, the Origin is purely informational and should fail a CheckReuse call. -func (o *Origin) Checkable() bool { - return o.TagSum != "" || o.Ref != "" || o.Hash != "" || o.RepoSum != "" -} - -// ClearCheckable clears the Origin enough to make Checkable return false. -func (o *Origin) ClearCheckable() { - o.TagSum = "" - o.TagPrefix = "" - o.Ref = "" - o.Hash = "" - o.RepoSum = "" -} - // A Tags describes the available tags in a code repository. 
type Tags struct { Origin *Origin diff --git a/src/cmd/go/internal/modfetch/codehost/git.go b/src/cmd/go/internal/modfetch/codehost/git.go index d1a18a8d58..7d9e5d82f9 100644 --- a/src/cmd/go/internal/modfetch/codehost/git.go +++ b/src/cmd/go/internal/modfetch/codehost/git.go @@ -18,6 +18,7 @@ import ( "os/exec" "path/filepath" "runtime" + "slices" "sort" "strconv" "strings" @@ -154,7 +155,7 @@ type gitRepo struct { refsErr error localTagsOnce sync.Once - localTags map[string]bool + localTags sync.Map // map[string]bool } const ( @@ -166,7 +167,6 @@ const ( // loadLocalTags loads tag references from the local git cache // into the map r.localTags. -// Should only be called as r.localTagsOnce.Do(r.loadLocalTags). func (r *gitRepo) loadLocalTags(ctx context.Context) { // The git protocol sends all known refs and ls-remote filters them on the client side, // so we might as well record both heads and tags in one shot. @@ -176,10 +176,9 @@ func (r *gitRepo) loadLocalTags(ctx context.Context) { return } - r.localTags = make(map[string]bool) for _, line := range strings.Split(string(out), "\n") { if line != "" { - r.localTags[line] = true + r.localTags.Store(line, true) } } } @@ -430,7 +429,7 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro // Maybe rev is a tag we already have locally. // (Note that we're excluding branches, which can be stale.) r.localTagsOnce.Do(func() { r.loadLocalTags(ctx) }) - if r.localTags[rev] { + if _, ok := r.localTags.Load(rev); ok { return r.statLocal(ctx, rev, "refs/tags/"+rev) } @@ -506,11 +505,18 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro // Either way, try a local stat before falling back to network I/O. if !didStatLocal { if info, err := r.statLocal(ctx, rev, hash); err == nil { - if after, found := strings.CutPrefix(ref, "refs/tags/"); found { - // Make sure tag exists, so it will be in localTags next time the go command is run. 
- Run(ctx, r.dir, "git", "tag", after, hash) + tag, fromTag := strings.CutPrefix(ref, "refs/tags/") + if fromTag && !slices.Contains(info.Tags, tag) { + // The local repo includes the commit hash we want, but it is missing + // the corresponding tag. Add that tag and try again. + _, err := Run(ctx, r.dir, "git", "tag", tag, hash) + if err != nil { + return nil, err + } + r.localTags.Store(tag, true) + return r.statLocal(ctx, rev, ref) } - return info, nil + return info, err } } @@ -524,13 +530,7 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro if r.fetchLevel <= fetchSome && ref != "" && hash != "" && !r.local { r.fetchLevel = fetchSome var refspec string - if ref != "" && ref != "HEAD" { - // If we do know the ref name, save the mapping locally - // so that (if it is a tag) it can show up in localTags - // on a future call. Also, some servers refuse to allow - // full hashes in ref specs, so prefer a ref name if known. - refspec = ref + ":" + ref - } else { + if ref == "HEAD" { // Fetch the hash but give it a local name (refs/dummy), // because that triggers the fetch behavior of creating any // other known remote tags for the hash. We never use @@ -538,13 +538,23 @@ func (r *gitRepo) stat(ctx context.Context, rev string) (info *RevInfo, err erro // overwritten in the next command, and that's fine. ref = hash refspec = hash + ":refs/dummy" + } else { + // If we do know the ref name, save the mapping locally + // so that (if it is a tag) it can show up in localTags + // on a future call. Also, some servers refuse to allow + // full hashes in ref specs, so prefer a ref name if known. 
+ refspec = ref + ":" + ref } release, err := base.AcquireNet() if err != nil { return nil, err } - _, err = Run(ctx, r.dir, "git", "fetch", "-f", "--depth=1", r.remote, refspec) + // We explicitly set protocol.version=2 for this command to work around + // an apparent Git bug introduced in Git 2.21 (commit 61c771), + // which causes the handler for protocol version 1 to sometimes miss + // tags that point to the requested commit (see https://go.dev/issue/56881). + _, err = Run(ctx, r.dir, "git", "fetch", "-f", "-c", "protocol.version=2", "--depth=1", r.remote, refspec) release() if err == nil { diff --git a/src/cmd/go/internal/modfetch/codehost/git_test.go b/src/cmd/go/internal/modfetch/codehost/git_test.go index 328ab5bf58..dba9935b58 100644 --- a/src/cmd/go/internal/modfetch/codehost/git_test.go +++ b/src/cmd/go/internal/modfetch/codehost/git_test.go @@ -280,9 +280,6 @@ func TestLatest(t *testing.T) { t.Fatal(err) } if !reflect.DeepEqual(info, tt.info) { - if !reflect.DeepEqual(info.Tags, tt.info.Tags) { - testenv.SkipFlaky(t, 56881) - } t.Errorf("Latest: incorrect info\nhave %+v (origin %+v)\nwant %+v (origin %+v)", info, info.Origin, tt.info, tt.info.Origin) } } @@ -661,9 +658,6 @@ func TestStat(t *testing.T) { } info.Origin = nil // TestLatest and ../../../testdata/script/reuse_git.txt test Origin well enough if !reflect.DeepEqual(info, tt.info) { - if !reflect.DeepEqual(info.Tags, tt.info.Tags) { - testenv.SkipFlaky(t, 56881) - } t.Errorf("Stat: incorrect info\nhave %+v\nwant %+v", *info, *tt.info) } } diff --git a/src/cmd/go/internal/modfetch/coderepo.go b/src/cmd/go/internal/modfetch/coderepo.go index 4f10f1f5dd..75c34e9fcb 100644 --- a/src/cmd/go/internal/modfetch/coderepo.go +++ b/src/cmd/go/internal/modfetch/coderepo.go @@ -362,7 +362,7 @@ func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers } tags, tagsErr := r.code.Tags(ctx, prefix) if tagsErr != nil { - origin.ClearCheckable() + revInfo.Origin = nil if err == nil { err = 
tagsErr } @@ -514,10 +514,20 @@ func (r *codeRepo) convert(ctx context.Context, info *codehost.RevInfo, statVers // Determine version. if module.IsPseudoVersion(statVers) { + // Validate the go.mod location and major version before + // we check for an ancestor tagged with the pseude-version base. + // + // We can rule out an invalid subdirectory or major version with only + // shallow commit information, but checking the pseudo-version base may + // require downloading a (potentially more expensive) full history. + revInfo, err = checkCanonical(statVers) + if err != nil { + return revInfo, err + } if err := r.validatePseudoVersion(ctx, info, statVers); err != nil { return nil, err } - return checkCanonical(statVers) + return revInfo, nil } // statVers is not a pseudo-version, so we need to either resolve it to a diff --git a/src/cmd/go/internal/modfetch/fetch.go b/src/cmd/go/internal/modfetch/fetch.go index eeab6da62a..ce801d34f2 100644 --- a/src/cmd/go/internal/modfetch/fetch.go +++ b/src/cmd/go/internal/modfetch/fetch.go @@ -569,6 +569,47 @@ func HaveSum(mod module.Version) bool { return false } +// RecordedSum returns the sum if the go.sum file contains an entry for mod. +// The boolean reports true if an entry was found or +// false if no entry found or two conflicting sums are found. +// The entry's hash must be generated with a known hash algorithm. +// mod.Version may have a "/go.mod" suffix to distinguish sums for +// .mod and .zip files. 
+func RecordedSum(mod module.Version) (sum string, ok bool) { + goSum.mu.Lock() + defer goSum.mu.Unlock() + inited, err := initGoSum() + foundSum := "" + if err != nil || !inited { + return "", false + } + for _, goSums := range goSum.w { + for _, h := range goSums[mod] { + if !strings.HasPrefix(h, "h1:") { + continue + } + if !goSum.status[modSum{mod, h}].dirty { + if foundSum != "" && foundSum != h { // conflicting sums exist + return "", false + } + foundSum = h + } + } + } + for _, h := range goSum.m[mod] { + if !strings.HasPrefix(h, "h1:") { + continue + } + if !goSum.status[modSum{mod, h}].dirty { + if foundSum != "" && foundSum != h { // conflicting sums exist + return "", false + } + foundSum = h + } + } + return foundSum, true +} + // checkMod checks the given module's checksum and Go version. func checkMod(ctx context.Context, mod module.Version) { // Do the file I/O before acquiring the go.sum lock. diff --git a/src/cmd/go/internal/modget/query.go b/src/cmd/go/internal/modget/query.go index b78c1c4621..498ba6c2ff 100644 --- a/src/cmd/go/internal/modget/query.go +++ b/src/cmd/go/internal/modget/query.go @@ -55,7 +55,7 @@ type query struct { // path. matchWildcard func(path string) bool - // canMatchWildcard, if non-nil, reports whether the module with the given + // canMatchWildcardInModule, if non-nil, reports whether the module with the given // path could lexically contain a package matching pattern, which must be a // wildcard. canMatchWildcardInModule func(mPath string) bool diff --git a/src/cmd/go/internal/modindex/read.go b/src/cmd/go/internal/modindex/read.go index 83d5faf28f..bda3fb4338 100644 --- a/src/cmd/go/internal/modindex/read.go +++ b/src/cmd/go/internal/modindex/read.go @@ -124,7 +124,7 @@ var ( errNotFromModuleCache = fmt.Errorf("%w: not from module cache", ErrNotIndexed) ) -// GetPackage returns the IndexPackage for the package at the given path. +// GetPackage returns the IndexPackage for the directory at the given path. 
// It will return ErrNotIndexed if the directory should be read without // using the index, for instance because the index is disabled, or the package // is not in a module. @@ -669,11 +669,9 @@ func IsStandardPackage(goroot_, compiler, path string) bool { reldir = str.TrimFilePathPrefix(reldir, "cmd") modroot = filepath.Join(modroot, "cmd") } - if _, err := GetPackage(modroot, filepath.Join(modroot, reldir)); err == nil { - // Note that goroot.IsStandardPackage doesn't check that the directory - // actually contains any go files-- merely that it exists. GetPackage - // returning a nil error is enough for us to know the directory exists. - return true + if pkg, err := GetPackage(modroot, filepath.Join(modroot, reldir)); err == nil { + hasGo, err := pkg.IsDirWithGoFiles() + return err == nil && hasGo } else if errors.Is(err, ErrNotIndexed) { // Fall back because package isn't indexable. (Probably because // a file was modified recently) @@ -786,8 +784,8 @@ func shouldBuild(sf *sourceFile, tags map[string]bool) bool { return true } -// IndexPackage holds the information needed to access information in the -// index needed to load a package in a specific directory. +// IndexPackage holds the information in the index +// needed to load a package in a specific directory. 
type IndexPackage struct { error error dir string // directory of the package relative to the modroot diff --git a/src/cmd/go/internal/modinfo/info.go b/src/cmd/go/internal/modinfo/info.go index b0adcbcfb3..336f99245a 100644 --- a/src/cmd/go/internal/modinfo/info.go +++ b/src/cmd/go/internal/modinfo/info.go @@ -14,24 +14,25 @@ import ( // and the fields are documented in the help text in ../list/list.go type ModulePublic struct { - Path string `json:",omitempty"` // module path - Version string `json:",omitempty"` // module version - Query string `json:",omitempty"` // version query corresponding to this version - Versions []string `json:",omitempty"` // available module versions - Replace *ModulePublic `json:",omitempty"` // replaced by this module - Time *time.Time `json:",omitempty"` // time version was created - Update *ModulePublic `json:",omitempty"` // available update (with -u) - Main bool `json:",omitempty"` // is this the main module? - Indirect bool `json:",omitempty"` // module is only indirectly needed by main module - Dir string `json:",omitempty"` // directory holding local copy of files, if any - GoMod string `json:",omitempty"` // path to go.mod file describing module, if any - GoVersion string `json:",omitempty"` // go version used in module - Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) - Deprecated string `json:",omitempty"` // deprecation message, if any (with -u) - Error *ModuleError `json:",omitempty"` // error loading module - - Origin *codehost.Origin `json:",omitempty"` // provenance of module - Reuse bool `json:",omitempty"` // reuse of old module info is safe + Path string `json:",omitempty"` // module path + Version string `json:",omitempty"` // module version + Query string `json:",omitempty"` // version query corresponding to this version + Versions []string `json:",omitempty"` // available module versions + Replace *ModulePublic `json:",omitempty"` // replaced by this module + Time 
*time.Time `json:",omitempty"` // time version was created + Update *ModulePublic `json:",omitempty"` // available update (with -u) + Main bool `json:",omitempty"` // is this the main module? + Indirect bool `json:",omitempty"` // module is only indirectly needed by main module + Dir string `json:",omitempty"` // directory holding local copy of files, if any + GoMod string `json:",omitempty"` // path to go.mod file describing module, if any + GoVersion string `json:",omitempty"` // go version used in module + Retracted []string `json:",omitempty"` // retraction information, if any (with -retracted or -u) + Deprecated string `json:",omitempty"` // deprecation message, if any (with -u) + Error *ModuleError `json:",omitempty"` // error loading module + Sum string `json:",omitempty"` // checksum for path, version (as in go.sum) + GoModSum string `json:",omitempty"` // checksum for go.mod (as in go.sum) + Origin *codehost.Origin `json:",omitempty"` // provenance of module + Reuse bool `json:",omitempty"` // reuse of old module info is safe } type ModuleError struct { diff --git a/src/cmd/go/internal/modload/build.go b/src/cmd/go/internal/modload/build.go index ff545ac81d..6e30afd524 100644 --- a/src/cmd/go/internal/modload/build.go +++ b/src/cmd/go/internal/modload/build.go @@ -161,59 +161,63 @@ func addUpdate(ctx context.Context, m *modinfo.ModulePublic) { } } -// mergeOrigin merges two origins, +// mergeOrigin returns the union of data from two origins, // returning either a new origin or one of its unmodified arguments. -// If the two origins conflict, mergeOrigin returns a non-specific one -// that will not pass CheckReuse. -// If m1 or m2 is nil, the other is returned unmodified. -// But if m1 or m2 is non-nil and uncheckable, the result is also uncheckable, -// to preserve uncheckability. +// If the two origins conflict including if either is nil, +// mergeOrigin returns nil. 
func mergeOrigin(m1, m2 *codehost.Origin) *codehost.Origin { - if m1 == nil { - return m2 - } - if m2 == nil { - return m1 - } - if !m1.Checkable() { - return m1 - } - if !m2.Checkable() { - return m2 + if m1 == nil || m2 == nil { + return nil } - merged := new(codehost.Origin) - *merged = *m1 // Clone to avoid overwriting fields in cached results. + if m2.VCS != m1.VCS || + m2.URL != m1.URL || + m2.Subdir != m1.Subdir { + return nil + } + merged := *m1 + if m2.Hash != "" { + if m1.Hash != "" && m1.Hash != m2.Hash { + return nil + } + merged.Hash = m2.Hash + } if m2.TagSum != "" { if m1.TagSum != "" && (m1.TagSum != m2.TagSum || m1.TagPrefix != m2.TagPrefix) { - merged.ClearCheckable() - return merged + return nil } merged.TagSum = m2.TagSum merged.TagPrefix = m2.TagPrefix } - if m2.Hash != "" { - if m1.Hash != "" && m1.Hash != m2.Hash { - merged.ClearCheckable() - return merged - } - merged.Hash = m2.Hash - } if m2.Ref != "" { if m1.Ref != "" && m1.Ref != m2.Ref { - merged.ClearCheckable() - return merged + return nil } merged.Ref = m2.Ref } - return merged + + switch { + case merged == *m1: + return m1 + case merged == *m2: + return m2 + default: + // Clone the result to avoid an alloc for merged + // if the result is equal to one of the arguments. + clone := merged + return &clone + } } // addVersions fills in m.Versions with the list of known versions. // Excluded versions will be omitted. If listRetracted is false, retracted // versions will also be omitted. func addVersions(ctx context.Context, m *modinfo.ModulePublic, listRetracted bool) { + // TODO(bcmills): Would it make sense to check for reuse here too? + // Perhaps that doesn't buy us much, though: we would always have to fetch + // all of the version tags to list the available versions anyway. 
+ allowed := CheckAllowed if listRetracted { allowed = CheckExclusions @@ -319,21 +323,23 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li return } - if old := reuse[module.Version{Path: m.Path, Version: m.Version}]; old != nil { - if err := checkReuse(ctx, m.Path, old.Origin); err == nil { - *m = *old - m.Query = "" - m.Dir = "" - return - } - } - checksumOk := func(suffix string) bool { return rs == nil || m.Version == "" || !mustHaveSums() || modfetch.HaveSum(module.Version{Path: m.Path, Version: m.Version + suffix}) } + mod := module.Version{Path: m.Path, Version: m.Version} + if m.Version != "" { + if old := reuse[mod]; old != nil { + if err := checkReuse(ctx, mod, old.Origin); err == nil { + *m = *old + m.Query = "" + m.Dir = "" + return + } + } + if q, err := Query(ctx, m.Path, m.Version, "", nil); err != nil { m.Error = &modinfo.ModuleError{Err: err.Error()} } else { @@ -341,7 +347,6 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li m.Time = &q.Time } } - mod := module.Version{Path: m.Path, Version: m.Version} if m.GoVersion == "" && checksumOk("/go.mod") { // Load the go.mod file to determine the Go version, since it hasn't @@ -359,12 +364,18 @@ func moduleInfo(ctx context.Context, rs *Requirements, m module.Version, mode Li m.GoMod = gomod } } + if gomodsum, ok := modfetch.RecordedSum(modkey(mod)); ok { + m.GoModSum = gomodsum + } } if checksumOk("") { dir, err := modfetch.DownloadDir(ctx, mod) if err == nil { m.Dir = dir } + if sum, ok := modfetch.RecordedSum(mod); ok { + m.Sum = sum + } } if mode&ListRetracted != 0 { diff --git a/src/cmd/go/internal/modload/init.go b/src/cmd/go/internal/modload/init.go index f4f4a68254..23db438da1 100644 --- a/src/cmd/go/internal/modload/init.go +++ b/src/cmd/go/internal/modload/init.go @@ -1343,9 +1343,16 @@ func appendGoAndToolchainRoots(roots []module.Version, goVersion, toolchain stri func setDefaultBuildMod() { if cfg.BuildModExplicit { if 
inWorkspaceMode() && cfg.BuildMod != "readonly" && cfg.BuildMod != "vendor" { - base.Fatalf("go: -mod may only be set to readonly or vendor when in workspace mode, but it is set to %q"+ - "\n\tRemove the -mod flag to use the default readonly value, "+ - "\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod) + switch cfg.CmdName { + case "work sync", "mod graph", "mod verify", "mod why": + // These commands run with BuildMod set to mod, but they don't take the + // -mod flag, so we should never get here. + panic("in workspace mode and -mod was set explicitly, but command doesn't support setting -mod") + default: + base.Fatalf("go: -mod may only be set to readonly or vendor when in workspace mode, but it is set to %q"+ + "\n\tRemove the -mod flag to use the default readonly value, "+ + "\n\tor set GOWORK=off to disable workspace mode.", cfg.BuildMod) + } } // Don't override an explicit '-mod=' argument. return diff --git a/src/cmd/go/internal/modload/list.go b/src/cmd/go/internal/modload/list.go index e8872ba4b8..ef93c25121 100644 --- a/src/cmd/go/internal/modload/list.go +++ b/src/cmd/go/internal/modload/list.go @@ -57,8 +57,7 @@ func ListModules(ctx context.Context, args []string, mode ListMode, reuseFile st } return nil, fmt.Errorf("parsing %s: %v", reuseFile, err) } - if m.Origin == nil || !m.Origin.Checkable() { - // Nothing to check to validate reuse. + if m.Origin == nil { continue } m.Reuse = true diff --git a/src/cmd/go/internal/modload/query.go b/src/cmd/go/internal/modload/query.go index 9bd9c6b9a4..c4cf55442b 100644 --- a/src/cmd/go/internal/modload/query.go +++ b/src/cmd/go/internal/modload/query.go @@ -98,18 +98,83 @@ func queryReuse(ctx context.Context, path, query, current string, allowed Allowe return info, err } -// checkReuse checks whether a revision of a given module or a version list +// checkReuse checks whether a revision of a given module // for a given module may be reused, according to the information in origin. 
-func checkReuse(ctx context.Context, path string, old *codehost.Origin) error { +func checkReuse(ctx context.Context, m module.Version, old *codehost.Origin) error { return modfetch.TryProxies(func(proxy string) error { - repo, err := lookupRepo(ctx, proxy, path) + repo, err := lookupRepo(ctx, proxy, m.Path) if err != nil { return err } - return repo.CheckReuse(ctx, old) + return checkReuseRepo(ctx, repo, m.Path, m.Version, old) }) } +func checkReuseRepo(ctx context.Context, repo versionRepo, path, query string, origin *codehost.Origin) error { + if origin == nil { + return errors.New("nil Origin") + } + + // Ensure that the Origin actually includes enough fields to resolve the query. + // If we got the previous Origin data from a proxy, it may be missing something + // that we would have needed to resolve the query directly from the repo. + switch { + case origin.RepoSum != "": + // A RepoSum is always acceptable, since it incorporates everything + // (and is often associated with an error result). + + case query == module.CanonicalVersion(query): + // This query refers to a specific version, and Go module versions + // are supposed to be cacheable and immutable (confirmed with checksums). + // If the version exists at all, we shouldn't need any extra information + // to identify which commit it resolves to. + // + // It may be associated with a Ref for a semantic-version tag, but if so + // we don't expect that tag to change in the future. We also don't need a + // TagSum: if a tag is removed from some ancestor commit, the version may + // change from valid to invalid, but we're ok with keeping stale versions + // as long as they were valid at some point in the past. + // + // If the version did not successfully resolve, the origin may indicate + // a TagSum and/or RepoSum instead of a Hash, in which case we still need + // to check those to ensure that the error is still applicable. 
+ if origin.Hash == "" && origin.Ref == "" && origin.TagSum == "" { + return errors.New("no Origin information to check") + } + + case IsRevisionQuery(path, query): + // This query may refer to a branch, non-version tag, or commit ID. + // + // If it is a commit ID, we expect to see a Hash in the Origin data. On + // the other hand, if it is not a commit ID, we expect to see either a Ref + // (for a positive result) or a RepoSum (for a negative result), since + // we don't expect refs in general to remain stable over time. + if origin.Hash == "" && origin.Ref == "" { + return fmt.Errorf("query %q requires a Hash or Ref", query) + } + // Once we resolve the query to a particular commit, we will need to + // also identify the most appropriate version to assign to that commit. + // (It may correspond to more than one valid version.) + // + // The most appropriate version depends on the tags associated with + // both the commit itself (if the commit is a tagged version) + // and its ancestors (if we need to produce a pseudo-version for it). + if origin.TagSum == "" { + return fmt.Errorf("query %q requires a TagSum", query) + } + + default: + // The query may be "latest" or a version inequality or prefix. + // Its result depends on the absence of higher tags matching the query, + // not just the state of an individual ref or tag. + if origin.TagSum == "" { + return fmt.Errorf("query %q requires a TagSum", query) + } + } + + return repo.CheckReuse(ctx, origin) +} + // AllowedFunc is used by Query and other functions to filter out unsuitable // versions, for example, those listed in exclude directives in the main // module's go.mod file. 
@@ -164,7 +229,7 @@ func queryProxy(ctx context.Context, proxy, path, query, current string, allowed } if old := reuse[module.Version{Path: path, Version: query}]; old != nil { - if err := repo.CheckReuse(ctx, old.Origin); err == nil { + if err := checkReuseRepo(ctx, repo, path, query, old.Origin); err == nil { info := &modfetch.RevInfo{ Version: old.Version, Origin: old.Origin, @@ -826,11 +891,12 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod // is most likely to find helpful: the most useful class of error at the // longest matching path. var ( - noPackage *PackageNotInModuleError - noVersion *NoMatchingVersionError - noPatchBase *NoPatchBaseError - invalidPath *module.InvalidPathError // see comment in case below - notExistErr error + noPackage *PackageNotInModuleError + noVersion *NoMatchingVersionError + noPatchBase *NoPatchBaseError + invalidPath *module.InvalidPathError // see comment in case below + invalidVersion error + notExistErr error ) for _, r := range results { switch rErr := r.err.(type) { @@ -866,6 +932,10 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod if notExistErr == nil { notExistErr = rErr } + } else if iv := (*module.InvalidVersionError)(nil); errors.As(rErr, &iv) { + if invalidVersion == nil { + invalidVersion = rErr + } } else if err == nil { if len(found) > 0 || noPackage != nil { // golang.org/issue/34094: If we have already found a module that @@ -896,6 +966,8 @@ func queryPrefixModules(ctx context.Context, candidateModules []string, queryMod err = noPatchBase case invalidPath != nil: err = invalidPath + case invalidVersion != nil: + err = invalidVersion case notExistErr != nil: err = notExistErr default: diff --git a/src/cmd/go/internal/str/str.go b/src/cmd/go/internal/str/str.go index af7c699972..94be202ba2 100644 --- a/src/cmd/go/internal/str/str.go +++ b/src/cmd/go/internal/str/str.go @@ -88,16 +88,6 @@ func FoldDup(list []string) (string, string) { return "", 
"" } -// Contains reports whether x contains s. -func Contains(x []string, s string) bool { - for _, t := range x { - if t == s { - return true - } - } - return false -} - // Uniq removes consecutive duplicate strings from ss. func Uniq(ss *[]string) { if len(*ss) <= 1 { diff --git a/src/cmd/go/internal/test/test.go b/src/cmd/go/internal/test/test.go index 8a40547f2e..08fac5f395 100644 --- a/src/cmd/go/internal/test/test.go +++ b/src/cmd/go/internal/test/test.go @@ -21,6 +21,7 @@ import ( "strconv" "strings" "sync" + "sync/atomic" "time" "cmd/go/internal/base" @@ -126,7 +127,7 @@ elapsed time in the summary line. The rule for a match in the cache is that the run involves the same test binary and the flags on the command line come entirely from a restricted set of 'cacheable' test flags, defined as -benchtime, -cpu, --list, -parallel, -run, -short, -timeout, -failfast, and -v. +-list, -parallel, -run, -short, -timeout, -failfast, -fullpath and -v. If a run of go test has any test or non-test flags outside this set, the result is not cached. To disable test caching, use any test flag or argument other than the cacheable flags. 
The idiomatic way to disable @@ -540,6 +541,7 @@ var ( testC bool // -c flag testCoverPkgs []*load.Package // -coverpkg flag testCoverProfile string // -coverprofile flag + testFailFast bool // -failfast flag testFuzz string // -fuzz flag testJSON bool // -json flag testList string // -list flag @@ -589,9 +591,10 @@ var ( testHelp bool // -help option passed to test via -args - testKillTimeout = 100 * 365 * 24 * time.Hour // backup alarm; defaults to about a century if no timeout is set - testWaitDelay time.Duration // how long to wait for output to close after a test binary exits; zero means unlimited - testCacheExpire time.Time // ignore cached test results before this time + testKillTimeout = 100 * 365 * 24 * time.Hour // backup alarm; defaults to about a century if no timeout is set + testWaitDelay time.Duration // how long to wait for output to close after a test binary exits; zero means unlimited + testCacheExpire time.Time // ignore cached test results before this time + testShouldFailFast atomic.Bool // signals pending tests to fail fast testBlockProfile, testCPUProfile, testMemProfile, testMutexProfile, testTrace string // profiling flag that limits test to one package @@ -1355,6 +1358,11 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) // Wait for previous test to get started and print its first json line. select { case <-r.prev: + // If should fail fast then release next test and exit. 
+ if testShouldFailFast.Load() { + close(r.next) + return nil + } case <-base.Interrupted: // We can't wait for the previous test action to complete: we don't start // new actions after an interrupt, so if that action wasn't already running @@ -1396,7 +1404,7 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) if p := a.Package; len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { reportNoTestFiles := true - if cfg.BuildCover && cfg.Experiment.CoverageRedesign { + if cfg.BuildCover && cfg.Experiment.CoverageRedesign && p.Internal.Cover.GenMeta { if err := sh.Mkdir(a.Objdir); err != nil { return err } @@ -1631,6 +1639,10 @@ func (r *runTestActor) Act(b *work.Builder, ctx context.Context, a *work.Action) fmt.Fprintf(cmd.Stdout, "ok \t%s\t%s%s%s\n", a.Package.ImportPath, t, coveragePercentage(out), norun) r.c.saveOutput(a) } else { + if testFailFast { + testShouldFailFast.Store(true) + } + base.SetExitStatus(1) if cancelSignaled { fmt.Fprintf(cmd.Stdout, "*** Test killed with %v: ran too long (%v).\n", base.SignalTrace, testKillTimeout) @@ -1717,7 +1729,8 @@ func (c *runCache) tryCacheWithID(b *work.Builder, a *work.Action, id string) bo "-test.short", "-test.timeout", "-test.failfast", - "-test.v": + "-test.v", + "-test.fullpath": // These are cacheable. // Note that this list is documented above, // so if you add to this list, update the docs too. 
diff --git a/src/cmd/go/internal/test/testflag.go b/src/cmd/go/internal/test/testflag.go index 425378889d..4686e550fd 100644 --- a/src/cmd/go/internal/test/testflag.go +++ b/src/cmd/go/internal/test/testflag.go @@ -48,7 +48,7 @@ func init() { cf.Int("count", 0, "") cf.String("cpu", "", "") cf.StringVar(&testCPUProfile, "cpuprofile", "", "") - cf.Bool("failfast", false, "") + cf.BoolVar(&testFailFast, "failfast", false, "") cf.StringVar(&testFuzz, "fuzz", "", "") cf.Bool("fullpath", false, "") cf.StringVar(&testList, "list", "", "") diff --git a/src/cmd/go/internal/toolchain/select.go b/src/cmd/go/internal/toolchain/select.go index 9fd1549a61..dcf3be92cc 100644 --- a/src/cmd/go/internal/toolchain/select.go +++ b/src/cmd/go/internal/toolchain/select.go @@ -8,6 +8,7 @@ package toolchain import ( "context" "errors" + "flag" "fmt" "go/build" "io/fs" @@ -24,6 +25,7 @@ import ( "cmd/go/internal/modfetch" "cmd/go/internal/modload" "cmd/go/internal/run" + "cmd/go/internal/work" "golang.org/x/mod/module" ) @@ -486,74 +488,132 @@ func goInstallVersion() bool { // Note: We assume there are no flags between 'go' and 'install' or 'run'. // During testing there are some debugging flags that are accepted // in that position, but in production go binaries there are not. - if len(os.Args) < 3 || (os.Args[1] != "install" && os.Args[1] != "run") { + if len(os.Args) < 3 { return false } - // Check for pkg@version. - var arg string + var cmdFlags *flag.FlagSet switch os.Args[1] { default: + // Command doesn't support a pkg@version as the main module. return false case "install": - // We would like to let 'go install -newflag pkg@version' work even - // across a toolchain switch. To make that work, assume the pkg@version - // is the last argument and skip the flag parsing. 
- arg = os.Args[len(os.Args)-1] + cmdFlags = &work.CmdInstall.Flag case "run": - // For run, the pkg@version can be anywhere on the command line, - // because it is preceded by run flags and followed by arguments to the - // program being run. To handle that precisely, we have to interpret the - // flags a little bit, to know whether each flag takes an optional argument. - // We can still allow unknown flags as long as they have an explicit =value. - args := os.Args[2:] - for i := 0; i < len(args); i++ { - a := args[i] - if !strings.HasPrefix(a, "-") { - arg = a - break - } - if a == "-" { - // non-flag but also non-pkg@version + cmdFlags = &run.CmdRun.Flag + } + + // The modcachrw flag is unique, in that it affects how we fetch the + // requested module to even figure out what toolchain it needs. + // We need to actually set it before we check the toolchain version. + // (See https://go.dev/issue/64282.) + modcacherwFlag := cmdFlags.Lookup("modcacherw") + if modcacherwFlag == nil { + base.Fatalf("internal error: modcacherw flag not registered for command") + } + modcacherwVal, ok := modcacherwFlag.Value.(interface { + IsBoolFlag() bool + flag.Value + }) + if !ok || !modcacherwVal.IsBoolFlag() { + base.Fatalf("internal error: modcacherw is not a boolean flag") + } + + // Make a best effort to parse the command's args to find the pkg@version + // argument and the -modcacherw flag. + var ( + pkgArg string + modcacherwSeen bool + ) + for args := os.Args[2:]; len(args) > 0; { + a := args[0] + args = args[1:] + if a == "--" { + if len(args) == 0 { return false } - if a == "--" { - if i+1 >= len(args) { - return false + pkgArg = args[0] + break + } + + a, ok := strings.CutPrefix(a, "-") + if !ok { + // Not a flag argument. Must be a package. + pkgArg = a + break + } + a = strings.TrimPrefix(a, "-") // Treat --flag as -flag. 
+ + name, val, hasEq := strings.Cut(a, "=") + + if name == "modcacherw" { + if !hasEq { + val = "true" + } + if err := modcacherwVal.Set(val); err != nil { + return false + } + modcacherwSeen = true + continue + } + + if hasEq { + // Already has a value; don't bother parsing it. + continue + } + + f := run.CmdRun.Flag.Lookup(a) + if f == nil { + // We don't know whether this flag is a boolean. + if os.Args[1] == "run" { + // We don't know where to find the pkg@version argument. + // For run, the pkg@version can be anywhere on the command line, + // because it is preceded by run flags and followed by arguments to the + // program being run. Since we don't know whether this flag takes + // an argument, we can't reliably identify the end of the run flags. + // Just give up and let the user clarify using the "=" form.. + return false + } + + // We would like to let 'go install -newflag pkg@version' work even + // across a toolchain switch. To make that work, assume by default that + // the pkg@version is the last argument and skip the remaining args unless + // we spot a plausible "-modcacherw" flag. + for len(args) > 0 { + a := args[0] + name, _, _ := strings.Cut(a, "=") + if name == "-modcacherw" || name == "--modcacherw" { + break } - arg = args[i+1] - break + if len(args) == 1 && !strings.HasPrefix(a, "-") { + pkgArg = a + } + args = args[1:] } - a = strings.TrimPrefix(a, "-") - a = strings.TrimPrefix(a, "-") - if strings.HasPrefix(a, "-") { - // non-flag but also non-pkg@version - return false - } - if strings.Contains(a, "=") { - // already has value - continue - } - f := run.CmdRun.Flag.Lookup(a) - if f == nil { - // Unknown flag. Give up. The command is going to fail in flag parsing. - return false - } - if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); ok && bf.IsBoolFlag() { - // Does not take value. - continue - } - i++ // Does take a value; skip it. 
+ continue + } + + if bf, ok := f.Value.(interface{ IsBoolFlag() bool }); !ok || !bf.IsBoolFlag() { + // The next arg is the value for this flag. Skip it. + args = args[1:] + continue } } - if !strings.Contains(arg, "@") || build.IsLocalImport(arg) || filepath.IsAbs(arg) { + + if !strings.Contains(pkgArg, "@") || build.IsLocalImport(pkgArg) || filepath.IsAbs(pkgArg) { return false } - path, version, _ := strings.Cut(arg, "@") + path, version, _ := strings.Cut(pkgArg, "@") if path == "" || version == "" || gover.IsToolchain(path) { return false } + if !modcacherwSeen && base.InGOFLAGS("-modcacherw") { + fs := flag.NewFlagSet("goInstallVersion", flag.ExitOnError) + fs.Var(modcacherwVal, "modcacherw", modcacherwFlag.Usage) + base.SetFromGOFLAGS(fs) + } + // It would be correct to simply return true here, bypassing use // of the current go.mod or go.work, and let "go run" or "go install" // do the rest, including a toolchain switch. diff --git a/src/cmd/go/internal/trace/trace.go b/src/cmd/go/internal/trace/trace.go index 17d3ee9e7f..f96aa40002 100644 --- a/src/cmd/go/internal/trace/trace.go +++ b/src/cmd/go/internal/trace/trace.go @@ -5,10 +5,10 @@ package trace import ( - "cmd/internal/traceviewer" "context" "encoding/json" "errors" + "internal/trace/traceviewer/format" "os" "strings" "sync/atomic" @@ -47,7 +47,7 @@ func StartSpan(ctx context.Context, name string) (context.Context, *Span) { return ctx, nil } childSpan := &Span{t: tc.t, name: name, tid: tc.tid, start: time.Now()} - tc.t.writeEvent(&traceviewer.Event{ + tc.t.writeEvent(&format.Event{ Name: childSpan.name, Time: float64(childSpan.start.UnixNano()) / float64(time.Microsecond), TID: childSpan.tid, @@ -77,7 +77,7 @@ func Flow(ctx context.Context, from *Span, to *Span) { } id := tc.t.getNextFlowID() - tc.t.writeEvent(&traceviewer.Event{ + tc.t.writeEvent(&format.Event{ Name: from.name + " -> " + to.name, Category: "flow", ID: id, @@ -85,7 +85,7 @@ func Flow(ctx context.Context, from *Span, to *Span) { 
Phase: phaseFlowStart, TID: from.tid, }) - tc.t.writeEvent(&traceviewer.Event{ + tc.t.writeEvent(&format.Event{ Name: from.name + " -> " + to.name, Category: "flow", // TODO(matloob): Add Category to Flow? ID: id, @@ -110,7 +110,7 @@ func (s *Span) Done() { return } s.end = time.Now() - s.t.writeEvent(&traceviewer.Event{ + s.t.writeEvent(&format.Event{ Name: s.name, Time: float64(s.end.UnixNano()) / float64(time.Microsecond), TID: s.tid, @@ -125,7 +125,7 @@ type tracer struct { nextFlowID atomic.Uint64 } -func (t *tracer) writeEvent(ev *traceviewer.Event) error { +func (t *tracer) writeEvent(ev *format.Event) error { f := <-t.file defer func() { t.file <- f }() var err error diff --git a/src/cmd/go/internal/vcs/vcs.go b/src/cmd/go/internal/vcs/vcs.go index 8550f2a560..19a6a5ef6b 100644 --- a/src/cmd/go/internal/vcs/vcs.go +++ b/src/cmd/go/internal/vcs/vcs.go @@ -331,12 +331,12 @@ func gitStatus(vcsGit *Cmd, rootDir string) (Status, error) { } uncommitted := len(out) > 0 - // "git status" works for empty repositories, but "git show" does not. - // Assume there are no commits in the repo when "git show" fails with + // "git status" works for empty repositories, but "git log" does not. + // Assume there are no commits in the repo when "git log" fails with // uncommitted files and skip tagging revision / committime. 
var rev string var commitTime time.Time - out, err = vcsGit.runOutputVerboseOnly(rootDir, "-c log.showsignature=false show -s --format=%H:%ct") + out, err = vcsGit.runOutputVerboseOnly(rootDir, "-c log.showsignature=false log -1 --format=%H:%ct") if err != nil && !uncommitted { return Status{}, err } else if err == nil { diff --git a/src/cmd/go/internal/vcweb/git.go b/src/cmd/go/internal/vcweb/git.go index 316c2382ba..d1e0563bed 100644 --- a/src/cmd/go/internal/vcweb/git.go +++ b/src/cmd/go/internal/vcweb/git.go @@ -37,16 +37,35 @@ func (h *gitHandler) Handler(dir string, env []string, logger *log.Logger) (http return nil, ServerNotInstalledError{name: "git"} } - handler := &cgi.Handler{ - Path: h.gitPath, - Logger: logger, - Args: []string{"http-backend"}, - Dir: dir, - Env: append(slices.Clip(env), - "GIT_PROJECT_ROOT="+dir, - "GIT_HTTP_EXPORT_ALL=1", - ), - } + baseEnv := append(slices.Clip(env), + "GIT_PROJECT_ROOT="+dir, + "GIT_HTTP_EXPORT_ALL=1", + ) + + handler := http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + // The Git client sends the requested Git protocol version as a + // "Git-Protocol" HTTP request header, which the CGI host then converts + // to an environment variable (HTTP_GIT_PROTOCOL). + // + // However, versions of Git older than 2.34.0 don't recognize the + // HTTP_GIT_PROTOCOL variable, and instead need that value to be set in the + // GIT_PROTOCOL variable. We do so here so that vcweb can work reliably + // with older Git releases. (As of the time of writing, the Go project's + // builders were on Git version 2.30.2.)
+ env := slices.Clip(baseEnv) + if p := req.Header.Get("Git-Protocol"); p != "" { + env = append(env, "GIT_PROTOCOL="+p) + } + + h := &cgi.Handler{ + Path: h.gitPath, + Logger: logger, + Args: []string{"http-backend"}, + Dir: dir, + Env: env, + } + h.ServeHTTP(w, req) + }) return handler, nil } diff --git a/src/cmd/go/internal/work/build.go b/src/cmd/go/internal/work/build.go index 408edb5119..ccfb4622e2 100644 --- a/src/cmd/go/internal/work/build.go +++ b/src/cmd/go/internal/work/build.go @@ -276,8 +276,6 @@ func (c buildCompiler) Set(value string) error { return fmt.Errorf("unknown compiler %q", value) } cfg.BuildToolchainName = value - cfg.BuildToolchainCompiler = BuildToolchain.compiler - cfg.BuildToolchainLinker = BuildToolchain.linker cfg.BuildContext.Compiler = value return nil } diff --git a/src/cmd/go/internal/work/buildid.go b/src/cmd/go/internal/work/buildid.go index 276f524afa..bf923d0d5e 100644 --- a/src/cmd/go/internal/work/buildid.go +++ b/src/cmd/go/internal/work/buildid.go @@ -238,8 +238,28 @@ func (b *Builder) gccToolID(name, language string) (id, exe string, err error) { version := "" lines := strings.Split(string(out), "\n") for _, line := range lines { - if fields := strings.Fields(line); len(fields) > 1 && fields[1] == "version" || len(fields) > 2 && fields[2] == "version" { - version = line + fields := strings.Fields(line) + for i, field := range fields { + if strings.HasSuffix(field, ":") { + // Avoid parsing fields of lines like "Configured with: …", which may + // contain arbitrary substrings. + break + } + if field == "version" && i < len(fields)-1 { + // Check that the next field is plausibly a version number. + // We require only that it begins with an ASCII digit, + // since we don't know what version numbering schemes a given + // C compiler may use. 
(Clang and GCC mostly seem to follow the scheme X.Y.Z, + // but in https://go.dev/issue/64619 we saw "8.3 [DragonFly]", and who knows + // what other C compilers like "zig cc" might report?) + next := fields[i+1] + if len(next) > 0 && next[0] >= '0' && next[0] <= '9' { + version = line + break + } + } + } + if version != "" { break } } diff --git a/src/cmd/go/internal/work/exec.go b/src/cmd/go/internal/work/exec.go index ecad324886..92aa0c1dc5 100644 --- a/src/cmd/go/internal/work/exec.go +++ b/src/cmd/go/internal/work/exec.go @@ -258,9 +258,12 @@ func (b *Builder) buildActionID(a *Action) cache.ActionID { // when building things in GOROOT. // // The C compiler does not, but for packages in GOROOT we rewrite the path - // as though -trimpath were set, so that we don't invalidate the build cache - // (and especially any precompiled C archive files) when changing - // GOROOT_FINAL. (See https://go.dev/issue/50183.) + // as though -trimpath were set. This used to be so that we did not invalidate + // the build cache (and especially precompiled archive files) when changing + // GOROOT_FINAL, but we no longer ship precompiled archive files as of Go 1.20 + // (https://go.dev/issue/47257) and no longer support GOROOT_FINAL + // (https://go.dev/issue/62047). + // TODO(bcmills): Figure out whether this behavior is still useful. // // b.WorkDir is always either trimmed or rewritten to // the literal string "/tmp/go-build". @@ -629,19 +632,6 @@ OverlayLoop: } } - // Run SWIG on each .swig and .swigcxx file. - // Each run will generate two files, a .go file and a .c or .cxx file. - // The .go file will use import "C" and is to be processed by cgo. - if p.UsesSwig() { - outGo, outC, outCXX, err := b.swig(a, objdir, pcCFLAGS) - if err != nil { - return err - } - cgofiles = append(cgofiles, outGo...) - cfiles = append(cfiles, outC...) - cxxfiles = append(cxxfiles, outCXX...) 
- } - // If we're doing coverage, preprocess the .go files and put them in the work directory if p.Internal.Cover.Mode != "" { outfiles := []string{} @@ -722,6 +712,22 @@ OverlayLoop: } } + // Run SWIG on each .swig and .swigcxx file. + // Each run will generate two files, a .go file and a .c or .cxx file. + // The .go file will use import "C" and is to be processed by cgo. + // For -cover test or build runs, this needs to happen after the cover + // tool is run; we don't want to instrument swig-generated Go files, + // see issue #64661. + if p.UsesSwig() { + outGo, outC, outCXX, err := b.swig(a, objdir, pcCFLAGS) + if err != nil { + return err + } + cgofiles = append(cgofiles, outGo...) + cfiles = append(cfiles, outC...) + cxxfiles = append(cxxfiles, outCXX...) + } + // Run cgo. if p.UsesCgo() || p.UsesSwig() { // In a package using cgo, cgo compiles the C, C++ and assembly files with gcc. @@ -1400,11 +1406,11 @@ func (b *Builder) printLinkerConfig(h io.Writer, p *load.Package) { fmt.Fprintf(h, "GOEXPERIMENT=%q\n", cfg.CleanGOEXPERIMENT) } - // The linker writes source file paths that say GOROOT_FINAL, but - // only if -trimpath is not specified (see ld() in gc.go). - gorootFinal := cfg.GOROOT_FINAL + // The linker writes source file paths that refer to GOROOT, + // but only if -trimpath is not specified (see [gctoolchain.ld] in gc.go). + gorootFinal := cfg.GOROOT if cfg.BuildTrimpath { - gorootFinal = trimPathGoRootFinal + gorootFinal = "" } fmt.Fprintf(h, "GOROOT=%s\n", gorootFinal) @@ -2127,7 +2133,7 @@ func (b *Builder) ccompile(a *Action, outfile string, flags []string, file strin file = mkAbs(p.Dir, file) outfile = mkAbs(p.Dir, outfile) - // Elide source directory paths if -trimpath or GOROOT_FINAL is set. + // Elide source directory paths if -trimpath is set. // This is needed for source files (e.g., a .c file in a package directory). // TODO(golang.org/issue/36072): cgo also generates files with #line // directives pointing to the source directory. 
It should not generate those @@ -3066,12 +3072,12 @@ func (b *Builder) dynimport(a *Action, objdir, importGo, cgoExe string, cflags, ldflags := cgoLDFLAGS if (cfg.Goarch == "arm" && cfg.Goos == "linux") || cfg.Goos == "android" { - if !str.Contains(ldflags, "-no-pie") { + if !slices.Contains(ldflags, "-no-pie") { // we need to use -pie for Linux/ARM to get accurate imported sym (added in https://golang.org/cl/5989058) // this seems to be outdated, but we don't want to break existing builds depending on this (Issue 45940) ldflags = append(ldflags, "-pie") } - if str.Contains(ldflags, "-pie") && str.Contains(ldflags, "-static") { + if slices.Contains(ldflags, "-pie") && slices.Contains(ldflags, "-static") { // -static -pie doesn't make sense, and causes link errors. // Issue 26197. n := make([]string, 0, len(ldflags)-1) diff --git a/src/cmd/go/internal/work/gc.go b/src/cmd/go/internal/work/gc.go index e2a5456bde..a85b262374 100644 --- a/src/cmd/go/internal/work/gc.go +++ b/src/cmd/go/internal/work/gc.go @@ -29,9 +29,6 @@ import ( // Tests can override this by setting $TESTGO_TOOLCHAIN_VERSION. var ToolchainVersion = runtime.Version() -// The 'path' used for GOROOT_FINAL when -trimpath is specified -const trimPathGoRootFinal string = "$GOROOT" - // The Go toolchain. type gcToolchain struct{} @@ -361,13 +358,19 @@ func asmArgs(a *Action, p *load.Package) []any { } } + if cfg.Goarch == "riscv64" { + // Define GORISCV64_value from cfg.GORISCV64. + args = append(args, "-D", "GORISCV64_"+cfg.GORISCV64) + } + if cfg.Goarch == "arm" { - // Define GOARM_value from cfg.GOARM. - switch cfg.GOARM { - case "7": + // Define GOARM_value from cfg.GOARM, which can be either a version + // like "6", or a version and a FP mode, like "7,hardfloat". 
+ switch { + case strings.Contains(cfg.GOARM, "7"): args = append(args, "-D", "GOARM_7") fallthrough - case "6": + case strings.Contains(cfg.GOARM, "6"): args = append(args, "-D", "GOARM_6") fallthrough default: @@ -663,8 +666,11 @@ func (gcToolchain) ld(b *Builder, root *Action, targetPath, importcfg, mainpkg s } env := []string{} + // When -trimpath is used, GOROOT is cleared if cfg.BuildTrimpath { - env = append(env, "GOROOT_FINAL="+trimPathGoRootFinal) + env = append(env, "GOROOT=") + } else { + env = append(env, "GOROOT="+cfg.GOROOT) } return b.Shell(root).run(dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", targetPath, "-importcfg", importcfg, ldflags, mainpkg) } diff --git a/src/cmd/go/internal/work/security.go b/src/cmd/go/internal/work/security.go index 88504be6cd..3289276e77 100644 --- a/src/cmd/go/internal/work/security.go +++ b/src/cmd/go/internal/work/security.go @@ -210,8 +210,7 @@ var validLinkerFlags = []*lazyregexp.Regexp{ re(`-Wl,-?-unresolved-symbols=[^,]+`), re(`-Wl,--(no-)?warn-([^,]+)`), re(`-Wl,-?-wrap[=,][^,@\-][^,]*`), - re(`-Wl,-z,(no)?execstack`), - re(`-Wl,-z,relro`), + re(`-Wl(,-z,(relro|now|(no)?execstack))+`), re(`[a-zA-Z0-9_/].*\.(a|o|obj|dll|dylib|so|tbd)`), // direct linker inputs: x.o or libfoo.so (but not -foo.o or @foo.o) re(`\./.*\.(a|o|obj|dll|dylib|so|tbd)`), diff --git a/src/cmd/go/internal/work/security_test.go b/src/cmd/go/internal/work/security_test.go index c05ba7b9a4..a4c055670a 100644 --- a/src/cmd/go/internal/work/security_test.go +++ b/src/cmd/go/internal/work/security_test.go @@ -167,6 +167,10 @@ var goodLinkerFlags = [][]string{ {"-Wl,-framework", "-Wl,Chocolate"}, {"-Wl,-framework,Chocolate"}, {"-Wl,-unresolved-symbols=ignore-all"}, + {"-Wl,-z,relro"}, + {"-Wl,-z,relro,-z,now"}, + {"-Wl,-z,now"}, + {"-Wl,-z,noexecstack"}, {"libcgotbdtest.tbd"}, {"./libcgotbdtest.tbd"}, } diff --git a/src/cmd/go/internal/work/shell.go b/src/cmd/go/internal/work/shell.go index 6089170007..60817d9c3b 
100644 --- a/src/cmd/go/internal/work/shell.go +++ b/src/cmd/go/internal/work/shell.go @@ -494,7 +494,7 @@ func (sh *Shell) reportCmd(desc, dir string, cmdOut []byte, cmdErr error) error } // Usually desc is already p.Desc(), but if not, signal cmdError.Error to - // add a line explicitly metioning the import path. + // add a line explicitly mentioning the import path. needsPath := importPath != "" && p != nil && desc != p.Desc() err := &cmdError{desc, out, importPath, needsPath} diff --git a/src/cmd/go/main.go b/src/cmd/go/main.go index 7d4dedc1bb..c1433b47ad 100644 --- a/src/cmd/go/main.go +++ b/src/cmd/go/main.go @@ -3,12 +3,11 @@ // license that can be found in the LICENSE file. //go:generate go test cmd/go -v -run=^TestDocsUpToDate$ -fixdocs +//go:generate go test cmd/go -v -run=^TestCounterNamesUpToDate$ -update package main import ( - "cmd/go/internal/toolchain" - "cmd/go/internal/workcmd" "context" "flag" "fmt" @@ -16,7 +15,6 @@ import ( "log" "os" "path/filepath" - "runtime" rtrace "runtime/trace" "slices" "strings" @@ -39,10 +37,14 @@ import ( "cmd/go/internal/run" "cmd/go/internal/test" "cmd/go/internal/tool" + "cmd/go/internal/toolchain" "cmd/go/internal/trace" "cmd/go/internal/version" "cmd/go/internal/vet" "cmd/go/internal/work" + "cmd/go/internal/workcmd" + + "golang.org/x/telemetry/counter" ) func init() { @@ -90,11 +92,13 @@ var _ = go11tag func main() { log.SetFlags(0) + TelemetryStart() // Open the telemetry counter file so counters can be written to it. 
handleChdirFlag() toolchain.Select() flag.Usage = base.Usage flag.Parse() + counter.CountFlags("cmd/go/flag:", *flag.CommandLine) args := flag.Args() if len(args) < 1 { @@ -107,10 +111,19 @@ func main() { return } + if cfg.GOROOT == "" { + fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: 'go' binary is trimmed and GOROOT is not set\n") + os.Exit(2) + } + if fi, err := os.Stat(cfg.GOROOT); err != nil || !fi.IsDir() { + fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: %v\n", cfg.GOROOT) + os.Exit(2) + } + // Diagnose common mistake: GOPATH==GOROOT. // This setting is equivalent to not setting GOPATH at all, // which is not what most people want when they do it. - if gopath := cfg.BuildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(runtime.GOROOT()) { + if gopath := cfg.BuildContext.GOPATH; filepath.Clean(gopath) == filepath.Clean(cfg.GOROOT) { fmt.Fprintf(os.Stderr, "warning: GOPATH set to GOROOT (%s) has no effect\n", gopath) } else { for _, p := range filepath.SplitList(gopath) { @@ -139,15 +152,6 @@ func main() { } } - if cfg.GOROOT == "" { - fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: 'go' binary is trimmed and GOROOT is not set\n") - os.Exit(2) - } - if fi, err := os.Stat(cfg.GOROOT); err != nil || !fi.IsDir() { - fmt.Fprintf(os.Stderr, "go: cannot find GOROOT directory: %v\n", cfg.GOROOT) - os.Exit(2) - } - cmd, used := lookupCmd(args) cfg.CmdName = strings.Join(args[:used], " ") if len(cmd.Commands) > 0 { @@ -158,6 +162,7 @@ func main() { } if args[used] == "help" { // Accept 'go mod help' and 'go mod help foo' for 'go help mod' and 'go help mod foo'. 
+ counter.Inc("cmd/go/subcommand:" + strings.ReplaceAll(cfg.CmdName, " ", "-") + "-" + strings.Join(args[used:], "-")) help.Help(os.Stdout, append(slices.Clip(args[:used]), args[used+1:]...)) base.Exit() } @@ -169,10 +174,12 @@ func main() { if cmdName == "" { cmdName = args[0] } + counter.Inc("cmd/go/subcommand:unknown") fmt.Fprintf(os.Stderr, "go %s: unknown command\nRun 'go help%s' for usage.\n", cmdName, helpArg) base.SetExitStatus(2) base.Exit() } + counter.Inc("cmd/go/subcommand:" + strings.ReplaceAll(cfg.CmdName, " ", "-")) invoke(cmd, args[used-1:]) base.Exit() } @@ -237,6 +244,7 @@ func invoke(cmd *base.Command, args []string) { } else { base.SetFromGOFLAGS(&cmd.Flag) cmd.Flag.Parse(args[1:]) + counter.CountFlags("cmd/go/flag:"+strings.ReplaceAll(cfg.CmdName, " ", "-")+"-", cmd.Flag) args = cmd.Flag.Args() } @@ -321,6 +329,7 @@ func handleChdirFlag() { _, dir, _ = strings.Cut(a, "=") os.Args = slices.Delete(os.Args, used, used+1) } + counter.Inc("cmd/go/flag:C") if err := os.Chdir(dir); err != nil { base.Fatalf("go: %v", err) diff --git a/src/cmd/go/script_test.go b/src/cmd/go/script_test.go index 624c5bf501..6efa9217de 100644 --- a/src/cmd/go/script_test.go +++ b/src/cmd/go/script_test.go @@ -13,6 +13,7 @@ import ( "bufio" "bytes" "context" + _ "embed" "flag" "internal/testenv" "internal/txtar" @@ -21,6 +22,7 @@ import ( "path/filepath" "runtime" "strings" + "sync" "testing" "time" @@ -29,6 +31,8 @@ import ( "cmd/go/internal/script" "cmd/go/internal/script/scripttest" "cmd/go/internal/vcweb/vcstest" + + "golang.org/x/telemetry/counter/countertest" ) var testSum = flag.String("testsum", "", `may be tidy, listm, or listall. 
If set, TestScript generates a go.sum file at the beginning of each test and updates test files if they pass.`) @@ -124,7 +128,7 @@ func TestScript(t *testing.T) { if err != nil { t.Fatal(err) } - initScriptDirs(t, s) + telemetryDir := initScriptDirs(t, s) if err := s.ExtractFiles(a); err != nil { t.Fatal(err) } @@ -154,6 +158,7 @@ func TestScript(t *testing.T) { // will work better seeing the full path relative to cmd/go // (where the "go test" command is usually run). scripttest.Run(t, engine, s, file, bytes.NewReader(a.Comment)) + checkCounters(t, telemetryDir) }) } } @@ -177,7 +182,7 @@ func tbFromContext(ctx context.Context) (testing.TB, bool) { // initScriptState creates the initial directory structure in s for unpacking a // cmd/go script. -func initScriptDirs(t testing.TB, s *script.State) { +func initScriptDirs(t testing.TB, s *script.State) (telemetryDir string) { must := func(err error) { if err != nil { t.Helper() @@ -188,6 +193,10 @@ func initScriptDirs(t testing.TB, s *script.State) { work := s.Getwd() must(s.Setenv("WORK", work)) + telemetryDir = filepath.Join(work, "telemetry") + must(os.MkdirAll(telemetryDir, 0777)) + must(s.Setenv("TESTGO_TELEMETRY_DIR", filepath.Join(work, "telemetry"))) + must(os.MkdirAll(filepath.Join(work, "tmp"), 0777)) must(s.Setenv(tempEnvName(), filepath.Join(work, "tmp"))) @@ -196,6 +205,7 @@ func initScriptDirs(t testing.TB, s *script.State) { gopathSrc := filepath.Join(gopath, "src") must(os.MkdirAll(gopathSrc, 0777)) must(s.Chdir(gopathSrc)) + return telemetryDir } func scriptEnv(srv *vcstest.Server, srvCertFile string) ([]string, error) { @@ -223,7 +233,6 @@ func scriptEnv(srv *vcstest.Server, srvCertFile string) ([]string, error) { "GOPROXY=" + proxyURL, "GOPRIVATE=", "GOROOT=" + testGOROOT, - "GOROOT_FINAL=" + testGOROOT_FINAL, // causes spurious rebuilds and breaks the "stale" built-in if not propagated "GOTRACEBACK=system", "TESTGONETWORK=panic", // allow only local connections by default; the [net] condition 
resets this "TESTGO_GOROOT=" + testGOROOT, @@ -358,3 +367,53 @@ func updateSum(t testing.TB, e *script.Engine, s *script.State, archive *txtar.A } return rewrite } + +func readCounters(t *testing.T, telemetryDir string) map[string]uint64 { + localDir := filepath.Join(telemetryDir, "local") + dirents, err := os.ReadDir(localDir) + if err != nil { + if os.IsNotExist(err) { + return nil // The Go command didn't ever run so the local dir wasn't created + } + t.Fatalf("reading telemetry local dir: %v", err) + } + totals := map[string]uint64{} + for _, dirent := range dirents { + if dirent.IsDir() || !strings.HasSuffix(dirent.Name(), ".count") { + // not a counter file + continue + } + counters, _, err := countertest.ReadFile(filepath.Join(localDir, dirent.Name())) + if err != nil { + t.Fatalf("reading counter file: %v", err) + } + for k, v := range counters { + totals[k] += v + } + } + + return totals +} + +//go:embed testdata/counters.txt +var countersTxt string + +var ( + allowedCountersOnce sync.Once + allowedCounters = map[string]bool{} // Set of allowed counters. +) + +func checkCounters(t *testing.T, telemetryDir string) { + allowedCountersOnce.Do(func() { + for _, counter := range strings.Fields(countersTxt) { + allowedCounters[counter] = true + } + }) + counters := readCounters(t, telemetryDir) + for name := range counters { + if !allowedCounters[name] { + t.Fatalf("incremented counter %q is not in testdata/counters.txt. 
"+ + "Please update counters_test.go to produce an entry for it.", name) + } + } +} diff --git a/src/cmd/go/scriptconds_test.go b/src/cmd/go/scriptconds_test.go index 8dd9b0d1cd..3f11af272b 100644 --- a/src/cmd/go/scriptconds_test.go +++ b/src/cmd/go/scriptconds_test.go @@ -51,10 +51,10 @@ func scriptConditions() map[string]script.Cond { add("GOEXPERIMENT", script.PrefixCondition("GOEXPERIMENT is enabled", hasGoexperiment)) add("go-builder", script.BoolCondition("GO_BUILDER_NAME is non-empty", testenv.Builder() != "")) add("link", lazyBool("testenv.HasLink()", testenv.HasLink)) - add("mismatched-goroot", script.Condition("test's GOROOT_FINAL does not match the real GOROOT", isMismatchedGoroot)) add("msan", sysCondition("-msan", platform.MSanSupported, true)) add("mustlinkext", script.Condition("platform always requires external linking", mustLinkExt)) add("net", script.PrefixCondition("can connect to external network host ", hasNet)) + add("pielinkext", script.Condition("platform requires external linking for PIE", pieLinkExt)) add("race", sysCondition("-race", platform.RaceDetectorSupported, true)) add("symlink", lazyBool("testenv.HasSymlink()", testenv.HasSymlink)) add("trimpath", script.OnceCondition("test binary was built with -trimpath", isTrimpath)) @@ -84,14 +84,6 @@ func ccIs(s *script.State, want string) (bool, error) { return cfg.DefaultCC(GOOS, GOARCH) == want, nil } -func isMismatchedGoroot(s *script.State) (bool, error) { - gorootFinal, _ := s.LookupEnv("GOROOT_FINAL") - if gorootFinal == "" { - gorootFinal, _ = s.LookupEnv("GOROOT") - } - return gorootFinal != testGOROOT, nil -} - func sysCondition(flag string, f func(goos, goarch string) bool, needsCgo bool) script.Cond { return script.Condition( "GOOS/GOARCH supports "+flag, @@ -233,3 +225,9 @@ func mustLinkExt(s *script.State) (bool, error) { GOARCH, _ := s.LookupEnv("GOARCH") return platform.MustLinkExternal(GOOS, GOARCH, false), nil } + +func pieLinkExt(s *script.State) (bool, error) { + GOOS, _ 
:= s.LookupEnv("GOOS") + GOARCH, _ := s.LookupEnv("GOARCH") + return !platform.InternalLinkPIESupported(GOOS, GOARCH), nil +} diff --git a/src/cmd/go/scriptreadme_test.go b/src/cmd/go/scriptreadme_test.go index 2a842fbc0f..26c7aa19f0 100644 --- a/src/cmd/go/scriptreadme_test.go +++ b/src/cmd/go/scriptreadme_test.go @@ -120,7 +120,6 @@ Scripts also have access to other environment variables, including: GOPATH=$WORK/gopath GOPROXY= GOROOT= - GOROOT_FINAL= TESTGO_GOROOT= HOME=/no-home PATH= diff --git a/src/cmd/go/telemetry.go b/src/cmd/go/telemetry.go new file mode 100644 index 0000000000..ac7a6a9ed4 --- /dev/null +++ b/src/cmd/go/telemetry.go @@ -0,0 +1,13 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !cmd_go_bootstrap + +package main + +import "golang.org/x/telemetry" + +var TelemetryStart = func() { + telemetry.Start(telemetry.Config{Upload: true}) +} diff --git a/src/cmd/go/telemetry_bootstrap.go b/src/cmd/go/telemetry_bootstrap.go new file mode 100644 index 0000000000..8bacf219a2 --- /dev/null +++ b/src/cmd/go/telemetry_bootstrap.go @@ -0,0 +1,9 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build cmd_go_bootstrap + +package main + +var TelemetryStart = func() {} diff --git a/src/cmd/go/testdata/counters.txt b/src/cmd/go/testdata/counters.txt new file mode 100644 index 0000000000..5e1a565cfd --- /dev/null +++ b/src/cmd/go/testdata/counters.txt @@ -0,0 +1,661 @@ +cmd/go/flag:C +cmd/go/subcommand:unknown +cmd/go/flag:fixdocs +cmd/go/flag:fixreadme +cmd/go/flag:flaky +cmd/go/flag:proxy +cmd/go/flag:test.bench +cmd/go/flag:test.benchmem +cmd/go/flag:test.benchtime +cmd/go/flag:test.blockprofile +cmd/go/flag:test.blockprofilerate +cmd/go/flag:test.count +cmd/go/flag:test.coverprofile +cmd/go/flag:test.cpu +cmd/go/flag:test.cpuprofile +cmd/go/flag:test.failfast +cmd/go/flag:test.fullpath +cmd/go/flag:test.fuzz +cmd/go/flag:test.fuzzcachedir +cmd/go/flag:test.fuzzminimizetime +cmd/go/flag:test.fuzztime +cmd/go/flag:test.fuzzworker +cmd/go/flag:test.gocoverdir +cmd/go/flag:test.list +cmd/go/flag:test.memprofile +cmd/go/flag:test.memprofilerate +cmd/go/flag:test.mutexprofile +cmd/go/flag:test.mutexprofilefraction +cmd/go/flag:test.outputdir +cmd/go/flag:test.paniconexit0 +cmd/go/flag:test.parallel +cmd/go/flag:test.run +cmd/go/flag:test.short +cmd/go/flag:test.shuffle +cmd/go/flag:test.skip +cmd/go/flag:test.testlogfile +cmd/go/flag:test.timeout +cmd/go/flag:test.trace +cmd/go/flag:test.v +cmd/go/flag:testsum +cmd/go/flag:testwork +cmd/go/flag:update +cmd/go/subcommand:bug +cmd/go/flag:bug-C +cmd/go/flag:bug-v +cmd/go/subcommand:help-bug +cmd/go/subcommand:build +cmd/go/flag:build-C +cmd/go/flag:build-a +cmd/go/flag:build-asan +cmd/go/flag:build-asmflags +cmd/go/flag:build-buildmode +cmd/go/flag:build-buildvcs +cmd/go/flag:build-compiler +cmd/go/flag:build-cover +cmd/go/flag:build-covermode +cmd/go/flag:build-coverpkg +cmd/go/flag:build-debug-actiongraph +cmd/go/flag:build-debug-runtime-trace +cmd/go/flag:build-debug-trace +cmd/go/flag:build-gccgoflags +cmd/go/flag:build-gcflags +cmd/go/flag:build-installsuffix +cmd/go/flag:build-ldflags 
+cmd/go/flag:build-linkshared +cmd/go/flag:build-mod +cmd/go/flag:build-modcacherw +cmd/go/flag:build-modfile +cmd/go/flag:build-msan +cmd/go/flag:build-n +cmd/go/flag:build-o +cmd/go/flag:build-overlay +cmd/go/flag:build-p +cmd/go/flag:build-pgo +cmd/go/flag:build-pkgdir +cmd/go/flag:build-race +cmd/go/flag:build-tags +cmd/go/flag:build-toolexec +cmd/go/flag:build-trimpath +cmd/go/flag:build-v +cmd/go/flag:build-work +cmd/go/flag:build-x +cmd/go/subcommand:help-build +cmd/go/subcommand:clean +cmd/go/flag:clean-C +cmd/go/flag:clean-a +cmd/go/flag:clean-asan +cmd/go/flag:clean-asmflags +cmd/go/flag:clean-buildmode +cmd/go/flag:clean-buildvcs +cmd/go/flag:clean-cache +cmd/go/flag:clean-compiler +cmd/go/flag:clean-debug-actiongraph +cmd/go/flag:clean-debug-runtime-trace +cmd/go/flag:clean-debug-trace +cmd/go/flag:clean-fuzzcache +cmd/go/flag:clean-gccgoflags +cmd/go/flag:clean-gcflags +cmd/go/flag:clean-i +cmd/go/flag:clean-installsuffix +cmd/go/flag:clean-ldflags +cmd/go/flag:clean-linkshared +cmd/go/flag:clean-mod +cmd/go/flag:clean-modcache +cmd/go/flag:clean-modcacherw +cmd/go/flag:clean-modfile +cmd/go/flag:clean-msan +cmd/go/flag:clean-n +cmd/go/flag:clean-overlay +cmd/go/flag:clean-p +cmd/go/flag:clean-pgo +cmd/go/flag:clean-pkgdir +cmd/go/flag:clean-r +cmd/go/flag:clean-race +cmd/go/flag:clean-tags +cmd/go/flag:clean-testcache +cmd/go/flag:clean-toolexec +cmd/go/flag:clean-trimpath +cmd/go/flag:clean-v +cmd/go/flag:clean-work +cmd/go/flag:clean-x +cmd/go/subcommand:help-clean +cmd/go/subcommand:doc +cmd/go/subcommand:help-doc +cmd/go/subcommand:env +cmd/go/flag:env-C +cmd/go/flag:env-json +cmd/go/flag:env-n +cmd/go/flag:env-u +cmd/go/flag:env-w +cmd/go/flag:env-x +cmd/go/subcommand:help-env +cmd/go/subcommand:fix +cmd/go/flag:fix-C +cmd/go/flag:fix-a +cmd/go/flag:fix-asan +cmd/go/flag:fix-asmflags +cmd/go/flag:fix-buildmode +cmd/go/flag:fix-buildvcs +cmd/go/flag:fix-compiler +cmd/go/flag:fix-debug-actiongraph +cmd/go/flag:fix-debug-runtime-trace 
+cmd/go/flag:fix-debug-trace +cmd/go/flag:fix-fix +cmd/go/flag:fix-gccgoflags +cmd/go/flag:fix-gcflags +cmd/go/flag:fix-installsuffix +cmd/go/flag:fix-ldflags +cmd/go/flag:fix-linkshared +cmd/go/flag:fix-mod +cmd/go/flag:fix-modcacherw +cmd/go/flag:fix-modfile +cmd/go/flag:fix-msan +cmd/go/flag:fix-n +cmd/go/flag:fix-overlay +cmd/go/flag:fix-p +cmd/go/flag:fix-pgo +cmd/go/flag:fix-pkgdir +cmd/go/flag:fix-race +cmd/go/flag:fix-tags +cmd/go/flag:fix-toolexec +cmd/go/flag:fix-trimpath +cmd/go/flag:fix-v +cmd/go/flag:fix-work +cmd/go/flag:fix-x +cmd/go/subcommand:help-fix +cmd/go/subcommand:fmt +cmd/go/flag:fmt-C +cmd/go/flag:fmt-mod +cmd/go/flag:fmt-modcacherw +cmd/go/flag:fmt-modfile +cmd/go/flag:fmt-n +cmd/go/flag:fmt-overlay +cmd/go/flag:fmt-x +cmd/go/subcommand:help-fmt +cmd/go/subcommand:generate +cmd/go/flag:generate-C +cmd/go/flag:generate-a +cmd/go/flag:generate-asan +cmd/go/flag:generate-asmflags +cmd/go/flag:generate-buildmode +cmd/go/flag:generate-buildvcs +cmd/go/flag:generate-compiler +cmd/go/flag:generate-debug-actiongraph +cmd/go/flag:generate-debug-runtime-trace +cmd/go/flag:generate-debug-trace +cmd/go/flag:generate-gccgoflags +cmd/go/flag:generate-gcflags +cmd/go/flag:generate-installsuffix +cmd/go/flag:generate-ldflags +cmd/go/flag:generate-linkshared +cmd/go/flag:generate-mod +cmd/go/flag:generate-modcacherw +cmd/go/flag:generate-modfile +cmd/go/flag:generate-msan +cmd/go/flag:generate-n +cmd/go/flag:generate-overlay +cmd/go/flag:generate-p +cmd/go/flag:generate-pgo +cmd/go/flag:generate-pkgdir +cmd/go/flag:generate-race +cmd/go/flag:generate-run +cmd/go/flag:generate-skip +cmd/go/flag:generate-tags +cmd/go/flag:generate-toolexec +cmd/go/flag:generate-trimpath +cmd/go/flag:generate-v +cmd/go/flag:generate-work +cmd/go/flag:generate-x +cmd/go/subcommand:help-generate +cmd/go/subcommand:get +cmd/go/flag:get-C +cmd/go/flag:get-a +cmd/go/flag:get-asan +cmd/go/flag:get-asmflags +cmd/go/flag:get-buildmode +cmd/go/flag:get-buildvcs 
+cmd/go/flag:get-compiler +cmd/go/flag:get-d +cmd/go/flag:get-debug-actiongraph +cmd/go/flag:get-debug-runtime-trace +cmd/go/flag:get-debug-trace +cmd/go/flag:get-f +cmd/go/flag:get-fix +cmd/go/flag:get-gccgoflags +cmd/go/flag:get-gcflags +cmd/go/flag:get-insecure +cmd/go/flag:get-installsuffix +cmd/go/flag:get-ldflags +cmd/go/flag:get-linkshared +cmd/go/flag:get-m +cmd/go/flag:get-modcacherw +cmd/go/flag:get-modfile +cmd/go/flag:get-msan +cmd/go/flag:get-n +cmd/go/flag:get-overlay +cmd/go/flag:get-p +cmd/go/flag:get-pgo +cmd/go/flag:get-pkgdir +cmd/go/flag:get-race +cmd/go/flag:get-t +cmd/go/flag:get-tags +cmd/go/flag:get-toolexec +cmd/go/flag:get-trimpath +cmd/go/flag:get-u +cmd/go/flag:get-v +cmd/go/flag:get-work +cmd/go/flag:get-x +cmd/go/subcommand:help-get +cmd/go/subcommand:install +cmd/go/flag:install-C +cmd/go/flag:install-a +cmd/go/flag:install-asan +cmd/go/flag:install-asmflags +cmd/go/flag:install-buildmode +cmd/go/flag:install-buildvcs +cmd/go/flag:install-compiler +cmd/go/flag:install-cover +cmd/go/flag:install-covermode +cmd/go/flag:install-coverpkg +cmd/go/flag:install-debug-actiongraph +cmd/go/flag:install-debug-runtime-trace +cmd/go/flag:install-debug-trace +cmd/go/flag:install-gccgoflags +cmd/go/flag:install-gcflags +cmd/go/flag:install-installsuffix +cmd/go/flag:install-ldflags +cmd/go/flag:install-linkshared +cmd/go/flag:install-mod +cmd/go/flag:install-modcacherw +cmd/go/flag:install-modfile +cmd/go/flag:install-msan +cmd/go/flag:install-n +cmd/go/flag:install-overlay +cmd/go/flag:install-p +cmd/go/flag:install-pgo +cmd/go/flag:install-pkgdir +cmd/go/flag:install-race +cmd/go/flag:install-tags +cmd/go/flag:install-toolexec +cmd/go/flag:install-trimpath +cmd/go/flag:install-v +cmd/go/flag:install-work +cmd/go/flag:install-x +cmd/go/subcommand:help-install +cmd/go/subcommand:list +cmd/go/flag:list-C +cmd/go/flag:list-a +cmd/go/flag:list-asan +cmd/go/flag:list-asmflags +cmd/go/flag:list-buildmode +cmd/go/flag:list-buildvcs 
+cmd/go/flag:list-compiled +cmd/go/flag:list-compiler +cmd/go/flag:list-cover +cmd/go/flag:list-covermode +cmd/go/flag:list-coverpkg +cmd/go/flag:list-debug-actiongraph +cmd/go/flag:list-debug-runtime-trace +cmd/go/flag:list-debug-trace +cmd/go/flag:list-deps +cmd/go/flag:list-e +cmd/go/flag:list-export +cmd/go/flag:list-f +cmd/go/flag:list-find +cmd/go/flag:list-gccgoflags +cmd/go/flag:list-gcflags +cmd/go/flag:list-installsuffix +cmd/go/flag:list-json +cmd/go/flag:list-ldflags +cmd/go/flag:list-linkshared +cmd/go/flag:list-m +cmd/go/flag:list-mod +cmd/go/flag:list-modcacherw +cmd/go/flag:list-modfile +cmd/go/flag:list-msan +cmd/go/flag:list-n +cmd/go/flag:list-overlay +cmd/go/flag:list-p +cmd/go/flag:list-pgo +cmd/go/flag:list-pkgdir +cmd/go/flag:list-race +cmd/go/flag:list-retracted +cmd/go/flag:list-reuse +cmd/go/flag:list-tags +cmd/go/flag:list-test +cmd/go/flag:list-toolexec +cmd/go/flag:list-trimpath +cmd/go/flag:list-u +cmd/go/flag:list-v +cmd/go/flag:list-versions +cmd/go/flag:list-work +cmd/go/flag:list-x +cmd/go/subcommand:help-list +cmd/go/subcommand:help-mod +cmd/go/subcommand:mod-download +cmd/go/flag:mod-download-C +cmd/go/flag:mod-download-json +cmd/go/flag:mod-download-modcacherw +cmd/go/flag:mod-download-modfile +cmd/go/flag:mod-download-overlay +cmd/go/flag:mod-download-reuse +cmd/go/flag:mod-download-x +cmd/go/subcommand:mod-help-download +cmd/go/subcommand:help-mod-download +cmd/go/subcommand:mod-edit +cmd/go/flag:mod-edit-C +cmd/go/flag:mod-edit-dropexclude +cmd/go/flag:mod-edit-dropreplace +cmd/go/flag:mod-edit-droprequire +cmd/go/flag:mod-edit-dropretract +cmd/go/flag:mod-edit-exclude +cmd/go/flag:mod-edit-fmt +cmd/go/flag:mod-edit-go +cmd/go/flag:mod-edit-json +cmd/go/flag:mod-edit-modcacherw +cmd/go/flag:mod-edit-modfile +cmd/go/flag:mod-edit-module +cmd/go/flag:mod-edit-n +cmd/go/flag:mod-edit-overlay +cmd/go/flag:mod-edit-print +cmd/go/flag:mod-edit-replace +cmd/go/flag:mod-edit-require +cmd/go/flag:mod-edit-retract 
+cmd/go/flag:mod-edit-toolchain +cmd/go/flag:mod-edit-x +cmd/go/subcommand:mod-help-edit +cmd/go/subcommand:help-mod-edit +cmd/go/subcommand:mod-graph +cmd/go/flag:mod-graph-C +cmd/go/flag:mod-graph-go +cmd/go/flag:mod-graph-modcacherw +cmd/go/flag:mod-graph-modfile +cmd/go/flag:mod-graph-overlay +cmd/go/flag:mod-graph-x +cmd/go/subcommand:mod-help-graph +cmd/go/subcommand:help-mod-graph +cmd/go/subcommand:mod-init +cmd/go/flag:mod-init-C +cmd/go/flag:mod-init-modcacherw +cmd/go/flag:mod-init-modfile +cmd/go/flag:mod-init-overlay +cmd/go/subcommand:mod-help-init +cmd/go/subcommand:help-mod-init +cmd/go/subcommand:mod-tidy +cmd/go/flag:mod-tidy-C +cmd/go/flag:mod-tidy-compat +cmd/go/flag:mod-tidy-e +cmd/go/flag:mod-tidy-go +cmd/go/flag:mod-tidy-modcacherw +cmd/go/flag:mod-tidy-modfile +cmd/go/flag:mod-tidy-overlay +cmd/go/flag:mod-tidy-v +cmd/go/flag:mod-tidy-x +cmd/go/subcommand:mod-help-tidy +cmd/go/subcommand:help-mod-tidy +cmd/go/subcommand:mod-vendor +cmd/go/flag:mod-vendor-C +cmd/go/flag:mod-vendor-e +cmd/go/flag:mod-vendor-modcacherw +cmd/go/flag:mod-vendor-modfile +cmd/go/flag:mod-vendor-o +cmd/go/flag:mod-vendor-overlay +cmd/go/flag:mod-vendor-v +cmd/go/subcommand:mod-help-vendor +cmd/go/subcommand:help-mod-vendor +cmd/go/subcommand:mod-verify +cmd/go/flag:mod-verify-C +cmd/go/flag:mod-verify-modcacherw +cmd/go/flag:mod-verify-modfile +cmd/go/flag:mod-verify-overlay +cmd/go/subcommand:mod-help-verify +cmd/go/subcommand:help-mod-verify +cmd/go/subcommand:mod-why +cmd/go/flag:mod-why-C +cmd/go/flag:mod-why-m +cmd/go/flag:mod-why-modcacherw +cmd/go/flag:mod-why-modfile +cmd/go/flag:mod-why-overlay +cmd/go/flag:mod-why-vendor +cmd/go/subcommand:mod-help-why +cmd/go/subcommand:help-mod-why +cmd/go/subcommand:help-work +cmd/go/subcommand:work-edit +cmd/go/flag:work-edit-C +cmd/go/flag:work-edit-dropreplace +cmd/go/flag:work-edit-dropuse +cmd/go/flag:work-edit-fmt +cmd/go/flag:work-edit-go +cmd/go/flag:work-edit-json +cmd/go/flag:work-edit-print 
+cmd/go/flag:work-edit-replace +cmd/go/flag:work-edit-toolchain +cmd/go/flag:work-edit-use +cmd/go/subcommand:work-help-edit +cmd/go/subcommand:help-work-edit +cmd/go/subcommand:work-init +cmd/go/flag:work-init-C +cmd/go/flag:work-init-modcacherw +cmd/go/flag:work-init-modfile +cmd/go/flag:work-init-overlay +cmd/go/subcommand:work-help-init +cmd/go/subcommand:help-work-init +cmd/go/subcommand:work-sync +cmd/go/flag:work-sync-C +cmd/go/flag:work-sync-modcacherw +cmd/go/flag:work-sync-modfile +cmd/go/flag:work-sync-overlay +cmd/go/subcommand:work-help-sync +cmd/go/subcommand:help-work-sync +cmd/go/subcommand:work-use +cmd/go/flag:work-use-C +cmd/go/flag:work-use-modcacherw +cmd/go/flag:work-use-modfile +cmd/go/flag:work-use-overlay +cmd/go/flag:work-use-r +cmd/go/subcommand:work-help-use +cmd/go/subcommand:help-work-use +cmd/go/subcommand:work-vendor +cmd/go/flag:work-vendor-C +cmd/go/flag:work-vendor-e +cmd/go/flag:work-vendor-modcacherw +cmd/go/flag:work-vendor-modfile +cmd/go/flag:work-vendor-o +cmd/go/flag:work-vendor-overlay +cmd/go/flag:work-vendor-v +cmd/go/subcommand:work-help-vendor +cmd/go/subcommand:help-work-vendor +cmd/go/subcommand:run +cmd/go/flag:run-C +cmd/go/flag:run-a +cmd/go/flag:run-asan +cmd/go/flag:run-asmflags +cmd/go/flag:run-buildmode +cmd/go/flag:run-buildvcs +cmd/go/flag:run-compiler +cmd/go/flag:run-cover +cmd/go/flag:run-covermode +cmd/go/flag:run-coverpkg +cmd/go/flag:run-debug-actiongraph +cmd/go/flag:run-debug-runtime-trace +cmd/go/flag:run-debug-trace +cmd/go/flag:run-exec +cmd/go/flag:run-gccgoflags +cmd/go/flag:run-gcflags +cmd/go/flag:run-installsuffix +cmd/go/flag:run-ldflags +cmd/go/flag:run-linkshared +cmd/go/flag:run-mod +cmd/go/flag:run-modcacherw +cmd/go/flag:run-modfile +cmd/go/flag:run-msan +cmd/go/flag:run-n +cmd/go/flag:run-overlay +cmd/go/flag:run-p +cmd/go/flag:run-pgo +cmd/go/flag:run-pkgdir +cmd/go/flag:run-race +cmd/go/flag:run-tags +cmd/go/flag:run-toolexec +cmd/go/flag:run-trimpath +cmd/go/flag:run-v 
+cmd/go/flag:run-work +cmd/go/flag:run-x +cmd/go/subcommand:help-run +cmd/go/subcommand:test +cmd/go/flag:test-C +cmd/go/flag:test-a +cmd/go/flag:test-asan +cmd/go/flag:test-asmflags +cmd/go/flag:test-bench +cmd/go/flag:test-benchmem +cmd/go/flag:test-benchtime +cmd/go/flag:test-blockprofile +cmd/go/flag:test-blockprofilerate +cmd/go/flag:test-buildmode +cmd/go/flag:test-buildvcs +cmd/go/flag:test-c +cmd/go/flag:test-compiler +cmd/go/flag:test-count +cmd/go/flag:test-cover +cmd/go/flag:test-covermode +cmd/go/flag:test-coverpkg +cmd/go/flag:test-coverprofile +cmd/go/flag:test-cpu +cmd/go/flag:test-cpuprofile +cmd/go/flag:test-debug-actiongraph +cmd/go/flag:test-debug-runtime-trace +cmd/go/flag:test-debug-trace +cmd/go/flag:test-exec +cmd/go/flag:test-failfast +cmd/go/flag:test-fullpath +cmd/go/flag:test-fuzz +cmd/go/flag:test-fuzzminimizetime +cmd/go/flag:test-fuzztime +cmd/go/flag:test-gccgoflags +cmd/go/flag:test-gcflags +cmd/go/flag:test-installsuffix +cmd/go/flag:test-json +cmd/go/flag:test-ldflags +cmd/go/flag:test-linkshared +cmd/go/flag:test-list +cmd/go/flag:test-memprofile +cmd/go/flag:test-memprofilerate +cmd/go/flag:test-mod +cmd/go/flag:test-modcacherw +cmd/go/flag:test-modfile +cmd/go/flag:test-msan +cmd/go/flag:test-mutexprofile +cmd/go/flag:test-mutexprofilefraction +cmd/go/flag:test-n +cmd/go/flag:test-o +cmd/go/flag:test-outputdir +cmd/go/flag:test-overlay +cmd/go/flag:test-p +cmd/go/flag:test-parallel +cmd/go/flag:test-pgo +cmd/go/flag:test-pkgdir +cmd/go/flag:test-race +cmd/go/flag:test-run +cmd/go/flag:test-short +cmd/go/flag:test-shuffle +cmd/go/flag:test-skip +cmd/go/flag:test-tags +cmd/go/flag:test-test.bench +cmd/go/flag:test-test.benchmem +cmd/go/flag:test-test.benchtime +cmd/go/flag:test-test.blockprofile +cmd/go/flag:test-test.blockprofilerate +cmd/go/flag:test-test.count +cmd/go/flag:test-test.coverprofile +cmd/go/flag:test-test.cpu +cmd/go/flag:test-test.cpuprofile +cmd/go/flag:test-test.failfast +cmd/go/flag:test-test.fullpath 
+cmd/go/flag:test-test.fuzz +cmd/go/flag:test-test.fuzzminimizetime +cmd/go/flag:test-test.fuzztime +cmd/go/flag:test-test.list +cmd/go/flag:test-test.memprofile +cmd/go/flag:test-test.memprofilerate +cmd/go/flag:test-test.mutexprofile +cmd/go/flag:test-test.mutexprofilefraction +cmd/go/flag:test-test.outputdir +cmd/go/flag:test-test.parallel +cmd/go/flag:test-test.run +cmd/go/flag:test-test.short +cmd/go/flag:test-test.shuffle +cmd/go/flag:test-test.skip +cmd/go/flag:test-test.timeout +cmd/go/flag:test-test.trace +cmd/go/flag:test-test.v +cmd/go/flag:test-timeout +cmd/go/flag:test-toolexec +cmd/go/flag:test-trace +cmd/go/flag:test-trimpath +cmd/go/flag:test-v +cmd/go/flag:test-vet +cmd/go/flag:test-work +cmd/go/flag:test-x +cmd/go/subcommand:help-test +cmd/go/subcommand:tool +cmd/go/flag:tool-C +cmd/go/flag:tool-n +cmd/go/subcommand:help-tool +cmd/go/subcommand:version +cmd/go/flag:version-C +cmd/go/flag:version-m +cmd/go/flag:version-v +cmd/go/subcommand:help-version +cmd/go/subcommand:vet +cmd/go/flag:vet-C +cmd/go/flag:vet-a +cmd/go/flag:vet-asan +cmd/go/flag:vet-asmflags +cmd/go/flag:vet-buildmode +cmd/go/flag:vet-buildvcs +cmd/go/flag:vet-compiler +cmd/go/flag:vet-debug-actiongraph +cmd/go/flag:vet-debug-runtime-trace +cmd/go/flag:vet-debug-trace +cmd/go/flag:vet-gccgoflags +cmd/go/flag:vet-gcflags +cmd/go/flag:vet-installsuffix +cmd/go/flag:vet-ldflags +cmd/go/flag:vet-linkshared +cmd/go/flag:vet-mod +cmd/go/flag:vet-modcacherw +cmd/go/flag:vet-modfile +cmd/go/flag:vet-msan +cmd/go/flag:vet-n +cmd/go/flag:vet-overlay +cmd/go/flag:vet-p +cmd/go/flag:vet-pgo +cmd/go/flag:vet-pkgdir +cmd/go/flag:vet-race +cmd/go/flag:vet-tags +cmd/go/flag:vet-toolexec +cmd/go/flag:vet-trimpath +cmd/go/flag:vet-v +cmd/go/flag:vet-vettool +cmd/go/flag:vet-work +cmd/go/flag:vet-x +cmd/go/subcommand:help-vet +cmd/go/subcommand:help-buildconstraint +cmd/go/subcommand:help-buildmode +cmd/go/subcommand:help-c +cmd/go/subcommand:help-cache +cmd/go/subcommand:help-environment 
+cmd/go/subcommand:help-filetype +cmd/go/subcommand:help-go.mod +cmd/go/subcommand:help-gopath +cmd/go/subcommand:help-goproxy +cmd/go/subcommand:help-importpath +cmd/go/subcommand:help-modules +cmd/go/subcommand:help-module-auth +cmd/go/subcommand:help-packages +cmd/go/subcommand:help-private +cmd/go/subcommand:help-testflag +cmd/go/subcommand:help-testfunc +cmd/go/subcommand:help-vcs diff --git a/src/cmd/go/testdata/script/README b/src/cmd/go/testdata/script/README index 792a158760..8c95945ebe 100644 --- a/src/cmd/go/testdata/script/README +++ b/src/cmd/go/testdata/script/README @@ -35,7 +35,6 @@ Scripts also have access to other environment variables, including: GOPATH=$WORK/gopath GOPROXY= GOROOT= - GOROOT_FINAL= TESTGO_GOROOT= HOME=/no-home PATH= @@ -402,14 +401,14 @@ The available conditions are: GO_BUILDER_NAME is non-empty [link] testenv.HasLink() -[mismatched-goroot] - test's GOROOT_FINAL does not match the real GOROOT [msan] GOOS/GOARCH supports -msan [mustlinkext] platform always requires external linking [net:*] can connect to external network host +[pielinkext] + platform requires external linking for PIE [race] GOOS/GOARCH supports -race [root] diff --git a/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt b/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt new file mode 100644 index 0000000000..f1bc2c3108 --- /dev/null +++ b/src/cmd/go/testdata/script/build_cc_cache_issue64423.txt @@ -0,0 +1,121 @@ +# Regression test for https://go.dev/issue/64423: +# +# When we parse the version for a Clang binary, we should accept +# an arbitrary vendor prefix, which (as of 2023) may be injected +# by defining CLANG_VENDOR when building clang itself. +# +# Since we don't want to actually rebuild the Clang toolchain in +# this test, we instead simulate it by injecting a fake "clang" +# binary that runs the real one as a subprocess. 
+ +[!cgo] skip +[short] skip 'builds and links a fake clang binary' +[!cc:clang] skip 'test is specific to clang version parsing' + +# Save the location of the real clang command for our fake one to use. +go run ./which clang +cp stdout $WORK/.realclang + +# Build a fake clang and ensure that it is the one in $PATH. +mkdir $WORK/bin +go build -o $WORK/bin/clang$GOEXE ./fakeclang +[!GOOS:plan9] env PATH=$WORK${/}bin +[GOOS:plan9] env path=$WORK${/}bin + +# Force CGO_ENABLED=1 so that the following commands should error +# out if the fake clang doesn't work. +env CGO_ENABLED=1 + +# The bug in https://go.dev/issue/64423 resulted in cache keys that +# didn't contain any information about the C compiler. +# Since the bug was in cache key computation, isolate the cache: +# if we change the way caching works, we want the test to fail +# instead of accidentally reusing the cached information from a +# previous test run. +env GOCACHE=$WORK${/}.cache +mkdir $GOCACHE + +go build -x runtime/cgo + + # Tell our fake clang to stop working. + # Previously, 'go build -x runtime/cgo' would continue to + # succeed because both the broken clang and the non-broken one + # resulted in a cache key with no clang version information. +env GO_BREAK_CLANG=1 +! 
go build -x runtime/cgo +stderr '# runtime/cgo\nGO_BREAK_CLANG is set' + +-- go.mod -- +module example/issue64423 +go 1.20 +-- which/main.go -- +package main + +import ( + "os" + "os/exec" +) + +func main() { + path, err := exec.LookPath(os.Args[1]) + if err != nil { + panic(err) + } + os.Stdout.WriteString(path) +} +-- fakeclang/main.go -- +package main + +import ( + "bufio" + "bytes" + "log" + "os" + "os/exec" + "path/filepath" + "strings" +) + +func main() { + if os.Getenv("GO_BREAK_CLANG") != "" { + os.Stderr.WriteString("GO_BREAK_CLANG is set\n") + os.Exit(1) + } + + b, err := os.ReadFile(filepath.Join(os.Getenv("WORK"), ".realclang")) + if err != nil { + log.Fatal(err) + } + clang := string(bytes.TrimSpace(b)) + cmd := exec.Command(clang, os.Args[1:]...) + cmd.Stdout = os.Stdout + stderr, err := cmd.StderrPipe() + if err != nil { + log.Fatal(err) + } + + if err := cmd.Start(); err != nil { + log.Fatal(err) + } + + r := bufio.NewReader(stderr) + for { + line, err := r.ReadString('\n') + if line != "" { + if strings.Contains(line, "clang version") { + // Simulate a clang version string with an arbitrary vendor prefix. + const vendorString = "Gopher Solutions Unlimited " + os.Stderr.WriteString(vendorString) + } + os.Stderr.WriteString(line) + } + if err != nil { + break + } + } + os.Stderr.Close() + + if err := cmd.Wait(); err != nil { + os.Exit(1) + } +} diff --git a/src/cmd/go/testdata/script/build_git_missing_tree.txt b/src/cmd/go/testdata/script/build_git_missing_tree.txt new file mode 100644 index 0000000000..43a9ae0a6d --- /dev/null +++ b/src/cmd/go/testdata/script/build_git_missing_tree.txt @@ -0,0 +1,51 @@ +# Regression test for https://go.dev/issue/65339. 
+# Unnecessary git tree object required + +[short] skip 'constructs a local git repo' +[!git] skip + +env GIT_AUTHOR_NAME='Go Gopher' +env GIT_AUTHOR_EMAIL='gopher@golang.org' +env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME +env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL + +# Create 2 commit +env GIT_COMMITTER_DATE=2024-01-30T10:52:00+08:00 +env GIT_AUTHOR_DATE=2024-01-30T10:52:00+08:00 + +cd $WORK/repo +exec git init +exec git add go.mod main.go +exec git commit -m 'initial commit' + +env GIT_COMMITTER_DATE=2024-01-30T10:53:00+08:00 +env GIT_AUTHOR_DATE=2024-01-30T10:53:00+08:00 +exec git add extra.go +exec git commit -m 'add extra.go' + +# Assume the tree object from initial commit is not available (e.g. partial clone) +exec git log --pretty=%T +cmp stdout $WORK/.git-trees + +rm .git/objects/66/400c89b45cc96da36d232844dbf9ea5daa6bcf + +# Build the module, which should succeed +go build -v -buildvcs=true -o test +go version -m test +stdout '^\tbuild\tvcs.revision=fe3c8204d2332a731166269932dd23760c1b576a$' + +-- $WORK/repo/go.mod -- +module github.com/golang/issue65339 + +go 1.20 +-- $WORK/repo/main.go -- +package main + +func main() { + println("hello, world") +} +-- $WORK/repo/extra.go -- +package main +-- $WORK/.git-trees -- +ac724c6e5e3f86815e057ff58a639cab613abf28 +66400c89b45cc96da36d232844dbf9ea5daa6bcf diff --git a/src/cmd/go/testdata/script/build_issue48319.txt b/src/cmd/go/testdata/script/build_issue48319.txt deleted file mode 100644 index 4543303059..0000000000 --- a/src/cmd/go/testdata/script/build_issue48319.txt +++ /dev/null @@ -1,46 +0,0 @@ -# Regression test for https://go.dev/issue/48319: -# cgo builds should not include debug information from a stale GOROOT_FINAL. - -[short] skip -[!cgo] skip - -# This test is sensitive to cache invalidation, -# so use a separate build cache that we can control. -env GOCACHE=$WORK/gocache -mkdir $GOCACHE - -# Build a binary using a specific value of GOROOT_FINAL. 
-env GOROOT_FINAL=$WORK${/}goroot1 -go build -o main.exe -mv main.exe main1.exe - -# Now clean the cache and build using a different GOROOT_FINAL. -# The resulting binaries should differ in their debug metadata. -go clean -cache -env GOROOT_FINAL=$WORK${/}goroot2 -go build -o main.exe -mv main.exe main2.exe -! cmp -q main2.exe main1.exe - -# Set GOROOT_FINAL back to the first value. -# If the build is properly reproducible, the two binaries should match. -env GOROOT_FINAL=$WORK${/}goroot1 -go build -o main.exe -cmp -q main.exe main1.exe - --- go.mod -- -module main - -go 1.18 --- main.go -- -package main - -import "C" - -import "runtime" - -var _ C.int - -func main() { - println(runtime.GOROOT()) -} diff --git a/src/cmd/go/testdata/script/build_issue_65528.txt b/src/cmd/go/testdata/script/build_issue_65528.txt new file mode 100644 index 0000000000..ab4d62bbb2 --- /dev/null +++ b/src/cmd/go/testdata/script/build_issue_65528.txt @@ -0,0 +1,9 @@ +go build + +-- go.mod -- +module test + +go 1.0 + +-- p.go -- +package p diff --git a/src/cmd/go/testdata/script/build_plugin_reproducible.txt b/src/cmd/go/testdata/script/build_plugin_reproducible.txt index 5369954859..2f70a0e2b2 100644 --- a/src/cmd/go/testdata/script/build_plugin_reproducible.txt +++ b/src/cmd/go/testdata/script/build_plugin_reproducible.txt @@ -1,5 +1,13 @@ [!buildmode:plugin] skip [short] skip +[!cgo] skip '-buildmode=plugin requires external linking' + +# This test has problems when run on the LUCI darwin longtest builder, +# which uses a more contemporary Xcode version that is unfriendly to +# reproducible builds (see issue #64947 for the gory details). Note +# that individual developers running "go test cmd/go" on Darwin may +# still run into failures depending on their Xcode version. 
+[GOOS:darwin] [go-builder] skip go build -trimpath -buildvcs=false -buildmode=plugin -o a.so main.go go build -trimpath -buildvcs=false -buildmode=plugin -o b.so main.go @@ -8,4 +16,4 @@ cmp -q a.so b.so -- main.go -- package main -func main() {} \ No newline at end of file +func main() {} diff --git a/src/cmd/go/testdata/script/build_trimpath.txt b/src/cmd/go/testdata/script/build_trimpath.txt index 2a2aa2080a..1f4dde98cb 100644 --- a/src/cmd/go/testdata/script/build_trimpath.txt +++ b/src/cmd/go/testdata/script/build_trimpath.txt @@ -1,8 +1,8 @@ [short] skip -# If GOROOT_FINAL is set, 'go build -trimpath' bakes that into the resulting -# binary instead of GOROOT. Explicitly unset it here. -env GOROOT_FINAL= +# If GOROOT is set, 'go build -trimpath' bakes that into the resulting +# binary. Explicitly unset it here. +env GOROOT= # Set up two identical directories that can be used as GOPATH. env GO111MODULE=on @@ -21,14 +21,13 @@ cd $WORK/a/src/paths go build -o $WORK/paths-dbg.exe . exec $WORK/paths-dbg.exe $WORK/paths-dbg.exe stdout 'binary contains module root: true' -stdout 'binary contains GOROOT: true' +stdout 'binary contains an empty GOROOT' -# A binary built with -trimpath should not contain the current workspace -# or GOROOT. +# A binary built with -trimpath should not contain the current workspace. go build -trimpath -o $WORK/paths-a.exe . exec $WORK/paths-a.exe $WORK/paths-a.exe stdout 'binary contains module root: false' -stdout 'binary contains GOROOT: false' +stdout 'binary contains an empty GOROOT' # A binary from an external module built with -trimpath should not contain # the current workspace or GOROOT. 
@@ -36,7 +35,7 @@ go get rsc.io/fortune go install -trimpath rsc.io/fortune exec $WORK/paths-a.exe $GOPATH/bin/fortune$GOEXE stdout 'binary contains module root: false' -stdout 'binary contains GOROOT: false' +stdout 'binary contains an empty GOROOT' go mod edit -droprequire rsc.io/fortune # Two binaries built from identical packages in different directories @@ -53,14 +52,13 @@ cd $WORK/a/src/paths go build -overlay overlay.json -o $WORK/paths-dbg.exe ./overlaydir exec $WORK/paths-dbg.exe $WORK/paths-dbg.exe stdout 'binary contains module root: true' -stdout 'binary contains GOROOT: true' +stdout 'binary contains an empty GOROOT' -# A binary built with -trimpath should not contain the current workspace -# or GOROOT. +# A binary built with -trimpath should not contain the current workspace. go build -overlay overlay.json -trimpath -o $WORK/paths-a.exe ./overlaydir exec $WORK/paths-a.exe $WORK/paths-a.exe stdout 'binary contains module root: false' -stdout 'binary contains GOROOT: false' +stdout 'binary contains an empty GOROOT' # Two binaries built from identical packages in different directories # should be identical. @@ -77,13 +75,13 @@ env GOPATH=$WORK/a go build -o paths-dbg.exe paths exec ./paths-dbg.exe paths-dbg.exe stdout 'binary contains GOPATH: true' -stdout 'binary contains GOROOT: true' +stdout 'binary contains an empty GOROOT' -# A binary built with -trimpath should not contain GOPATH or GOROOT. +# A binary built with -trimpath should not contain GOPATH. go build -trimpath -o paths-a.exe paths exec ./paths-a.exe paths-a.exe stdout 'binary contains GOPATH: false' -stdout 'binary contains GOROOT: false' +stdout 'binary contains an empty GOROOT' # Two binaries built from identical packages in different GOPATH roots # should be identical. 
@@ -103,13 +101,14 @@ env GOPATH=$WORK/a go build -compiler=gccgo -o paths-dbg.exe paths exec ./paths-dbg.exe paths-dbg.exe stdout 'binary contains GOPATH: true' -stdout 'binary contains GOROOT: false' # gccgo doesn't load std from GOROOT. +stdout 'binary contains an empty GOROOT' +# gccgo doesn't load std from GOROOT. # A binary built with gccgo with -trimpath should not contain GOPATH or GOROOT. go build -compiler=gccgo -trimpath -o paths-a.exe paths exec ./paths-a.exe paths-a.exe stdout 'binary contains GOPATH: false' -stdout 'binary contains GOROOT: false' +stdout 'binary contains an empty GOROOT' # Two binaries built from identical packages in different directories # should be identical. @@ -152,6 +151,10 @@ func main() { } func check(data []byte, desc, dir string) { + if dir == "" { + fmt.Printf("binary contains an empty %s\n", desc) + return + } containsDir := bytes.Contains(data, []byte(dir)) containsSlashDir := bytes.Contains(data, []byte(filepath.ToSlash(dir))) fmt.Printf("binary contains %s: %v\n", desc, containsDir || containsSlashDir) diff --git a/src/cmd/go/testdata/script/build_trimpath_goroot.txt b/src/cmd/go/testdata/script/build_trimpath_goroot.txt index a26cfd23be..e31eccec06 100644 --- a/src/cmd/go/testdata/script/build_trimpath_goroot.txt +++ b/src/cmd/go/testdata/script/build_trimpath_goroot.txt @@ -4,11 +4,6 @@ # if GOROOT was not set explicitly in the environment. # It should instead return the empty string, since we know that we don't # have a valid path to return. -# -# TODO(#51483): when runtime.GOROOT() returns the empty string, -# go/build should default to 'go env GOROOT' instead. - -env GOROOT_FINAL= [trimpath] env GOROOT= [trimpath] ! 
go env GOROOT @@ -17,7 +12,7 @@ env GOROOT_FINAL= [short] stop -# With GOROOT still set but GOROOT_FINAL unset, 'go build' and 'go test -c' +# With GOROOT still set, 'go build' and 'go test -c' # should cause runtime.GOROOT() to report either the correct GOROOT # (without -trimpath) or no GOROOT at all (with -trimpath). @@ -52,7 +47,6 @@ stderr 'cannot find package "runtime" in any of:\n\t\(\$GOROOT not set\)\n\t'$WO # code). [trimpath] stop -[mismatched-goroot] stop ! go run -trimpath . stdout '^GOROOT $' diff --git a/src/cmd/go/testdata/script/cgo_stale_precompiled.txt b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt index eb7e10557b..7f0b515f2e 100644 --- a/src/cmd/go/testdata/script/cgo_stale_precompiled.txt +++ b/src/cmd/go/testdata/script/cgo_stale_precompiled.txt @@ -1,5 +1,5 @@ # Regression test for https://go.dev/issue/47215 and https://go.dev/issue/50183: -# A mismatched $GOROOT_FINAL or missing $CC caused the C dependencies of the net +# A missing $CC caused the C dependencies of the net # package to appear stale, and it could not be rebuilt due to a missing $CC. [!cgo] skip @@ -16,21 +16,17 @@ go build -x runtime/cgo [!short] stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo' -# https://go.dev/issue/50183: a mismatched GOROOT_FINAL caused net to be stale. -env oldGOROOT_FINAL=$GOROOT_FINAL -env GOROOT_FINAL=$WORK${/}goroot -go build -x runtime/cgo -! stderr '[/\\]cgo'$GOEXE'["]? .* -importpath runtime/cgo' - -env GOROOT_FINAL=$oldGOROOT_FINAL - # https://go.dev/issue/47215: a missing $(go env CC) caused the precompiled net # to be stale. But as of https://go.dev/cl/452457 the precompiled libraries are # no longer installed anyway! Since we're requiring a C compiler in order to # build and use cgo libraries in the standard library, we should make sure it # matches what's in the cache. -[!abscc] env CGO_ENABLED=1 -[!abscc] [!GOOS:plan9] env PATH='' # Guaranteed not to include $(go env CC)! -[!abscc] [GOOS:plan9] env path='' -[!abscc] ! 
go build -x runtime/cgo -[!abscc] stderr 'C compiler .* not found' + +[abscc] stop + +env CGO_ENABLED=1 +env CC='' +[!GOOS:plan9] env PATH='' # Guaranteed not to include $(go env CC)! +[GOOS:plan9] env path='' +! go build -x runtime/cgo +stderr 'C compiler .* not found' diff --git a/src/cmd/go/testdata/script/cover_coverpkg_partial.txt b/src/cmd/go/testdata/script/cover_coverpkg_partial.txt index 524024101a..ef7a4dd2aa 100644 --- a/src/cmd/go/testdata/script/cover_coverpkg_partial.txt +++ b/src/cmd/go/testdata/script/cover_coverpkg_partial.txt @@ -39,6 +39,14 @@ go test -coverprofile=baz.p -coverpkg=./a,./d,./f ./b ./f stdout '^ok\s+M/b\s+\S+\s+coverage: 83.3% of statements in ./a, ./d, ./f' stdout '^\s*M/f\s+coverage: 0.0% of statements' +# This sub-test inspired by issue 65653: if package P is is matched +# via the package pattern supplied as the argument to "go test -cover" +# but P is not part of "-coverpkg", then we don't want coverage for P +# (including the specific case where P has no test files). +go test -coverpkg=./a ./... +stdout '^ok\s+M/a\s+\S+\s+coverage: 100.0% of statements in ./a' +stdout '^\s*\?\s+M/f\s+\[no test files\]' + -- a/a.go -- package a diff --git a/src/cmd/go/testdata/script/cover_list.txt b/src/cmd/go/testdata/script/cover_list.txt index 6b8aaf45d1..1b1f326662 100644 --- a/src/cmd/go/testdata/script/cover_list.txt +++ b/src/cmd/go/testdata/script/cover_list.txt @@ -38,6 +38,10 @@ cp stdout $WORK/toolbuildid.txt # Build IDs should match here. cmp $WORK/toolbuildid.txt $WORK/listbuildid.txt +# Make sure that the build succeeds regardless of covermode. 
+go list -export -covermode=atomic m/example +go list -export -covermode=count m/example + -- go.mod -- module m diff --git a/src/cmd/go/testdata/script/cover_statements.txt b/src/cmd/go/testdata/script/cover_statements.txt index 24b5751154..030177cb8b 100644 --- a/src/cmd/go/testdata/script/cover_statements.txt +++ b/src/cmd/go/testdata/script/cover_statements.txt @@ -1,5 +1,13 @@ [short] skip +# Workaround for issue 64014 -- for the portion of this test that +# verifies that caching works correctly, the cache should theoretically +# always behave reliably/deterministically, however if other tests are +# concurrently accessing the cache while this test is running, it can +# lead to cache lookup failures, which manifest as test failures here. +# To avoid such flakes, use a separate isolated GOCACHE for this test. +env GOCACHE=$WORK/cache + # Initial run with simple coverage. go test -cover ./pkg1 ./pkg2 ./pkg3 ./pkg4 [!GOEXPERIMENT:coverageredesign] stdout 'pkg1 \[no test files\]' diff --git a/src/cmd/go/testdata/script/cover_swig.txt b/src/cmd/go/testdata/script/cover_swig.txt new file mode 100644 index 0000000000..decb29aaec --- /dev/null +++ b/src/cmd/go/testdata/script/cover_swig.txt @@ -0,0 +1,72 @@ + +# Testcase for issue 64661. This testcase is intended to verify that +# we don't try to send swig-generated Go files through the cover tool +# for "go test -cover" runs on packages that have *.swig source files. 
+ +[!exec:swig] skip +[!cgo] skip + +go test -v -count=1 -coverprofile=foo.p +stdout 'coverage: 100.0% of statements' + +-- go.mod -- +module simple + +go 1.21 +-- main.c -- +/* A global variable */ +double Foo = 3.0; + +/* Compute the greatest common divisor of positive integers */ +int gcd(int x, int y) { + int g; + g = y; + while (x > 0) { + g = x; + x = y % x; + y = g; + } + return g; +} + + +-- main.go -- +package main + +import ( + "fmt" +) + +func main() { + // Call our gcd() function + x := 42 + y := 105 + g := Gcd(x, y) + fmt.Println("The gcd of", x, "and", y, "is", g) + + // Manipulate the Foo global variable + + // Output its current value + fmt.Println("Foo =", GetFoo()) + + // Change its value + SetFoo(3.1415926) + + // See if the change took effect + fmt.Println("Foo =", GetFoo()) +} +-- main.swig -- +%module main + +%inline %{ +extern int gcd(int x, int y); +extern double Foo; +%} +-- main_test.go -- +package main + +import "testing" + +func TestSwigFuncs(t *testing.T) { + main() +} diff --git a/src/cmd/go/testdata/script/darwin_no_cgo.txt b/src/cmd/go/testdata/script/darwin_no_cgo.txt new file mode 100644 index 0000000000..fa445925b7 --- /dev/null +++ b/src/cmd/go/testdata/script/darwin_no_cgo.txt @@ -0,0 +1,9 @@ +# For reproducibility and easier cross-compilation, +# nothing in std is supposed to use cgo on macOS. +# Check that cgo does not appear as a dependency +# of cmd/go, which imports approximately everything +# in std (certainly everything relevant). +[!GOOS:darwin] skip +go list -deps cmd/go +! stdout runtime/cgo + diff --git a/src/cmd/go/testdata/script/env_write.txt b/src/cmd/go/testdata/script/env_write.txt index 5d40949cdd..bf14a2f0bd 100644 --- a/src/cmd/go/testdata/script/env_write.txt +++ b/src/cmd/go/testdata/script/env_write.txt @@ -65,14 +65,16 @@ go env -u GOPATH ! stderr .+ # go env -w rejects unknown or bad variables -! go env -w GODEBUG=gctrace=1 -stderr 'unknown go command variable GODEBUG' +! 
go env -w GOGC=off +stderr 'unknown go command variable GOGC' ! go env -w GOEXE=.bat stderr 'GOEXE cannot be modified' ! go env -w GOVERSION=customversion stderr 'GOVERSION cannot be modified' ! go env -w GOENV=/env stderr 'GOENV can only be set using the OS environment' +! go env -w GODEBUG=gctrace=1 +stderr 'GODEBUG can only be set using the OS environment' # go env -w can set multiple variables env CC= diff --git a/src/cmd/go/testdata/script/generate_workspace.txt b/src/cmd/go/testdata/script/generate_workspace.txt new file mode 100644 index 0000000000..5ba23932f1 --- /dev/null +++ b/src/cmd/go/testdata/script/generate_workspace.txt @@ -0,0 +1,27 @@ +# This is a regression test for Issue #56098: Go generate +# wasn't initializing workspace mode + +[short] skip + +go generate ./mod +cmp ./mod/got.txt want.txt + +-- go.work -- +go 1.22 + +use ./mod +-- mod/go.mod -- +module example.com/mod +-- mod/gen.go -- +//go:generate go run gen.go got.txt + +package main + +import "os" + +func main() { + outfile := os.Args[1] + os.WriteFile(outfile, []byte("Hello World!\n"), 0644) +} +-- want.txt -- +Hello World! \ No newline at end of file diff --git a/src/cmd/go/testdata/script/get_issue53955.txt b/src/cmd/go/testdata/script/get_issue53955.txt new file mode 100644 index 0000000000..685c6facaa --- /dev/null +++ b/src/cmd/go/testdata/script/get_issue53955.txt @@ -0,0 +1,79 @@ +# Regression test for https://go.dev/issue/53955. +# New remote tags were erroneously added to the local clone of a repo +# only *after* extracting version information for a locally-cached commit, +# causing the version information to have incomplete Tags and Version fields. + +[short] skip 'constructs a local git repo' +[!git] skip +[!net:github.com] skip 'does not actually use github.com because of insteadOf, but silence network check just in case' + +# Redirect git to a test-specific .gitconfig. +# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer. +# For older git versions we also set $HOME. 
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig +env HOME=$WORK${/}home${/}gopher +exec git config --global --show-origin user.name +stdout 'Go Gopher' + +# Inject a local repo in place of a remote one, so that we can +# add commits to the repo partway through the test. +env GIT_ALLOW_PROTOCOL=file +env GOPRIVATE=github.com/golang/issue53955 + +[!GOOS:windows] exec git config --global 'url.file://'$WORK'/repo.insteadOf' 'https://github.com/golang/issue53955' +[GOOS:windows] exec git config --global 'url.file:///'$WORK'/repo.insteadOf' 'https://github.com/golang/issue53955' + +cd $WORK/repo + +env GIT_AUTHOR_NAME='Go Gopher' +env GIT_AUTHOR_EMAIL='gopher@golang.org' +env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME +env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL + +exec git init + +env GIT_COMMITTER_DATE=2022-07-19T11:07:00-04:00 +env GIT_AUTHOR_DATE=2022-07-19T11:07:00-04:00 +exec git add go.mod issue53955.go +exec git commit -m 'initial commit' +exec git branch -m main +exec git tag v1.0.9 + +env GIT_COMMITTER_DATE=2022-07-19T11:07:01-04:00 +env GIT_AUTHOR_DATE=2022-07-19T11:07:01-04:00 +exec git add extra.go +exec git commit -m 'next commit' +exec git show-ref --tags --heads +cmp stdout $WORK/.git-refs-1 + +cd $WORK/m +go get -x github.com/golang/issue53955@2cb3d49f +stderr '^go: added github.com/golang/issue53955 v1.0.10-0.20220719150701-2cb3d49f8874$' + +cd $WORK/repo +exec git tag v1.0.10 + +cd $WORK/m +go get -x github.com/golang/issue53955@v1.0.10 +! 
stderr 'v1\.0\.10 is not a tag' +stderr '^go: upgraded github.com/golang/issue53955 v.* => v1\.0\.10$' + +-- $WORK/repo/go.mod -- +module github.com/golang/issue53955 + +go 1.18 +-- $WORK/repo/issue53955.go -- +package issue53955 +-- $WORK/repo/extra.go -- +package issue53955 +-- $WORK/.git-refs-1 -- +2cb3d49f8874b9362ed0ddd2a6512e4108bbf6b1 refs/heads/main +050526ebf5883191e990529eb3cc9345abaf838c refs/tags/v1.0.9 +-- $WORK/m/go.mod -- +module m + +go 1.18 +-- $WORK/home/gopher/.gitconfig -- +[user] + name = Go Gopher + email = gopher@golang.org diff --git a/src/cmd/go/testdata/script/goroot_executable.txt b/src/cmd/go/testdata/script/goroot_executable.txt index e20dbd87ac..ea0f920a37 100644 --- a/src/cmd/go/testdata/script/goroot_executable.txt +++ b/src/cmd/go/testdata/script/goroot_executable.txt @@ -3,16 +3,9 @@ mkdir $WORK/new/bin -# In this test, we are specifically checking the logic for deriving -# the value of GOROOT from runtime.GOROOT. -# GOROOT_FINAL changes the default behavior of runtime.GOROOT, -# and will thus cause the test to fail if it is set when our -# new cmd/go is built. -env GOROOT_FINAL= - # $GOROOT/bin/go is whatever the user has already installed # (using make.bash or similar). We can't make assumptions about what -# options it may have been built with, such as -trimpath or GOROOT_FINAL. +# options it may have been built with, such as -trimpath or not. # Instead, we build a fresh copy of the binary with known settings. 
go build -o $WORK/new/bin/go$GOEXE cmd/go & go build -trimpath -o $WORK/bin/check$GOEXE check.go & diff --git a/src/cmd/go/testdata/script/goroot_executable_trimpath.txt b/src/cmd/go/testdata/script/goroot_executable_trimpath.txt index dc1e25e606..6b859a6207 100644 --- a/src/cmd/go/testdata/script/goroot_executable_trimpath.txt +++ b/src/cmd/go/testdata/script/goroot_executable_trimpath.txt @@ -14,13 +14,10 @@ mkdir $WORK/new/bin/${GOOS}_${GOARCH} # In this test, we are specifically checking the logic for deriving # the value of GOROOT from os.Executable when runtime.GOROOT is # trimmed away. -# GOROOT_FINAL changes the default behavior of runtime.GOROOT, -# so we explicitly clear it to remove it as a confounding variable. -env GOROOT_FINAL= # $GOROOT/bin/go is whatever the user has already installed # (using make.bash or similar). We can't make assumptions about what -# options it may have been built with, such as -trimpath or GOROOT_FINAL. +# options it may have been built with, such as -trimpath or not. # Instead, we build a fresh copy of the binary with known settings. go build -trimpath -o $WORK/new/bin/go$GOEXE cmd/go & go build -trimpath -o $WORK/bin/check$GOEXE check.go & @@ -29,12 +26,20 @@ wait env TESTGOROOT=$GOROOT env GOROOT= +# Unset GOPATH and any variables that its default may be derived from, +# so that we can check for a spurious warning. +env GOPATH= +env HOME='' +env USERPROFILE='' +env home='' + # Relocated Executable # Since we built with -trimpath and the binary isn't installed in a # normal-looking GOROOT, this command should fail. ! exec $WORK/new/bin/go$GOEXE env GOROOT stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$' +! stderr 'GOPATH set to GOROOT' # Cross-compiled binaries in cmd are installed to a ${GOOS}_${GOARCH} subdirectory, # so we also want to try a copy there. 
@@ -44,6 +49,7 @@ stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT i cp $WORK/new/bin/go$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE ! exec $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE env GOROOT stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT is not set$' +! stderr 'GOPATH set to GOROOT' # Relocated Tree: # If the binary is sitting in a bin dir next to ../pkg/tool, that counts as a GOROOT, @@ -51,6 +57,7 @@ stderr '^go: cannot find GOROOT directory: ''go'' binary is trimmed and GOROOT i mkdir $WORK/new/pkg/tool exec $WORK/bin/check$GOEXE $WORK/new/bin/go$GOEXE $WORK/new exec $WORK/bin/check$GOEXE $WORK/new/bin/${GOOS}_${GOARCH}/go$GOEXE $WORK/new +! stderr 'GOPATH set to GOROOT' -- check.go -- package main diff --git a/src/cmd/go/testdata/script/gotoolchain_path.txt b/src/cmd/go/testdata/script/gotoolchain_path.txt index 9628348f7a..b7a1c9bd89 100644 --- a/src/cmd/go/testdata/script/gotoolchain_path.txt +++ b/src/cmd/go/testdata/script/gotoolchain_path.txt @@ -8,8 +8,7 @@ env TESTGO_VERSION=go1.21pre3 # Compile a fake toolchain to put in the path under various names. env GOTOOLCHAIN= mkdir $WORK/bin -go build -o $WORK/bin/ ./fakego.go # adds .exe extension implicitly on Windows -cp $WORK/bin/fakego$GOEXE $WORK/bin/go1.50.0$GOEXE +go build -o $WORK/bin/go1.50.0$GOEXE ./fakego.go # adds .exe extension implicitly on Windows [!GOOS:plan9] env PATH=$WORK/bin [GOOS:plan9] env path=$WORK/bin diff --git a/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt b/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt new file mode 100644 index 0000000000..3e1e6e562a --- /dev/null +++ b/src/cmd/go/testdata/script/install_modcacherw_issue64282.txt @@ -0,0 +1,45 @@ +# Regression test for https://go.dev/issue/64282. 
+# +# 'go install' and 'go run' with pkg@version arguments should make +# a best effort to parse flags relevant to downloading modules +# (currently only -modcacherw) before actually downloading the module +# to identify which toolchain version to use. +# +# However, the best-effort flag parsing should not interfere with +# actual flag parsing if we don't switch toolchains. In particular, +# unrecognized flags should still be diagnosed after the module for +# the requested package has been downloaded and checked for toolchain +# upgrades. + + +! go install -cake=delicious -modcacherw example.com/printversion@v0.1.0 +stderr '^flag provided but not defined: -cake$' + # Because the -modcacherw flag was set, we should be able to modify the contents + # of a directory within the module cache. +cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go +go clean -modcache + + +! go install -unknownflag -tags -modcacherw example.com/printversion@v0.1.0 +stderr '^flag provided but not defined: -unknownflag$' +cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go +go clean -modcache + + +# Also try it with a 'go install' that succeeds. +# (But skip in short mode, because linking a binary is expensive.) +[!short] go install -modcacherw example.com/printversion@v0.1.0 +[!short] cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go +[!short] go clean -modcache + + +# The flag should also be applied if given in GOFLAGS +# instead of on the command line. +env GOFLAGS=-modcacherw +! go install -cake=delicious example.com/printversion@v0.1.0 +stderr '^flag provided but not defined: -cake$' +cp $WORK/extraneous.txt $GOPATH/pkg/mod/example.com/printversion@v0.1.0/extraneous_file.go + + +-- $WORK/extraneous.txt -- +This is not a Go source file. 
diff --git a/src/cmd/go/testdata/script/list_json_issue64946.txt b/src/cmd/go/testdata/script/list_json_issue64946.txt new file mode 100644 index 0000000000..64ff9d9fe3 --- /dev/null +++ b/src/cmd/go/testdata/script/list_json_issue64946.txt @@ -0,0 +1,10 @@ +cd mod +go list -e -json=ImportPath,Error ./foo +stdout '"Err": "no Go files in .*(/|\\\\)src(/|\\\\)mod(/|\\\\)foo"' + +-- mod/go.mod -- +module example.com/foo + +go 1.21 +-- mod/foo/README.md -- +empty \ No newline at end of file diff --git a/src/cmd/go/testdata/script/list_testdata.txt b/src/cmd/go/testdata/script/list_testdata.txt new file mode 100644 index 0000000000..d62dd55c7d --- /dev/null +++ b/src/cmd/go/testdata/script/list_testdata.txt @@ -0,0 +1,11 @@ +# Issue 65406. The testdata directory in GOROOT/src +# shouldn't be treated as a standard package. + +go list -f '{{.ImportPath}} {{.Dir}}' testdata +! stderr 'found package testdata in multiple modules' +stdout 'testdata '$WORK${/}'gopath'${/}'src' + +-- go.mod -- +module testdata +-- p.go -- +package p \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt b/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt index 8050461c65..a61283ca49 100644 --- a/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt +++ b/src/cmd/go/testdata/script/mod_download_git_bareRepository.txt @@ -1,8 +1,14 @@ [short] skip [!git] skip -[!GOOS:linux] skip # Uses XDG_CONFIG_HOME -env GIT_CONFIG_GLOBAL=$WORK/.gitconfig +# Redirect git to a test-specific .gitconfig. +# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer. +# For older git versions we also set $HOME. 
+env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig +env HOME=$WORK${/}home${/}gopher +exec git config --global --show-origin user.name +stdout 'Go Gopher' + env GOPRIVATE=vcs-test.golang.org go mod download -x @@ -14,6 +20,9 @@ go 1.18 require vcs-test.golang.org/git/gitrepo1.git v1.2.3 --- $WORK/.gitconfig -- +-- $WORK/home/gopher/.gitconfig -- +[user] + name = Go Gopher + email = gopher@golang.org [safe] -bareRepository = explicit + bareRepository = explicit diff --git a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt index 080ccf072e..9afd347746 100644 --- a/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt +++ b/src/cmd/go/testdata/script/mod_download_git_decorate_full.txt @@ -3,12 +3,15 @@ env GO111MODULE=on [short] skip [!git] skip +# Redirect git to a test-specific .gitconfig. +# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer. +# For older git versions we also set $HOME. +env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig +env HOME=$WORK${/}home${/}gopher +exec git config --global --show-origin user.name +stdout 'Go Gopher' + env GOPROXY=direct -env HOME=$WORK/home/gopher - - -go env GOPROXY -stdout 'direct' exec git config --get log.decorate stdout 'full' @@ -24,5 +27,8 @@ go list -m vcs-test.golang.org/git/gitrepo1.git@v1.2.3 stdout 'vcs-test.golang.org/git/gitrepo1.git v1.2.3' -- $WORK/home/gopher/.gitconfig -- +[user] + name = Go Gopher + email = gopher@golang.org [log] decorate = full diff --git a/src/cmd/go/testdata/script/mod_download_issue51114.txt b/src/cmd/go/testdata/script/mod_download_issue51114.txt index 4d274d61a9..a28d467bb8 100644 --- a/src/cmd/go/testdata/script/mod_download_issue51114.txt +++ b/src/cmd/go/testdata/script/mod_download_issue51114.txt @@ -1,8 +1,14 @@ [!net:github.com] skip [!git] skip -[!GOOS:linux] skip # Uses XDG_CONFIG_HOME -env GIT_CONFIG_GLOBAL=$WORK/.gitconfig +# Redirect git to a test-specific .gitconfig. 
+# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer. +# For older git versions we also set $HOME. +env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig +env HOME=$WORK${/}home${/}gopher +exec git config --global --show-origin user.name +stdout 'Go Gopher' + env GOPROXY=direct ! go mod download @@ -15,6 +21,9 @@ go 1.18 require github.com/golang/notexist/subdir v0.1.0 --- $WORK/.gitconfig -- +-- $WORK/home/gopher/.gitconfig -- +[user] + name = Go Gopher + email = gopher@golang.org [url "git@github.com:"] insteadOf = https://github.com/ diff --git a/src/cmd/go/testdata/script/mod_download_private_vcs.txt b/src/cmd/go/testdata/script/mod_download_private_vcs.txt index 2f72a4213a..5c8d93a978 100644 --- a/src/cmd/go/testdata/script/mod_download_private_vcs.txt +++ b/src/cmd/go/testdata/script/mod_download_private_vcs.txt @@ -5,6 +5,14 @@ env GO111MODULE=on [!git] skip env GOPROXY=direct +# Redirect git to a test-specific .gitconfig. +# GIT_CONFIG_GLOBAL suffices for git 2.32.0 and newer. +# For older git versions we also set $HOME. +env GIT_CONFIG_GLOBAL=$WORK${/}home${/}gopher${/}.gitconfig +env HOME=$WORK${/}home${/}gopher +exec git config --global --show-origin user.name +stdout 'Go Gopher' + ! go mod download github.com/golang/nonexist@latest stderr 'Confirm the import path was entered correctly.' stderr 'If this is a private repository, see https://golang.org/doc/faq#git_https for additional information.' @@ -27,7 +35,7 @@ stderr '^If this is a private repository, see https://golang.org/doc/faq#git_htt # Test that Git clone errors will be shown to the user instead of a generic # "unknown revision" error. To do this we want to force git ls-remote to return # an error we don't already have special handling for. See golang/go#42751. -env HOME=$WORK${/}home${/}gopher +exec git config --global url.git@github.com.insteadOf https://github.com/ env GIT_SSH_COMMAND=false ! 
go install github.com/golang/nonexist@master stderr 'fatal: Could not read from remote repository.' @@ -35,5 +43,6 @@ stderr 'fatal: Could not read from remote repository.' ! stdout . -- $WORK/home/gopher/.gitconfig -- -[url "git@github.com:"] - insteadOf = https://github.com/ +[user] + name = Go Gopher + email = gopher@golang.org diff --git a/src/cmd/go/testdata/script/mod_edit_go.txt b/src/cmd/go/testdata/script/mod_edit_go.txt index ec04f40f52..007760df5d 100644 --- a/src/cmd/go/testdata/script/mod_edit_go.txt +++ b/src/cmd/go/testdata/script/mod_edit_go.txt @@ -2,7 +2,7 @@ env GO111MODULE=on ! go build -stderr ' type aliases requires' +stderr ' type alias requires' go mod edit -go=1.9 grep 'go 1.9' go.mod go build @@ -11,7 +11,7 @@ go build # the cached 1.9 build. (https://golang.org/issue/37804) go mod edit -go=1.8 ! go build -stderr 'type aliases requires' +stderr 'type alias requires' # go=none should drop the line go mod edit -go=none diff --git a/src/cmd/go/testdata/script/mod_get_issue47650.txt b/src/cmd/go/testdata/script/mod_get_issue47650.txt new file mode 100644 index 0000000000..8561b21df0 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_get_issue47650.txt @@ -0,0 +1,29 @@ +# Regression test for https://go.dev/issue/47650: +# 'go get' with a pseudo-version of a non-root package within a module +# erroneously rejected the pseudo-version as invalid, because it did not fetch +# enough commit history to validate the pseudo-version base. + +[short] skip 'creates and uses a git repository' +[!git] skip + +env GOPRIVATE=vcs-test.golang.org + +# If we request a package in a subdirectory of a module by commit hash, we +# successfully resolve it to a pseudo-version derived from a tag on the parent +# commit. 
+cp go.mod go.mod.orig +go get -x vcs-test.golang.org/git/issue47650.git/cmd/issue47650@21535ef346c3 +stderr '^go: added vcs-test.golang.org/git/issue47650.git v0.1.1-0.20210811175200-21535ef346c3$' + +# Explicitly requesting that same version should succeed, fetching additional +# history for the requested commit as needed in order to validate the +# pseudo-version base. +go clean -modcache +cp go.mod.orig go.mod +go get -x vcs-test.golang.org/git/issue47650.git/cmd/issue47650@v0.1.1-0.20210811175200-21535ef346c3 +stderr '^go: added vcs-test.golang.org/git/issue47650.git v0.1.1-0.20210811175200-21535ef346c3$' + +-- go.mod -- +module example + +go 1.20 diff --git a/src/cmd/go/testdata/script/mod_gomodcache_vendor.txt b/src/cmd/go/testdata/script/mod_gomodcache_vendor.txt new file mode 100644 index 0000000000..164460be84 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_gomodcache_vendor.txt @@ -0,0 +1,32 @@ +# This test verifies that GOMODCACHE does not affect whether checksums are embedded +# with vendored files. +# See issue #46400 +[short] skip 'builds and links a binary twice' +go mod tidy +go mod vendor + +go build -mod=vendor +go version -m example$GOEXE +cp stdout version-m.txt + +env GOMODCACHE=$WORK${/}modcache +go build -mod=vendor +go version -m example$GOEXE +cmp stdout version-m.txt + +-- go.mod -- +module example +go 1.22 +require rsc.io/sampler v1.3.0 + +-- main.go -- +package main + +import ( + "fmt" + "rsc.io/sampler" +) + +func main() { + fmt.Println(sampler.Hello()) +} \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_list.txt b/src/cmd/go/testdata/script/mod_list.txt index 06316cc335..40820b3bb5 100644 --- a/src/cmd/go/testdata/script/mod_list.txt +++ b/src/cmd/go/testdata/script/mod_list.txt @@ -44,9 +44,9 @@ stderr '^go: module rsc.io/quote/buggy: not a known dependency' # Module loader does not interfere with list -e (golang.org/issue/24149). 
go list -e -f '{{.Error.Err}}' database -stdout 'no Go files in ' +stdout 'package database is not in std' ! go list database -stderr 'no Go files in ' +stderr 'package database is not in std' -- go.mod -- module x diff --git a/src/cmd/go/testdata/script/mod_list_issue61423.txt b/src/cmd/go/testdata/script/mod_list_issue61423.txt new file mode 100644 index 0000000000..2888391f6d --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_issue61423.txt @@ -0,0 +1,100 @@ +[short] skip 'generates a vcstest git repo' +[!git] skip + +mkdir $WORK/mod1 +mkdir $WORK/mod2 +env GONOSUMDB=vcs-test.golang.org + +env GOPROXY=direct +env GOMODCACHE=$WORK/mod1 + + +# If we query a module version from a git repo, we expect its +# Origin data to be reusable. + +go list -m -json vcs-test.golang.org/git/issue61415.git@latest +cp stdout git-latest.json +stdout '"Version": "v0.0.0-20231114180001-f213069baa68"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"' +stdout '"Ref": "HEAD"' +stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="' + +go list -reuse=git-latest.json -m -json vcs-test.golang.org/git/issue61415.git@latest +stdout '"Version": "v0.0.0-20231114180001-f213069baa68"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"' +stdout '"Ref": "HEAD"' +stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="' +stdout '"Reuse": true' + + +# Now we construct a filesystem-based module proxy that +# contains only an older commit. 
+ +go clean -modcache + +go mod download -json vcs-test.golang.org/git/issue61415.git@08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a +stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"' + +[GOOS:windows] env GOPROXY=file:///$WORK/mod1/cache/download +[!GOOS:windows] env GOPROXY=file://$WORK/mod1/cache/download +env GOMODCACHE=$WORK/modcache2 + + +# If we resolve the "latest" version query using a proxy, +# it is only going to have Git origin information about the one +# commit — not the other tags that would go into resolving +# the underlying version list. +# 'go list' should not emit the partial information, +# since it isn't enough to reconstruct the result. + +go list -m -json vcs-test.golang.org/git/issue61415.git@latest +cp stdout proxy-latest.json +stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"' +! stdout '"Origin":' + +# However, if we list a specific, stable version, we should get +# whatever origin metadata the proxy has for the version. + +go list -m -json vcs-test.golang.org/git/issue61415.git@v0.0.0-20231114180000-08a4fa6bb9c0 +cp stdout proxy-version.json +stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"' +! stdout '"Ref":' +! stdout '"TagSum":' + +# The -reuse flag has no effect with a proxy, since the proxy can serve +# metadata about a given module version cheaply anyway. + +go list -reuse=proxy-version.json -m -json vcs-test.golang.org/git/issue61415.git@v0.0.0-20231114180000-08a4fa6bb9c0 +stdout '"Version": "v0.0.0-20231114180000-08a4fa6bb9c0"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "08a4fa6bb9c04ffba03b26ae427b0d6335d90a2a"' +! stdout '"Ref":' +! stdout '"TagSum":' +! 
stdout '"Reuse":' + + +# With GOPROXY=direct, the -reuse flag has an effect, but +# the Origin data from the proxy should not be sufficient +# for the proxy response to be reused. + +env GOPROXY=direct + +go list -reuse=proxy-latest.json -m -json vcs-test.golang.org/git/issue61415.git@latest +stdout '"Version": "v0.0.0-20231114180001-f213069baa68"' +stdout '"Origin":' +stdout '"VCS": "git"' +stdout '"Hash": "f213069baa68ec26412fb373c7cf6669db1f8e69"' +stdout '"Ref": "HEAD"' +stdout '"TagSum": "t1:47DEQpj8HBSa\+/TImW\+5JCeuQeRkm5NMpJWZG3hSuFU="' +! stdout '"Reuse":' diff --git a/src/cmd/go/testdata/script/mod_list_m.txt b/src/cmd/go/testdata/script/mod_list_m.txt new file mode 100644 index 0000000000..d579153966 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_list_m.txt @@ -0,0 +1,16 @@ +go mod tidy + +go list -m -json all +stdout '"GoModSum":\s+"h1:.+"' +stdout '"Sum":\s+"h1:.+"' + +-- go.mod -- +module example + +go 1.21 + +require rsc.io/quote v1.5.1 +-- example.go -- +package example + +import _ "rsc.io/quote" \ No newline at end of file diff --git a/src/cmd/go/testdata/script/mod_run_flags_issue64738.txt b/src/cmd/go/testdata/script/mod_run_flags_issue64738.txt new file mode 100644 index 0000000000..f143b026fe --- /dev/null +++ b/src/cmd/go/testdata/script/mod_run_flags_issue64738.txt @@ -0,0 +1,4 @@ +# Regression test for https://go.dev/issue/64738: +# a bug in 'go run' caused flags arguments after the requested package to +# also be parsed as cmd/go flags. +go run -n example.com/printversion@v0.1.0 -p ignored diff --git a/src/cmd/go/testdata/script/mod_vendor_embed.txt b/src/cmd/go/testdata/script/mod_vendor_embed.txt index 1a3b2fef26..4a27b1431f 100644 --- a/src/cmd/go/testdata/script/mod_vendor_embed.txt +++ b/src/cmd/go/testdata/script/mod_vendor_embed.txt @@ -6,11 +6,15 @@ cmp vendor/example.com/a/subdir/test/xtest/embed.txt a/subdir/test/xtest/embed.t cd broken_no_matching_files ! 
go mod vendor -stderr 'go: pattern foo.txt: no matching files found' +stderr '^go: resolving embeds in example.com/brokendep: pattern foo.txt: no matching files found$' +go mod vendor -e +stderr '^go: resolving embeds in example.com/brokendep: pattern foo.txt: no matching files found$' cd ../broken_bad_pattern ! go mod vendor -stderr 'go: pattern ../foo.txt: invalid pattern syntax' +stderr '^go: resolving embeds in example.com/brokendep: pattern ../foo.txt: invalid pattern syntax$' +go mod vendor -e +stderr '^go: resolving embeds in example.com/brokendep: pattern ../foo.txt: invalid pattern syntax$' cd ../embed_go122 go mod vendor diff --git a/src/cmd/go/testdata/script/mod_vendor_goversion.txt b/src/cmd/go/testdata/script/mod_vendor_goversion.txt index 838c5575b0..38975c8a2b 100644 --- a/src/cmd/go/testdata/script/mod_vendor_goversion.txt +++ b/src/cmd/go/testdata/script/mod_vendor_goversion.txt @@ -26,7 +26,7 @@ go mod vendor ! grep 1.17 vendor/modules.txt ! go build example.net/need117 stderr '^vendor[/\\]example\.net[/\\]need117[/\\]need117.go:5:1[89]:' -stderr 'conversion of slices to array pointers requires go1\.17 or later' +stderr 'conversion of slice to array pointer requires go1\.17 or later' ! grep 1.13 vendor/modules.txt go build example.net/bad114 diff --git a/src/cmd/go/testdata/script/mod_verify_work.txt b/src/cmd/go/testdata/script/mod_verify_work.txt new file mode 100644 index 0000000000..d9f5a54585 --- /dev/null +++ b/src/cmd/go/testdata/script/mod_verify_work.txt @@ -0,0 +1,24 @@ +# Regression test for Issue #62663: we would filter out the toolchain and +# main modules from the build list incorrectly, leading to the workspace +# modules being checked for correct sums. Specifically this would happen when +# the module name sorted after the virtual 'go' version module name because +# it could not get chopped off when we removed the MainModules.Len() modules +# at the beginning of the build list and we would remove the go module instead. 
+ +go mod verify + +-- go.work -- +go 1.21 + +use ( + ./a + ./b +) +-- a/go.mod -- +module hexample.com/a // important for test that module name sorts after 'go' + +go 1.21 +-- b/go.mod -- +module hexample.com/b // important for test that module name sorts after 'go' + +go 1.21 \ No newline at end of file diff --git a/src/cmd/go/testdata/script/test_cache_inputs.txt b/src/cmd/go/testdata/script/test_cache_inputs.txt index 3705c700d1..1868da9842 100644 --- a/src/cmd/go/testdata/script/test_cache_inputs.txt +++ b/src/cmd/go/testdata/script/test_cache_inputs.txt @@ -114,6 +114,13 @@ go test testcache -run=TestOSArgs -failfast go test testcache -run=TestOSArgs -failfast stdout '\(cached\)' +# golang.org/issue/64638: that includes the `-fullpath` argument. +go test testcache -run=TestOSArgs -fullpath +! stdout '\(cached\)' +go test testcache -run=TestOSArgs -fullpath +stdout '\(cached\)' + + # Executables within GOROOT and GOPATH should affect caching, # even if the test does not stat them explicitly. diff --git a/src/cmd/go/testdata/script/test_fail_fast.txt b/src/cmd/go/testdata/script/test_fail_fast.txt index 132ea709eb..1f169d6da8 100644 --- a/src/cmd/go/testdata/script/test_fail_fast.txt +++ b/src/cmd/go/testdata/script/test_fail_fast.txt @@ -48,6 +48,15 @@ stdout -count=1 'FAIL - ' ! go test ./failfast_test.go -run='TestFatal[CD]' -failfast=false stdout -count=2 'FAIL - ' +# cross package failfast +! go test -p 1 -failfast ./a ./b ./c +stdout -count=1 'FAIL - ' +stdout -count=1 'FAIL - TestFailingPkgA' + +-- go.mod -- +module m + +go 1.21.0 -- failfast_test.go -- // Copyright 2017 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -111,3 +120,27 @@ func TestFatalC(t *testing.T) { func TestFatalD(t *testing.T) { t.Fatalf("FAIL - %s", t.Name()) } +-- a/a_test.go -- +package a + +import "testing" + +func TestFailingPkgA(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) +} +-- b/b_test.go -- +package b + +import "testing" + +func TestFailingPkgB(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) +} +-- c/c_test.go -- +package c + +import "testing" + +func TestFailingPkgC(t *testing.T) { + t.Errorf("FAIL - %s", t.Name()) +} diff --git a/src/cmd/go/testdata/script/test_fuzz_deadline.txt b/src/cmd/go/testdata/script/test_fuzz_deadline.txt index 46d3521558..a51df345e9 100644 --- a/src/cmd/go/testdata/script/test_fuzz_deadline.txt +++ b/src/cmd/go/testdata/script/test_fuzz_deadline.txt @@ -2,6 +2,16 @@ [short] skip env GOCACHE=$WORK/cache +# Warm up the build cache with GOMAXPROCS unrestricted. +go test -c -o $devnull + +# For the fuzzing phase, we reduce GOMAXPROCS to avoid consuming too many +# resources during the test. Ideally this would just free up resources to run +# other parallel tests more quickly, but unfortunately it is actually necessary +# in some 32-bit environments to prevent the fuzzing engine from running out of +# address space (see https://go.dev/issue/65434). +env GOMAXPROCS=2 + # The fuzz function should be able to detect whether -timeout # was set with T.Deadline. Note there is no F.Deadline, and # there is no timeout while fuzzing, even if -fuzztime is set. diff --git a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt index 28ef3bf7de..027c434a32 100644 --- a/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt +++ b/src/cmd/go/testdata/script/test_fuzz_fuzztime.txt @@ -5,6 +5,13 @@ env GOCACHE=$WORK/cache # There are no seed values, so 'go test' should finish quickly. 
go test +# For the fuzzing phase, we reduce GOMAXPROCS to avoid consuming too many +# resources during the test. Ideally this would just free up resources to run +# other parallel tests more quickly, but unfortunately it is actually necessary +# in some 32-bit environments to prevent the fuzzing engine from running out of +# address space (see https://go.dev/issue/65434). +env GOMAXPROCS=2 + # Fuzzing should exit 0 after fuzztime, even if timeout is short. go test -timeout=3s -fuzz=FuzzFast -fuzztime=5s diff --git a/src/cmd/go/testdata/script/test_race_issue26995.txt b/src/cmd/go/testdata/script/test_race_issue26995.txt new file mode 100644 index 0000000000..f40fb46f32 --- /dev/null +++ b/src/cmd/go/testdata/script/test_race_issue26995.txt @@ -0,0 +1,42 @@ +[short] skip +[!race] skip + +go test -v -race +stdout 'testing_test.go:26: directCall' +stdout 'testing_test.go:27: interfaceTBCall' +stdout 'testing_test.go:28: interfaceCall' + +-- go.mod -- +module 26995-TBHelper-line-number + +go 1.21 +-- testing_test.go -- +package testing_test + +import "testing" + +type TestingT interface { + Helper() + Log(args ...interface{}) +} + +func directCall(t *testing.T) { + t.Helper() + t.Log("directCall") +} + +func interfaceTBCall(t testing.TB) { + t.Helper() + t.Log("interfaceTBCall") +} + +func interfaceCall(t TestingT) { + t.Helper() + t.Log("interfaceCall") +} + +func TestTesting(t *testing.T) { + directCall(t) + interfaceTBCall(t) + interfaceCall(t) +} diff --git a/src/cmd/go/testdata/script/tooltags.txt b/src/cmd/go/testdata/script/tooltags.txt index 27068eebae..1f6f54563c 100644 --- a/src/cmd/go/testdata/script/tooltags.txt +++ b/src/cmd/go/testdata/script/tooltags.txt @@ -40,6 +40,26 @@ env GOPPC64=power10 go list -f '{{context.ToolTags}}' stdout 'ppc64le.power8 ppc64le.power9 ppc64le.power10' +env GOARCH=riscv64 +env GORISCV64=rva20u64 +go list -f '{{context.ToolTags}}' +stdout 'riscv64.rva20u64' + +env GOARCH=riscv64 +env GORISCV64=rva22u64 +go list -f 
'{{context.ToolTags}}' +stdout 'riscv64.rva20u64 riscv64.rva22u64' + +env GOARCH=riscv64 +env GORISCV64=rva22 +! go list -f '{{context.ToolTags}}' +stderr 'go: invalid GORISCV64: must be rva20u64, rva22u64' + +env GOARCH=riscv64 +env GORISCV64= +go list -f '{{context.ToolTags}}' +stdout 'riscv64.rva20u64' + env GOARCH=386 env GO386=sse2 go list -f '{{context.ToolTags}}' diff --git a/src/cmd/go/testdata/script/version.txt b/src/cmd/go/testdata/script/version.txt index 0a2ac1e1d5..a18bcdd915 100644 --- a/src/cmd/go/testdata/script/version.txt +++ b/src/cmd/go/testdata/script/version.txt @@ -57,8 +57,9 @@ stdout '^test2json.exe: .+' stdout '^\tpath\tcmd/test2json$' ! stdout 'mod[^e]' -# Repeat the test with -buildmode=pie. +# Repeat the test with -buildmode=pie and default linking. [!buildmode:pie] stop +[pielinkext] [!cgo] stop go build -buildmode=pie -o external.exe rsc.io/fortune go version external.exe stdout '^external.exe: .+' @@ -68,9 +69,7 @@ stdout '^\tpath\trsc.io/fortune' stdout '^\tmod\trsc.io/fortune\tv1.0.0' # Also test PIE with internal linking. -# currently only supported on linux/amd64, linux/arm64 and windows/amd64. -[!GOOS:linux] [!GOOS:windows] stop -[!GOARCH:amd64] [!GOARCH:arm64] stop +[pielinkext] stop go build -buildmode=pie -ldflags=-linkmode=internal -o internal.exe rsc.io/fortune go version internal.exe stdout '^internal.exe: .+' diff --git a/src/cmd/go/testdata/script/version_buildvcs_git.txt b/src/cmd/go/testdata/script/version_buildvcs_git.txt index 680e492320..a360b9d9b7 100644 --- a/src/cmd/go/testdata/script/version_buildvcs_git.txt +++ b/src/cmd/go/testdata/script/version_buildvcs_git.txt @@ -119,7 +119,7 @@ rm $GOBIN/d$GOEXE go list -x ./... 
stdout -count=3 '^example.com' stderr -count=1 '^git status' -stderr -count=1 '^git -c log.showsignature=false show' +stderr -count=1 '^git -c log.showsignature=false log' -- $WORK/fakebin/git -- #!/bin/sh diff --git a/src/cmd/go/testdata/script/version_cshared.txt b/src/cmd/go/testdata/script/version_cshared.txt index 29e21fc09a..18f257f64a 100644 --- a/src/cmd/go/testdata/script/version_cshared.txt +++ b/src/cmd/go/testdata/script/version_cshared.txt @@ -1,4 +1,5 @@ [short] skip +[!cgo] skip '-buildmode=c-shared requires external linking' [!buildmode:c-shared] stop env GO111MODULE=on diff --git a/src/cmd/go/testdata/script/ws2_32.txt b/src/cmd/go/testdata/script/ws2_32.txt new file mode 100644 index 0000000000..54f6a94eaf --- /dev/null +++ b/src/cmd/go/testdata/script/ws2_32.txt @@ -0,0 +1,48 @@ +[!GOOS:windows] skip + +go run . +stdout 'ws2_32.dll: not found' + +go run -tags net . +stdout 'ws2_32.dll: found' + +-- go.mod -- +module m + +go 1.21 + +-- utils.go -- +package main + +import ( + "fmt" + "syscall" + "unsafe" +) + +func hasModuleHandle() { + const ws2_32 = "ws2_32.dll" + getModuleHandle := syscall.MustLoadDLL("kernel32.dll").MustFindProc("GetModuleHandleW") + mod, _, _ := getModuleHandle.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(ws2_32)))) + if mod != 0 { + fmt.Println(ws2_32+":", "found") + } else { + fmt.Println(ws2_32+":", "not found") + } +} +-- net.go -- +//go:build net +package main + +import _ "net" + +func main() { + hasModuleHandle() +} +-- nonet.go -- +//go:build !net +package main + +func main() { + hasModuleHandle() +} \ No newline at end of file diff --git a/src/cmd/go/testdata/vcstest/git/issue47650.txt b/src/cmd/go/testdata/vcstest/git/issue47650.txt new file mode 100644 index 0000000000..52040787c8 --- /dev/null +++ b/src/cmd/go/testdata/vcstest/git/issue47650.txt @@ -0,0 +1,42 @@ +handle git + +env GIT_AUTHOR_NAME='Bryan C. 
Mills' +env GIT_AUTHOR_EMAIL='bcmills@google.com' +env GIT_COMMITTER_NAME=$GIT_AUTHOR_NAME +env GIT_COMMITTER_EMAIL=$GIT_AUTHOR_EMAIL + +git init + +at 2021-08-11T13:52:00-04:00 +git add cmd +git commit -m 'add cmd/issue47650' +git branch -m main +git tag v0.1.0 + +git add go.mod +git commit -m 'add go.mod' + +git show-ref --tags --heads +cmp stdout .git-refs + +git log --oneline --decorate=short +cmp stdout .git-log + +-- .git-refs -- +21535ef346c3e79fd09edd75bd4725f06c828e43 refs/heads/main +4d237df2dbfc8a443af2f5e84be774f08a2aed0c refs/tags/v0.1.0 +-- .git-log -- +21535ef (HEAD -> main) add go.mod +4d237df (tag: v0.1.0) add cmd/issue47650 +-- go.mod -- +module vcs-test.golang.org/git/issue47650.git + +go 1.17 +-- cmd/issue47650/main.go -- +package main + +import "os" + +func main() { + os.Stdout.WriteString("Hello, world!") +} diff --git a/src/cmd/gofmt/gofmt.go b/src/cmd/gofmt/gofmt.go index f4fb6bff84..341c79ab8e 100644 --- a/src/cmd/gofmt/gofmt.go +++ b/src/cmd/gofmt/gofmt.go @@ -555,7 +555,7 @@ func backupFile(filename string, data []byte, perm fs.FileMode) (string, error) if err == nil { break } - if err != nil && !os.IsExist(err) { + if !os.IsExist(err) { return "", err } } diff --git a/src/cmd/internal/bootstrap_test/reboot_test.go b/src/cmd/internal/bootstrap_test/reboot_test.go index fedf58c05c..6b2b58d1c6 100644 --- a/src/cmd/internal/bootstrap_test/reboot_test.go +++ b/src/cmd/internal/bootstrap_test/reboot_test.go @@ -75,7 +75,7 @@ func TestRepeatBootstrap(t *testing.T) { var stdout strings.Builder cmd := exec.Command(filepath.Join(goroot, "src", makeScript)) cmd.Dir = gorootSrc - cmd.Env = append(cmd.Environ(), "GOROOT=", "GOROOT_FINAL=", "GOROOT_BOOTSTRAP="+realGoroot) + cmd.Env = append(cmd.Environ(), "GOROOT=", "GOROOT_BOOTSTRAP="+realGoroot) cmd.Stderr = os.Stderr cmd.Stdout = io.MultiWriter(os.Stdout, &stdout) if err := cmd.Run(); err != nil { diff --git a/src/cmd/internal/buildid/buildid_test.go b/src/cmd/internal/buildid/buildid_test.go index 
8efa47346c..854e39f029 100644 --- a/src/cmd/internal/buildid/buildid_test.go +++ b/src/cmd/internal/buildid/buildid_test.go @@ -149,7 +149,7 @@ func TestReadFile(t *testing.T) { // Because we clobbered the note type above, // we don't expect to see a Go build ID. // The issue we are testing for was a crash - // in Readefile; see issue #62097. + // in Readfile; see issue #62097. if id != "" || err != nil { t.Errorf("ReadFile with zero ELF Align = %q, %v, want %q, nil", id, err, "") continue diff --git a/src/cmd/internal/dwarf/dwarf.go b/src/cmd/internal/dwarf/dwarf.go index 3e87e590fb..40ec8a6ec2 100644 --- a/src/cmd/internal/dwarf/dwarf.go +++ b/src/cmd/internal/dwarf/dwarf.go @@ -45,7 +45,8 @@ type Sym interface { // A Var represents a local variable or a function parameter. type Var struct { Name string - Abbrev int // Either DW_ABRV_AUTO[_LOCLIST] or DW_ABRV_PARAM[_LOCLIST] + Tag int // Either DW_TAG_variable or DW_TAG_formal_parameter + WithLoclist bool IsReturnValue bool IsInlFormal bool DictIndex uint16 // index of the dictionary entry describing the type of this variable @@ -61,6 +62,7 @@ type Var struct { InlIndex int32 // subtract 1 to form real index into InlTree ChildIndex int32 // child DIE index in abstract function IsInAbstract bool // variable exists in abstract function + ClosureOffset int64 // if non-zero this is the offset of this variable in the closure struct } // A Scope represents a lexical scope. 
All variables declared within a @@ -311,8 +313,9 @@ const ( DW_AT_go_embedded_field = 0x2903 DW_AT_go_runtime_type = 0x2904 - DW_AT_go_package_name = 0x2905 // Attribute for DW_TAG_compile_unit - DW_AT_go_dict_index = 0x2906 // Attribute for DW_TAG_typedef_type, index of the dictionary entry describing the real type of this type shape + DW_AT_go_package_name = 0x2905 // Attribute for DW_TAG_compile_unit + DW_AT_go_dict_index = 0x2906 // Attribute for DW_TAG_typedef_type, index of the dictionary entry describing the real type of this type shape + DW_AT_go_closure_offset = 0x2907 // Attribute for DW_TAG_variable, offset in the closure struct where this captured variable resides DW_AT_internal_location = 253 // params and locals; not emitted ) @@ -331,16 +334,6 @@ const ( DW_ABRV_INLINED_SUBROUTINE_RANGES DW_ABRV_VARIABLE DW_ABRV_INT_CONSTANT - DW_ABRV_AUTO - DW_ABRV_AUTO_LOCLIST - DW_ABRV_AUTO_ABSTRACT - DW_ABRV_AUTO_CONCRETE - DW_ABRV_AUTO_CONCRETE_LOCLIST - DW_ABRV_PARAM - DW_ABRV_PARAM_LOCLIST - DW_ABRV_PARAM_ABSTRACT - DW_ABRV_PARAM_CONCRETE - DW_ABRV_PARAM_CONCRETE_LOCLIST DW_ABRV_LEXICAL_BLOCK_RANGES DW_ABRV_LEXICAL_BLOCK_SIMPLE DW_ABRV_STRUCTFIELD @@ -361,7 +354,7 @@ const ( DW_ABRV_STRUCTTYPE DW_ABRV_TYPEDECL DW_ABRV_DICT_INDEX - DW_NABRV + DW_ABRV_PUTVAR_START ) type dwAbbrev struct { @@ -394,22 +387,23 @@ func expandPseudoForm(form uint8) uint8 { // expanding any DW_FORM pseudo-ops to real values. func Abbrevs() []dwAbbrev { if abbrevsFinalized { - return abbrevs[:] + return abbrevs } - for i := 1; i < DW_NABRV; i++ { + abbrevs = append(abbrevs, putvarAbbrevs...) 
+ for i := 1; i < len(abbrevs); i++ { for j := 0; j < len(abbrevs[i].attr); j++ { abbrevs[i].attr[j].form = expandPseudoForm(abbrevs[i].attr[j].form) } } abbrevsFinalized = true - return abbrevs[:] + return abbrevs } // abbrevs is a raw table of abbrev entries; it needs to be post-processed // by the Abbrevs() function above prior to being consumed, to expand // the 'pseudo-form' entries below to real DWARF form values. -var abbrevs = [DW_NABRV]dwAbbrev{ +var abbrevs = []dwAbbrev{ /* The mandatory DW_ABRV_NULL entry. */ {0, 0, []dwAttrForm{}}, @@ -555,118 +549,6 @@ var abbrevs = [DW_NABRV]dwAbbrev{ }, }, - /* AUTO */ - { - DW_TAG_variable, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, DW_FORM_string}, - {DW_AT_decl_line, DW_FORM_udata}, - {DW_AT_type, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_block1}, - }, - }, - - /* AUTO_LOCLIST */ - { - DW_TAG_variable, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, DW_FORM_string}, - {DW_AT_decl_line, DW_FORM_udata}, - {DW_AT_type, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_sec_offset}, - }, - }, - - /* AUTO_ABSTRACT */ - { - DW_TAG_variable, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, DW_FORM_string}, - {DW_AT_decl_line, DW_FORM_udata}, - {DW_AT_type, DW_FORM_ref_addr}, - }, - }, - - /* AUTO_CONCRETE */ - { - DW_TAG_variable, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_abstract_origin, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_block1}, - }, - }, - - /* AUTO_CONCRETE_LOCLIST */ - { - DW_TAG_variable, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_abstract_origin, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_sec_offset}, - }, - }, - - /* PARAM */ - { - DW_TAG_formal_parameter, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, DW_FORM_string}, - {DW_AT_variable_parameter, DW_FORM_flag}, - {DW_AT_decl_line, DW_FORM_udata}, - {DW_AT_type, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_block1}, - }, - }, - - /* PARAM_LOCLIST */ - { - DW_TAG_formal_parameter, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, 
DW_FORM_string}, - {DW_AT_variable_parameter, DW_FORM_flag}, - {DW_AT_decl_line, DW_FORM_udata}, - {DW_AT_type, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_sec_offset}, - }, - }, - - /* PARAM_ABSTRACT */ - { - DW_TAG_formal_parameter, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_name, DW_FORM_string}, - {DW_AT_variable_parameter, DW_FORM_flag}, - {DW_AT_type, DW_FORM_ref_addr}, - }, - }, - - /* PARAM_CONCRETE */ - { - DW_TAG_formal_parameter, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_abstract_origin, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_block1}, - }, - }, - - /* PARAM_CONCRETE_LOCLIST */ - { - DW_TAG_formal_parameter, - DW_CHILDREN_no, - []dwAttrForm{ - {DW_AT_abstract_origin, DW_FORM_ref_addr}, - {DW_AT_location, DW_FORM_sec_offset}, - }, - }, - /* LEXICAL_BLOCK_RANGES */ { DW_TAG_lexical_block, @@ -834,6 +716,7 @@ var abbrevs = [DW_NABRV]dwAbbrev{ DW_CHILDREN_no, []dwAttrForm{ {DW_AT_name, DW_FORM_string}, + {DW_AT_go_runtime_type, DW_FORM_addr}, }, }, @@ -900,7 +783,7 @@ var abbrevs = [DW_NABRV]dwAbbrev{ func GetAbbrev() []byte { abbrevs := Abbrevs() var buf []byte - for i := 1; i < DW_NABRV; i++ { + for i := 1; i < len(abbrevs); i++ { // See section 7.5.3 buf = AppendUleb128(buf, uint64(i)) buf = AppendUleb128(buf, uint64(abbrevs[i].tag)) @@ -1547,39 +1430,7 @@ func putscope(ctxt Context, s *FnState, scopes []Scope, curscope int32, fnabbrev return curscope } -// Given a default var abbrev code, select corresponding concrete code. -func concreteVarAbbrev(varAbbrev int) int { - switch varAbbrev { - case DW_ABRV_AUTO: - return DW_ABRV_AUTO_CONCRETE - case DW_ABRV_PARAM: - return DW_ABRV_PARAM_CONCRETE - case DW_ABRV_AUTO_LOCLIST: - return DW_ABRV_AUTO_CONCRETE_LOCLIST - case DW_ABRV_PARAM_LOCLIST: - return DW_ABRV_PARAM_CONCRETE_LOCLIST - default: - panic("should never happen") - } -} - -// Pick the correct abbrev code for variable or parameter DIE. 
-func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) { - abbrev := v.Abbrev - - // If the variable was entirely optimized out, don't emit a location list; - // convert to an inline abbreviation and emit an empty location. - missing := false - switch { - case abbrev == DW_ABRV_AUTO_LOCLIST && v.PutLocationList == nil: - missing = true - abbrev = DW_ABRV_AUTO - case abbrev == DW_ABRV_PARAM_LOCLIST && v.PutLocationList == nil: - missing = true - abbrev = DW_ABRV_PARAM - } - - // Determine whether to use a concrete variable or regular variable DIE. +func concreteVar(fnabbrev int, v *Var) bool { concrete := true switch fnabbrev { case DW_ABRV_FUNCTION, DW_ABRV_WRAPPER: @@ -1595,64 +1446,44 @@ func determineVarAbbrev(v *Var, fnabbrev int) (int, bool, bool) { default: panic("should never happen") } - - // Select proper abbrev based on concrete/non-concrete - if concrete { - abbrev = concreteVarAbbrev(abbrev) - } - - return abbrev, missing, concrete -} - -func abbrevUsesLoclist(abbrev int) bool { - switch abbrev { - case DW_ABRV_AUTO_LOCLIST, DW_ABRV_AUTO_CONCRETE_LOCLIST, - DW_ABRV_PARAM_LOCLIST, DW_ABRV_PARAM_CONCRETE_LOCLIST: - return true - default: - return false - } + return concrete } // Emit DWARF attributes for a variable belonging to an 'abstract' subprogram. func putAbstractVar(ctxt Context, info Sym, v *Var) { - // Remap abbrev - abbrev := v.Abbrev - switch abbrev { - case DW_ABRV_AUTO, DW_ABRV_AUTO_LOCLIST: - abbrev = DW_ABRV_AUTO_ABSTRACT - case DW_ABRV_PARAM, DW_ABRV_PARAM_LOCLIST: - abbrev = DW_ABRV_PARAM_ABSTRACT - } - + // The contents of this functions are used to generate putAbstractVarAbbrev automatically, see TestPutVarAbbrevGenerator. 
+ abbrev := putAbstractVarAbbrev(v) Uleb128put(ctxt, info, int64(abbrev)) - putattr(ctxt, info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(v.Name)), v.Name) + putattr(ctxt, info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(v.Name)), v.Name) // DW_AT_name // Isreturn attribute if this is a param - if abbrev == DW_ABRV_PARAM_ABSTRACT { + if v.Tag == DW_TAG_formal_parameter { var isReturn int64 if v.IsReturnValue { isReturn = 1 } - putattr(ctxt, info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + putattr(ctxt, info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) // DW_AT_variable_parameter } // Line - if abbrev != DW_ABRV_PARAM_ABSTRACT { + if v.Tag == DW_TAG_variable { // See issue 23374 for more on why decl line is skipped for abs params. - putattr(ctxt, info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) + putattr(ctxt, info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) // DW_AT_decl_line } // Type - putattr(ctxt, info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + putattr(ctxt, info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) // DW_AT_type // Var has no children => no terminator } func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, encbuf []byte) { - // Remap abbrev according to parent DIE abbrev - abbrev, missing, concrete := determineVarAbbrev(v, fnabbrev) + // The contents of this functions are used to generate putvarAbbrev automatically, see TestPutVarAbbrevGenerator. 
+ concrete := concreteVar(fnabbrev, v) + hasParametricType := !concrete && (v.DictIndex > 0 && s.dictIndexToOffset != nil && s.dictIndexToOffset[v.DictIndex-1] != 0) + withLoclist := v.WithLoclist && v.PutLocationList != nil + abbrev := putvarAbbrev(v, concrete, withLoclist) Uleb128put(ctxt, s.Info, int64(abbrev)) // Abstract origin for concrete / inlined case @@ -1661,35 +1492,39 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, // function subprogram DIE. The child DIE has no LSym, so instead // after the call to 'putattr' below we make a call to register // the child DIE reference. - putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, absfn) + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, absfn) // DW_AT_abstract_origin ctxt.RecordDclReference(s.Info, absfn, int(v.ChildIndex), inlIndex) } else { // Var name, line for abstract and default cases n := v.Name - putattr(ctxt, s.Info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n) - if abbrev == DW_ABRV_PARAM || abbrev == DW_ABRV_PARAM_LOCLIST || abbrev == DW_ABRV_PARAM_ABSTRACT { + putattr(ctxt, s.Info, abbrev, DW_FORM_string, DW_CLS_STRING, int64(len(n)), n) // DW_AT_name + if v.Tag == DW_TAG_formal_parameter { var isReturn int64 if v.IsReturnValue { isReturn = 1 } - putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) + putattr(ctxt, s.Info, abbrev, DW_FORM_flag, DW_CLS_FLAG, isReturn, nil) // DW_AT_variable_parameter } - putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) - if v.DictIndex > 0 && s.dictIndexToOffset != nil && s.dictIndexToOffset[v.DictIndex-1] != 0 { + putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, int64(v.DeclLine), nil) // DW_AT_decl_line + if hasParametricType { // If the type of this variable is parametric use the entry emitted by putparamtypes - putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 
s.dictIndexToOffset[v.DictIndex-1], s.Info) + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, s.dictIndexToOffset[v.DictIndex-1], s.Info) // DW_AT_type } else { - putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) + putattr(ctxt, s.Info, abbrev, DW_FORM_ref_addr, DW_CLS_REFERENCE, 0, v.Type) // DW_AT_type + } + + if v.ClosureOffset > 0 { + putattr(ctxt, s.Info, abbrev, DW_FORM_udata, DW_CLS_CONSTANT, v.ClosureOffset, nil) // DW_AT_go_closure_offset } } - if abbrevUsesLoclist(abbrev) { - putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, ctxt.Size(s.Loc), s.Loc) + if withLoclist { + putattr(ctxt, s.Info, abbrev, DW_FORM_sec_offset, DW_CLS_PTR, ctxt.Size(s.Loc), s.Loc) // DW_AT_location v.PutLocationList(s.Loc, s.StartPC) } else { loc := encbuf[:0] switch { - case missing: + case v.WithLoclist: break // no location case v.StackOffset == 0: loc = append(loc, DW_OP_call_frame_cfa) @@ -1697,7 +1532,7 @@ func putvar(ctxt Context, s *FnState, v *Var, absfn Sym, fnabbrev, inlIndex int, loc = append(loc, DW_OP_fbreg) loc = AppendSleb128(loc, int64(v.StackOffset)) } - putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc) + putattr(ctxt, s.Info, abbrev, DW_FORM_block1, DW_CLS_BLOCK, int64(len(loc)), loc) // DW_AT_location } // Var has no children => no terminator diff --git a/src/cmd/internal/dwarf/putvarabbrevgen.go b/src/cmd/internal/dwarf/putvarabbrevgen.go new file mode 100644 index 0000000000..f930fdbb9b --- /dev/null +++ b/src/cmd/internal/dwarf/putvarabbrevgen.go @@ -0,0 +1,201 @@ +// Code generated by TestPutVarAbbrevGenerator. DO NOT EDIT. +// Regenerate using go test -run TestPutVarAbbrevGenerator -generate instead. 
+ +package dwarf + +var putvarAbbrevs = []dwAbbrev{ + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_type, DW_FORM_ref_addr}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_closure_offset, DW_FORM_udata}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_closure_offset, DW_FORM_udata}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_variable, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_abstract_origin, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + 
[]dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_closure_offset, DW_FORM_udata}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_go_closure_offset, DW_FORM_udata}, + {DW_AT_location, DW_FORM_block1}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_sec_offset}, + }, + }, + { + DW_TAG_formal_parameter, + DW_CHILDREN_no, + []dwAttrForm{ + {DW_AT_name, DW_FORM_string}, + {DW_AT_variable_parameter, DW_FORM_flag}, + {DW_AT_decl_line, DW_FORM_udata}, + {DW_AT_type, DW_FORM_ref_addr}, + {DW_AT_location, DW_FORM_block1}, + }, + }, +} + +func putAbstractVarAbbrev(v *Var) int { + if v.Tag == DW_TAG_variable { + return DW_ABRV_PUTVAR_START + 0 + } else { + return DW_ABRV_PUTVAR_START + 1 + } +} + +func putvarAbbrev(v *Var, concrete, withLoclist bool) int { + if v.Tag == DW_TAG_variable { + if concrete { + if withLoclist { + return DW_ABRV_PUTVAR_START + 2 + } else { + return DW_ABRV_PUTVAR_START + 3 + } + } else { + if v.ClosureOffset > 0 { + if withLoclist { + return DW_ABRV_PUTVAR_START + 4 + } else { + return DW_ABRV_PUTVAR_START + 5 + } + } else { + if withLoclist { + return DW_ABRV_PUTVAR_START + 6 + } else { + return DW_ABRV_PUTVAR_START + 7 + } + } + } + } else { + if concrete { + if withLoclist { + return DW_ABRV_PUTVAR_START + 8 + } else { + return DW_ABRV_PUTVAR_START + 9 + } + } else { + if v.ClosureOffset > 0 { + if withLoclist { + return DW_ABRV_PUTVAR_START + 10 + } else { + return DW_ABRV_PUTVAR_START + 11 + } + } else { + if 
withLoclist { + return DW_ABRV_PUTVAR_START + 12 + } else { + return DW_ABRV_PUTVAR_START + 13 + } + } + } + } +} diff --git a/src/cmd/internal/dwarf/putvarabbrevgen_test.go b/src/cmd/internal/dwarf/putvarabbrevgen_test.go new file mode 100644 index 0000000000..24500a3388 --- /dev/null +++ b/src/cmd/internal/dwarf/putvarabbrevgen_test.go @@ -0,0 +1,316 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package dwarf + +import ( + "bytes" + "flag" + "fmt" + "go/ast" + "go/format" + "go/parser" + "go/printer" + "go/token" + "os" + "strconv" + "strings" + "testing" +) + +const pvagenfile = "./putvarabbrevgen.go" + +var pvaDoGenerate bool + +func TestMain(m *testing.M) { + flag.BoolVar(&pvaDoGenerate, "generate", false, "regenerates "+pvagenfile) + flag.Parse() + os.Exit(m.Run()) + +} + +// TestPutVarAbbrevGenerator checks that putvarabbrevgen.go is kept in sync +// with the contents of functions putvar and putAbstractVar. If test flag -generate +// is specified the file is regenerated instead. +// +// The block of code in putvar and putAbstractVar that picks the correct +// abbrev is also generated automatically by this function by looking at all +// the possible paths in their CFG and the order in which putattr is called. +// +// There are some restrictions on how putattr can be used in putvar and +// putAbstractVar: +// +// 1. it shouldn't appear inside a for or switch statements +// 2. it can appear within any number of nested if/else statements but the +// conditionals must not change after putvarAbbrev/putAbstractVarAbbrev +// are called +// 3. the form argument of putattr must be a compile time constant +// 4. 
each putattr call must be followed by a comment containing the name of +// the attribute it is setting +// +// TestPutVarAbbrevGenerator will fail if (1) or (4) are not respected and +// the generated code will not compile if (3) is violated. Violating (2) +// will result in code silently wrong code (which will usually be detected +// by one of the tests that parse debug_info). +func TestPutVarAbbrevGenerator(t *testing.T) { + spvagenfile := pvagenerate(t) + + if pvaDoGenerate { + err := os.WriteFile(pvagenfile, []byte(spvagenfile), 0660) + if err != nil { + t.Fatal(err) + } + return + } + + slurp := func(name string) string { + out, err := os.ReadFile(name) + if err != nil { + t.Fatal(err) + } + return string(out) + } + + if spvagenfile != slurp(pvagenfile) { + t.Error(pvagenfile + " is out of date") + } + +} + +func pvagenerate(t *testing.T) string { + var fset token.FileSet + f, err := parser.ParseFile(&fset, "./dwarf.go", nil, parser.ParseComments) + if err != nil { + t.Fatal(err) + } + cm := ast.NewCommentMap(&fset, f, f.Comments) + abbrevs := make(map[string]int) + funcs := map[string]ast.Stmt{} + for _, decl := range f.Decls { + decl, ok := decl.(*ast.FuncDecl) + if !ok || decl.Body == nil { + continue + } + if decl.Name.Name == "putvar" || decl.Name.Name == "putAbstractVar" { + // construct the simplified CFG + pvagraph, _ := pvacfgbody(t, &fset, cm, decl.Body.List) + funcs[decl.Name.Name+"Abbrev"] = pvacfgvisit(pvagraph, abbrevs) + } + } + abbrevslice := make([]string, len(abbrevs)) + for abbrev, n := range abbrevs { + abbrevslice[n] = abbrev + } + + buf := new(bytes.Buffer) + fmt.Fprint(buf, `// Code generated by TestPutVarAbbrevGenerator. DO NOT EDIT. +// Regenerate using go test -run TestPutVarAbbrevGenerator -generate instead. 
+ +package dwarf + +var putvarAbbrevs = []dwAbbrev{ +`) + + for _, abbrev := range abbrevslice { + fmt.Fprint(buf, abbrev+",\n") + } + + fmt.Fprint(buf, "\n}\n\n") + + fmt.Fprint(buf, "func putAbstractVarAbbrev(v *Var) int {\n") + format.Node(buf, &token.FileSet{}, funcs["putAbstractVarAbbrev"]) + fmt.Fprint(buf, "}\n\n") + + fmt.Fprint(buf, "func putvarAbbrev(v *Var, concrete, withLoclist bool) int {\n") + format.Node(buf, &token.FileSet{}, funcs["putvarAbbrev"]) + fmt.Fprint(buf, "}\n") + + out, err := format.Source(buf.Bytes()) + if err != nil { + t.Log(string(buf.Bytes())) + t.Fatal(err) + } + + return string(out) +} + +type pvacfgnode struct { + attr, form string + + cond ast.Expr + then, els *pvacfgnode +} + +// pvacfgbody generates a simplified CFG for a slice of statements, +// containing only calls to putattr and the if statements affecting them. +func pvacfgbody(t *testing.T, fset *token.FileSet, cm ast.CommentMap, body []ast.Stmt) (start, end *pvacfgnode) { + add := func(n *pvacfgnode) { + if start == nil || end == nil { + start = n + end = n + } else { + end.then = n + end = n + } + } + for _, stmt := range body { + switch stmt := stmt.(type) { + case *ast.ExprStmt: + if x, _ := stmt.X.(*ast.CallExpr); x != nil { + funstr := exprToString(x.Fun) + if funstr == "putattr" { + form, _ := x.Args[3].(*ast.Ident) + if form == nil { + t.Fatalf("%s invalid use of putattr", fset.Position(x.Pos())) + } + cmt := findLineComment(cm, stmt) + if cmt == nil { + t.Fatalf("%s invalid use of putattr (no comment containing the attribute name)", fset.Position(x.Pos())) + } + add(&pvacfgnode{attr: strings.TrimSpace(cmt.Text[2:]), form: form.Name}) + } + } + case *ast.IfStmt: + ifStart, ifEnd := pvacfgif(t, fset, cm, stmt) + if ifStart != nil { + add(ifStart) + end = ifEnd + } + default: + // check that nothing under this contains a putattr call + ast.Inspect(stmt, func(n ast.Node) bool { + if call, _ := n.(*ast.CallExpr); call != nil { + if exprToString(call.Fun) == 
"putattr" { + t.Fatalf("%s use of putattr in unsupported block", fset.Position(call.Pos())) + } + } + return true + }) + } + } + return start, end +} + +func pvacfgif(t *testing.T, fset *token.FileSet, cm ast.CommentMap, ifstmt *ast.IfStmt) (start, end *pvacfgnode) { + thenStart, thenEnd := pvacfgbody(t, fset, cm, ifstmt.Body.List) + var elseStart, elseEnd *pvacfgnode + if ifstmt.Else != nil { + switch els := ifstmt.Else.(type) { + case *ast.IfStmt: + elseStart, elseEnd = pvacfgif(t, fset, cm, els) + case *ast.BlockStmt: + elseStart, elseEnd = pvacfgbody(t, fset, cm, els.List) + default: + t.Fatalf("%s: unexpected statement %T", fset.Position(els.Pos()), els) + } + } + + if thenStart != nil && elseStart != nil && thenStart == thenEnd && elseStart == elseEnd && thenStart.form == elseStart.form && thenStart.attr == elseStart.attr { + return thenStart, thenEnd + } + + if thenStart != nil || elseStart != nil { + start = &pvacfgnode{cond: ifstmt.Cond} + end = &pvacfgnode{} + if thenStart != nil { + start.then = thenStart + thenEnd.then = end + } else { + start.then = end + } + if elseStart != nil { + start.els = elseStart + elseEnd.then = end + } else { + start.els = end + } + } + return start, end +} + +func exprToString(t ast.Expr) string { + var buf bytes.Buffer + printer.Fprint(&buf, token.NewFileSet(), t) + return buf.String() +} + +// findLineComment finds the line comment for statement stmt. +func findLineComment(cm ast.CommentMap, stmt *ast.ExprStmt) *ast.Comment { + var r *ast.Comment + for _, cmtg := range cm[stmt] { + for _, cmt := range cmtg.List { + if cmt.Slash > stmt.Pos() { + if r != nil { + return nil + } + r = cmt + } + } + } + return r +} + +// pvacfgvisit visits the CFG depth first, populates abbrevs with all +// possible dwAbbrev definitions and returns a tree of if/else statements +// that picks the correct abbrev. 
+func pvacfgvisit(pvacfg *pvacfgnode, abbrevs map[string]int) ast.Stmt { + r := &ast.IfStmt{Cond: &ast.BinaryExpr{ + Op: token.EQL, + X: &ast.SelectorExpr{X: &ast.Ident{Name: "v"}, Sel: &ast.Ident{Name: "Tag"}}, + Y: &ast.Ident{Name: "DW_TAG_variable"}}} + r.Body = &ast.BlockStmt{List: []ast.Stmt{ + pvacfgvisitnode(pvacfg, "DW_TAG_variable", []*pvacfgnode{}, abbrevs), + }} + r.Else = &ast.BlockStmt{List: []ast.Stmt{ + pvacfgvisitnode(pvacfg, "DW_TAG_formal_parameter", []*pvacfgnode{}, abbrevs), + }} + return r +} + +func pvacfgvisitnode(pvacfg *pvacfgnode, tag string, path []*pvacfgnode, abbrevs map[string]int) ast.Stmt { + if pvacfg == nil { + abbrev := toabbrev(tag, path) + if _, ok := abbrevs[abbrev]; !ok { + abbrevs[abbrev] = len(abbrevs) + } + return &ast.ReturnStmt{ + Results: []ast.Expr{&ast.BinaryExpr{ + Op: token.ADD, + X: &ast.Ident{Name: "DW_ABRV_PUTVAR_START"}, + Y: &ast.BasicLit{Kind: token.INT, Value: strconv.Itoa(abbrevs[abbrev])}}}} + } + if pvacfg.attr != "" { + return pvacfgvisitnode(pvacfg.then, tag, append(path, pvacfg), abbrevs) + } else if pvacfg.cond != nil { + if bx, _ := pvacfg.cond.(*ast.BinaryExpr); bx != nil && bx.Op == token.EQL && exprToString(bx.X) == "v.Tag" { + // this condition is "v.Tag == Xxx", check the value of 'tag' + y := exprToString(bx.Y) + if y == tag { + return pvacfgvisitnode(pvacfg.then, tag, path, abbrevs) + } else { + return pvacfgvisitnode(pvacfg.els, tag, path, abbrevs) + } + } else { + r := &ast.IfStmt{Cond: pvacfg.cond} + r.Body = &ast.BlockStmt{List: []ast.Stmt{pvacfgvisitnode(pvacfg.then, tag, path, abbrevs)}} + r.Else = &ast.BlockStmt{List: []ast.Stmt{pvacfgvisitnode(pvacfg.els, tag, path, abbrevs)}} + return r + } + } else { + return pvacfgvisitnode(pvacfg.then, tag, path, abbrevs) + } +} + +func toabbrev(tag string, path []*pvacfgnode) string { + buf := new(bytes.Buffer) + fmt.Fprintf(buf, "{\n%s,\nDW_CHILDREN_no,\n[]dwAttrForm{\n", tag) + for _, node := range path { + if node.cond == nil { + fmt.Fprintf(buf, 
"{%s, %s},\n", node.attr, node.form) + + } + } + fmt.Fprint(buf, "},\n}") + return buf.String() +} diff --git a/src/cmd/internal/goobj/builtinlist.go b/src/cmd/internal/goobj/builtinlist.go index 03982d54f2..fb729f512e 100644 --- a/src/cmd/internal/goobj/builtinlist.go +++ b/src/cmd/internal/goobj/builtinlist.go @@ -83,7 +83,7 @@ var builtins = [...]struct { {"runtime.efaceeq", 1}, {"runtime.panicrangeexit", 1}, {"runtime.deferrangefunc", 1}, - {"runtime.fastrand", 1}, + {"runtime.rand32", 1}, {"runtime.makemap64", 1}, {"runtime.makemap", 1}, {"runtime.makemap_small", 1}, diff --git a/src/cmd/internal/moddeps/moddeps_test.go b/src/cmd/internal/moddeps/moddeps_test.go index ae890b66cb..2def029325 100644 --- a/src/cmd/internal/moddeps/moddeps_test.go +++ b/src/cmd/internal/moddeps/moddeps_test.go @@ -195,8 +195,6 @@ func TestAllDependencies(t *testing.T) { Env: append(append(os.Environ(), modcacheEnv...), // Set GOROOT. "GOROOT="+gorootCopyDir, - // Explicitly clear GOROOT_FINAL so that GOROOT=gorootCopyDir is definitely used. - "GOROOT_FINAL=", // Add GOROOTcopy/bin and bundleDir to front of PATH. "PATH="+filepath.Join(gorootCopyDir, "bin")+string(filepath.ListSeparator)+ bundleDir+string(filepath.ListSeparator)+os.Getenv("PATH"), @@ -443,11 +441,18 @@ func findGorootModules(t *testing.T) []gorootModule { goBin := testenv.GoToolPath(t) goroot.once.Do(func() { - goroot.err = filepath.WalkDir(testenv.GOROOT(t), func(path string, info fs.DirEntry, err error) error { + // If the root itself is a symlink to a directory, + // we want to follow it (see https://go.dev/issue/64375). + // Add a trailing separator to force that to happen. 
+ root := testenv.GOROOT(t) + if !os.IsPathSeparator(root[len(root)-1]) { + root += string(filepath.Separator) + } + goroot.err = filepath.WalkDir(root, func(path string, info fs.DirEntry, err error) error { if err != nil { return err } - if info.IsDir() && (info.Name() == "vendor" || info.Name() == "testdata") { + if info.IsDir() && path != root && (info.Name() == "vendor" || info.Name() == "testdata") { return filepath.SkipDir } if info.IsDir() && path == filepath.Join(testenv.GOROOT(t), "pkg") { @@ -458,7 +463,7 @@ func findGorootModules(t *testing.T) []gorootModule { // running time of this test anyway.) return filepath.SkipDir } - if info.IsDir() && (strings.HasPrefix(info.Name(), "_") || strings.HasPrefix(info.Name(), ".")) { + if info.IsDir() && path != root && (strings.HasPrefix(info.Name(), "_") || strings.HasPrefix(info.Name(), ".")) { // _ and . prefixed directories can be used for internal modules // without a vendor directory that don't contribute to the build // but might be used for example as code generators. diff --git a/src/cmd/internal/obj/arm/asm5.go b/src/cmd/internal/obj/arm/asm5.go index 177ffd9797..4e6eff9e17 100644 --- a/src/cmd/internal/obj/arm/asm5.go +++ b/src/cmd/internal/obj/arm/asm5.go @@ -979,7 +979,7 @@ func (c *ctxt5) aclass(a *obj.Addr) int { if immrot(^uint32(c.instoffset)) != 0 { return C_NCON } - if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM == 7 { + if uint32(c.instoffset) <= 0xffff && buildcfg.GOARM.Version == 7 { return C_SCON } if x, y := immrot2a(uint32(c.instoffset)); x != 0 && y != 0 { @@ -1099,6 +1099,32 @@ func (c *ctxt5) oplook(p *obj.Prog) *Optab { fmt.Printf("\t\t%d %d\n", p.From.Type, p.To.Type) } + if (p.As == ASRL || p.As == ASRA) && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { + // Right shifts are weird - a shift that looks like "shift by constant 0" actually + // means "shift by constant 32". Use left shift in this situation instead. + // See issue 64715. + // TODO: rotate by 0? 
Not currently supported, but if we ever do then include it here. + p.As = ASLL + } + if p.As != AMOVB && p.As != AMOVBS && p.As != AMOVBU && p.As != AMOVH && p.As != AMOVHS && p.As != AMOVHU && p.As != AXTAB && p.As != AXTABU && p.As != AXTAH && p.As != AXTAHU { + // Same here, but for shifts encoded in Addrs. + // Don't do it for the extension ops, which + // need to keep their RR shifts. + fixShift := func(a *obj.Addr) { + if a.Type == obj.TYPE_SHIFT { + typ := a.Offset & SHIFT_RR + isConst := a.Offset&(1<<4) == 0 + amount := a.Offset >> 7 & 0x1f + if isConst && amount == 0 && (typ == SHIFT_LR || typ == SHIFT_AR || typ == SHIFT_RR) { + a.Offset -= typ + a.Offset += SHIFT_LL + } + } + } + fixShift(&p.From) + fixShift(&p.To) + } + ops := oprange[p.As&obj.AMask] c1 := &xcmp[a1] c3 := &xcmp[a3] @@ -3044,16 +3070,16 @@ func (c *ctxt5) omvl(p *obj.Prog, a *obj.Addr, dr int) uint32 { } func (c *ctxt5) chipzero5(e float64) int { - // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. - if buildcfg.GOARM < 7 || math.Float64bits(e) != 0 { + // We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions. + if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat || math.Float64bits(e) != 0 { return -1 } return 0 } func (c *ctxt5) chipfloat5(e float64) int { - // We use GOARM=7 to gate the use of VFPv3 vmov (imm) instructions. - if buildcfg.GOARM < 7 { + // We use GOARM.Version=7 and !GOARM.SoftFloat to gate the use of VFPv3 vmov (imm) instructions. 
+ if buildcfg.GOARM.Version < 7 || buildcfg.GOARM.SoftFloat { return -1 } diff --git a/src/cmd/internal/obj/arm/obj5.go b/src/cmd/internal/obj/arm/obj5.go index fb7c260f89..def4f526ad 100644 --- a/src/cmd/internal/obj/arm/obj5.go +++ b/src/cmd/internal/obj/arm/obj5.go @@ -66,7 +66,7 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { ctxt.Diag("%v: TLS MRC instruction must write to R0 as it might get translated into a BL instruction", p.Line()) } - if buildcfg.GOARM < 7 { + if buildcfg.GOARM.Version < 7 { // Replace it with BL runtime.read_tls_fallback(SB) for ARM CPUs that lack the tls extension. if progedit_tlsfallback == nil { progedit_tlsfallback = ctxt.Lookup("runtime.read_tls_fallback") diff --git a/src/cmd/internal/obj/arm64/asm7.go b/src/cmd/internal/obj/arm64/asm7.go index 0991ec9201..03f0fb06da 100644 --- a/src/cmd/internal/obj/arm64/asm7.go +++ b/src/cmd/internal/obj/arm64/asm7.go @@ -7741,7 +7741,7 @@ func (c *ctxt7) opldpstp(p *obj.Prog, o *Optab, vo int32, rbase, rl, rh int16, l c.ctxt.Diag("invalid register pair %v\n", p) } case ALDP, ALDPW, ALDPSW: - if rl < REG_R0 || REG_R30 < rl || rh < REG_R0 || REG_R30 < rh { + if rl < REG_R0 || REG_R31 < rl || rh < REG_R0 || REG_R31 < rh { c.ctxt.Diag("invalid register pair %v\n", p) } case ASTP, ASTPW: diff --git a/src/cmd/internal/obj/link.go b/src/cmd/internal/obj/link.go index 0b7878656c..5be493e176 100644 --- a/src/cmd/internal/obj/link.go +++ b/src/cmd/internal/obj/link.go @@ -314,7 +314,7 @@ type Prog struct { RegTo2 int16 // 2nd destination operand Mark uint16 // bitmask of arch-specific items Optab uint16 // arch-specific opcode index - Scond uint8 // bits that describe instruction suffixes (e.g. ARM conditions) + Scond uint8 // bits that describe instruction suffixes (e.g. 
ARM conditions, RISCV Rounding Mode) Back uint8 // for x86 back end: backwards branch state Ft uint8 // for x86 back end: type index of Prog.From Tt uint8 // for x86 back end: type index of Prog.To diff --git a/src/cmd/internal/obj/loong64/a.out.go b/src/cmd/internal/obj/loong64/a.out.go index 99a7da388f..d944fcfcb8 100644 --- a/src/cmd/internal/obj/loong64/a.out.go +++ b/src/cmd/internal/obj/loong64/a.out.go @@ -157,14 +157,14 @@ const ( REGZERO = REG_R0 // set to zero REGLINK = REG_R1 REGSP = REG_R3 - REGRET = REG_R19 + REGRET = REG_R20 // not use REGARG = -1 // -1 disables passing the first argument in register - REGRT1 = REG_R19 // reserved for runtime, duffzero and duffcopy - REGRT2 = REG_R20 // reserved for runtime, duffcopy + REGRT1 = REG_R20 // reserved for runtime, duffzero and duffcopy + REGRT2 = REG_R21 // reserved for runtime, duffcopy REGCTXT = REG_R29 // context for closures REGG = REG_R22 // G in loong64 REGTMP = REG_R30 // used by the assembler - FREGRET = REG_F0 + FREGRET = REG_F0 // not use ) var LOONG64DWARFRegisters = map[int16]int16{} @@ -227,6 +227,7 @@ const ( C_ADDR C_TLS_LE C_TLS_IE + C_GOTADDR C_TEXTSIZE C_NCLASS // must be the last diff --git a/src/cmd/internal/obj/loong64/asm.go b/src/cmd/internal/obj/loong64/asm.go index 19250c94ee..f0f8abb59c 100644 --- a/src/cmd/internal/obj/loong64/asm.go +++ b/src/cmd/internal/obj/loong64/asm.go @@ -349,6 +349,8 @@ var optab = []Optab{ {AWORD, C_LCON, C_NONE, C_NONE, C_NONE, C_NONE, 40, 4, 0, 0}, {AWORD, C_DCON, C_NONE, C_NONE, C_NONE, C_NONE, 61, 4, 0, 0}, + {AMOVV, C_GOTADDR, C_NONE, C_NONE, C_REG, C_NONE, 65, 8, 0, 0}, + {ATEQ, C_SCON, C_REG, C_NONE, C_REG, C_NONE, 15, 8, 0, 0}, {ATEQ, C_SCON, C_NONE, C_NONE, C_REG, C_NONE, 15, 8, 0, 0}, @@ -676,6 +678,9 @@ func (c *ctxt0) aclass(a *obj.Addr) int { return C_SOREG } return C_LOREG + + case obj.NAME_GOTREF: + return C_GOTADDR } return C_GOK @@ -1602,7 +1607,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = 
p.To.Sym rel.Add = p.To.Offset - rel.Type = objabi.R_ADDRLOONG64U + rel.Type = objabi.R_LOONG64_ADDR_HI o2 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) rel2 := obj.Addrel(c.cursym) @@ -1610,7 +1615,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel2.Siz = 4 rel2.Sym = p.To.Sym rel2.Add = p.To.Offset - rel2.Type = objabi.R_ADDRLOONG64 + rel2.Type = objabi.R_LOONG64_ADDR_LO case 51: // mov addr,r ==> pcalau12i + lw o1 = OP_IR(c.opir(APCALAU12I), uint32(0), uint32(REGTMP)) @@ -1619,14 +1624,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset - rel.Type = objabi.R_ADDRLOONG64U + rel.Type = objabi.R_LOONG64_ADDR_HI o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset - rel2.Type = objabi.R_ADDRLOONG64 + rel2.Type = objabi.R_LOONG64_ADDR_LO case 52: // mov $lext, r // NOTE: this case does not use REGTMP. If it ever does, @@ -1637,14 +1642,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset - rel.Type = objabi.R_ADDRLOONG64U + rel.Type = objabi.R_LOONG64_ADDR_HI o2 = OP_12IRR(c.opirr(add), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset - rel2.Type = objabi.R_ADDRLOONG64 + rel2.Type = objabi.R_LOONG64_ADDR_LO case 53: // mov r, tlsvar ==> lu12i.w + ori + add r2, regtmp + sw o(regtmp) // NOTE: this case does not use REGTMP. 
If it ever does, @@ -1655,14 +1660,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = p.To.Offset - rel.Type = objabi.R_ADDRLOONG64TLSU + rel.Type = objabi.R_LOONG64_TLS_LE_HI o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.To.Sym rel2.Add = p.To.Offset - rel2.Type = objabi.R_ADDRLOONG64TLS + rel2.Type = objabi.R_LOONG64_TLS_LE_LO o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) o4 = OP_12IRR(c.opirr(p.As), uint32(0), uint32(REGTMP), uint32(p.From.Reg)) @@ -1675,14 +1680,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset - rel.Type = objabi.R_ADDRLOONG64TLSU + rel.Type = objabi.R_LOONG64_TLS_LE_HI o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset - rel2.Type = objabi.R_ADDRLOONG64TLS + rel2.Type = objabi.R_LOONG64_TLS_LE_LO o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(REGTMP)) o4 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(p.To.Reg)) @@ -1695,14 +1700,14 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = p.From.Offset - rel.Type = objabi.R_ADDRLOONG64TLSU + rel.Type = objabi.R_LOONG64_TLS_LE_HI o2 = OP_12IRR(c.opirr(AOR), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) rel2.Siz = 4 rel2.Sym = p.From.Sym rel2.Add = p.From.Offset - rel2.Type = objabi.R_ADDRLOONG64TLS + rel2.Type = objabi.R_LOONG64_TLS_LE_LO o3 = OP_RRR(c.oprrr(AADDV), uint32(REG_R2), uint32(REGTMP), uint32(p.To.Reg)) case 56: // mov r, tlsvar IE model ==> (pcalau12i + ld.d)tlsvar@got + add.d + st.d @@ -1712,7 +1717,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o 
*Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.To.Sym rel.Add = 0x0 - rel.Type = objabi.R_LOONG64_TLS_IE_PCREL_HI + rel.Type = objabi.R_LOONG64_TLS_IE_HI o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) @@ -1730,7 +1735,7 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { rel.Siz = 4 rel.Sym = p.From.Sym rel.Add = 0x0 - rel.Type = objabi.R_LOONG64_TLS_IE_PCREL_HI + rel.Type = objabi.R_LOONG64_TLS_IE_HI o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(REGTMP), uint32(REGTMP)) rel2 := obj.Addrel(c.cursym) rel2.Off = int32(c.pc + 4) @@ -1776,6 +1781,22 @@ func (c *ctxt0) asmout(p *obj.Prog, o *Optab, out []uint32) { case 64: // movv c_reg, c_fcc0 ==> movgr2cf cd, rj a := OP_TEN(8, 1334) o1 = OP_RR(a, uint32(p.From.Reg), uint32(p.To.Reg)) + + case 65: // mov sym@GOT, r ==> pcalau12i + ld.d + o1 = OP_IR(c.opir(APCALAU12I), uint32(0), uint32(p.To.Reg)) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.From.Sym + rel.Type = objabi.R_LOONG64_GOT_HI + rel.Add = 0x0 + o2 = OP_12IRR(c.opirr(-p.As), uint32(0), uint32(p.To.Reg), uint32(p.To.Reg)) + rel2 := obj.Addrel(c.cursym) + rel2.Off = int32(c.pc + 4) + rel2.Siz = 4 + rel2.Sym = p.From.Sym + rel2.Type = objabi.R_LOONG64_GOT_LO + rel2.Add = 0x0 } out[0] = o1 diff --git a/src/cmd/internal/obj/loong64/cnames.go b/src/cmd/internal/obj/loong64/cnames.go index 8b8af6ba31..94b1b54c93 100644 --- a/src/cmd/internal/obj/loong64/cnames.go +++ b/src/cmd/internal/obj/loong64/cnames.go @@ -39,6 +39,7 @@ var cnames0 = []string{ "ADDR", "TLS_LE", "TLS_IE", + "GOTADDR", "TEXTSIZE", "NCLASS", } diff --git a/src/cmd/internal/obj/loong64/obj.go b/src/cmd/internal/obj/loong64/obj.go index 1eedd46c69..5fa67f3acd 100644 --- a/src/cmd/internal/obj/loong64/obj.go +++ b/src/cmd/internal/obj/loong64/obj.go @@ -6,6 +6,7 @@ package loong64 import ( "cmd/internal/obj" + "cmd/internal/objabi" "cmd/internal/sys" "internal/abi" 
"log" @@ -84,6 +85,122 @@ func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { p.As = AADDVU } } + + if ctxt.Flag_dynlink { + rewriteToUseGot(ctxt, p, newprog) + } +} + +func rewriteToUseGot(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) { + // ADUFFxxx $offset + // becomes + // MOVV runtime.duffxxx@GOT, REGTMP + // ADD $offset, REGTMP + // JAL REGTMP + if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO { + var sym *obj.LSym + if p.As == obj.ADUFFZERO { + sym = ctxt.Lookup("runtime.duffzero") + } else { + sym = ctxt.Lookup("runtime.duffcopy") + } + offset := p.To.Offset + p.As = AMOVV + p.From.Type = obj.TYPE_MEM + p.From.Sym = sym + p.From.Name = obj.NAME_GOTREF + p.To.Type = obj.TYPE_REG + p.To.Reg = REGTMP + p.To.Name = obj.NAME_NONE + p.To.Offset = 0 + p.To.Sym = nil + p1 := obj.Appendp(p, newprog) + p1.As = AADDV + p1.From.Type = obj.TYPE_CONST + p1.From.Offset = offset + p1.To.Type = obj.TYPE_REG + p1.To.Reg = REGTMP + p2 := obj.Appendp(p1, newprog) + p2.As = AJAL + p2.To.Type = obj.TYPE_MEM + p2.To.Reg = REGTMP + } + + // We only care about global data: NAME_EXTERN means a global + // symbol in the Go sense, and p.Sym.Local is true for a few + // internally defined symbols. 
+ if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + // MOVV $sym, Rx becomes MOVV sym@GOT, Rx + // MOVV $sym+, Rx becomes MOVV sym@GOT, Rx; ADD , Rx + if p.As != AMOVV { + ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -shared", p) + } + if p.To.Type != obj.TYPE_REG { + ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -shared", p) + } + p.From.Type = obj.TYPE_MEM + p.From.Name = obj.NAME_GOTREF + if p.From.Offset != 0 { + q := obj.Appendp(p, newprog) + q.As = AADDV + q.From.Type = obj.TYPE_CONST + q.From.Offset = p.From.Offset + q.To = p.To + p.From.Offset = 0 + } + } + if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN { + ctxt.Diag("don't know how to handle %v with -shared", p) + } + + var source *obj.Addr + // MOVx sym, Ry becomes MOVV sym@GOT, REGTMP; MOVx (REGTMP), Ry + // MOVx Ry, sym becomes MOVV sym@GOT, REGTMP; MOVx Ry, (REGTMP) + // An addition may be inserted between the two MOVs if there is an offset. 
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() { + if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -shared", p) + } + source = &p.From + } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() { + source = &p.To + } else { + return + } + if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP { + return + } + if source.Sym.Type == objabi.STLSBSS { + return + } + if source.Type != obj.TYPE_MEM { + ctxt.Diag("don't know how to handle %v with -shared", p) + } + p1 := obj.Appendp(p, newprog) + p2 := obj.Appendp(p1, newprog) + p1.As = AMOVV + p1.From.Type = obj.TYPE_MEM + p1.From.Sym = source.Sym + p1.From.Name = obj.NAME_GOTREF + p1.To.Type = obj.TYPE_REG + p1.To.Reg = REGTMP + + p2.As = p.As + p2.From = p.From + p2.To = p.To + if p.From.Name == obj.NAME_EXTERN { + p2.From.Reg = REGTMP + p2.From.Name = obj.NAME_NONE + p2.From.Sym = nil + } else if p.To.Name == obj.NAME_EXTERN { + p2.To.Reg = REGTMP + p2.To.Name = obj.NAME_NONE + p2.To.Sym = nil + } else { + return + } + + obj.Nopout(p) } func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { @@ -279,18 +396,18 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { if c.cursym.Func().Text.From.Sym.Wrapper() && c.cursym.Func().Text.Mark&LEAF == 0 { // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame // - // MOV g_panic(g), R1 - // BEQ R1, end - // MOV panic_argp(R1), R2 - // ADD $(autosize+FIXED_FRAME), R29, R3 - // BNE R2, R3, end - // ADD $FIXED_FRAME, R29, R2 - // MOV R2, panic_argp(R1) + // MOV g_panic(g), R20 + // BEQ R20, end + // MOV panic_argp(R20), R24 + // ADD $(autosize+FIXED_FRAME), R3, R30 + // BNE R24, R30, end + // ADD $FIXED_FRAME, R3, R24 + // MOV R24, panic_argp(R20) // end: // NOP // // The NOP is needed to give the jumps somewhere to land. 
- // It is a liblink NOP, not an hardware NOP: it encodes to 0 instruction bytes. + // It is a liblink NOP, not a hardware NOP: it encodes to 0 instruction bytes. // // We don't generate this for leafs because that means the wrapped // function was inlined into the wrapper. @@ -302,12 +419,12 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.From.Reg = REGG q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R19 + q.To.Reg = REG_R20 q = obj.Appendp(q, newprog) q.As = ABEQ q.From.Type = obj.TYPE_REG - q.From.Reg = REG_R19 + q.From.Reg = REG_R20 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p1 = q @@ -315,10 +432,10 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q = obj.Appendp(q, newprog) q.As = mov q.From.Type = obj.TYPE_MEM - q.From.Reg = REG_R19 + q.From.Reg = REG_R20 q.From.Offset = 0 // Panic.argp q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R4 + q.To.Reg = REG_R24 q = obj.Appendp(q, newprog) q.As = add @@ -326,13 +443,13 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.From.Offset = int64(autosize) + ctxt.Arch.FixedFrameSize q.Reg = REGSP q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R5 + q.To.Reg = REG_R30 q = obj.Appendp(q, newprog) q.As = ABNE q.From.Type = obj.TYPE_REG - q.From.Reg = REG_R4 - q.Reg = REG_R5 + q.From.Reg = REG_R24 + q.Reg = REG_R30 q.To.Type = obj.TYPE_BRANCH q.Mark |= BRANCH p2 = q @@ -343,14 +460,14 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { q.From.Offset = ctxt.Arch.FixedFrameSize q.Reg = REGSP q.To.Type = obj.TYPE_REG - q.To.Reg = REG_R4 + q.To.Reg = REG_R24 q = obj.Appendp(q, newprog) q.As = mov q.From.Type = obj.TYPE_REG - q.From.Reg = REG_R4 + q.From.Reg = REG_R24 q.To.Type = obj.TYPE_MEM - q.To.Reg = REG_R19 + q.To.Reg = REG_R20 q.To.Offset = 0 // Panic.argp q = obj.Appendp(q, newprog) @@ -503,6 +620,10 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) 
*obj.Prog { p = c.ctxt.StartUnsafePoint(p, c.newprog) + // Spill Arguments. This has to happen before we open + // any more frame space. + p = c.cursym.Func().SpillRegisterArgs(p, c.newprog) + // MOV REGLINK, -8/-16(SP) p = obj.Appendp(p, c.newprog) p.As = mov @@ -567,13 +688,15 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.To.Reg = REGSP p.Spadj = int32(-frameSize) + // Unspill arguments + p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog) p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) } // Jump back to here after morestack returns. startPred := p - // MOV g_stackguard(g), R19 + // MOV g_stackguard(g), R20 p = obj.Appendp(p, c.newprog) p.As = mov @@ -584,7 +707,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1 } p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R19 + p.To.Reg = REG_R20 // Mark the stack bound check and morestack call async nonpreemptible. // If we get preempted here, when resumed the preemption request is @@ -595,15 +718,15 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { var q *obj.Prog if framesize <= abi.StackSmall { // small stack: SP < stackguard - // AGTU SP, stackguard, R19 + // AGTU SP, stackguard, R20 p = obj.Appendp(p, c.newprog) p.As = ASGTU p.From.Type = obj.TYPE_REG p.From.Reg = REGSP - p.Reg = REG_R19 + p.Reg = REG_R20 p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R19 + p.To.Reg = REG_R20 } else { // large stack: SP-framesize < stackguard-StackSmall offset := int64(framesize) - abi.StackSmall @@ -615,8 +738,8 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { // stack guard to incorrectly succeed. We explicitly // guard against underflow. 
// - // SGTU $(framesize-StackSmall), SP, R4 - // BNE R4, label-of-call-to-morestack + // SGTU $(framesize-StackSmall), SP, R24 + // BNE R24, label-of-call-to-morestack p = obj.Appendp(p, c.newprog) p.As = ASGTU @@ -624,13 +747,13 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Offset = offset p.Reg = REGSP p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 + p.To.Reg = REG_R24 p = obj.Appendp(p, c.newprog) q = p p.As = ABNE p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R4 + p.From.Reg = REG_R24 p.To.Type = obj.TYPE_BRANCH p.Mark |= BRANCH } @@ -642,35 +765,35 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p.From.Offset = -offset p.Reg = REGSP p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R4 + p.To.Reg = REG_R24 p = obj.Appendp(p, c.newprog) p.As = ASGTU p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R4 - p.Reg = REG_R19 + p.From.Reg = REG_R24 + p.Reg = REG_R20 p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R19 + p.To.Reg = REG_R20 } - // q1: BNE R19, done + // q1: BNE R20, done p = obj.Appendp(p, c.newprog) q1 := p p.As = ABNE p.From.Type = obj.TYPE_REG - p.From.Reg = REG_R19 + p.From.Reg = REG_R20 p.To.Type = obj.TYPE_BRANCH p.Mark |= BRANCH - // MOV LINK, R5 + // MOV LINK, R31 p = obj.Appendp(p, c.newprog) p.As = mov p.From.Type = obj.TYPE_REG p.From.Reg = REGLINK p.To.Type = obj.TYPE_REG - p.To.Reg = REG_R5 + p.To.Reg = REG_R31 if q != nil { q.To.SetTarget(p) p.Mark |= LABEL @@ -678,6 +801,10 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog) + // Spill the register args that could be clobbered by the + // morestack code + p = c.cursym.Func().SpillRegisterArgs(p, c.newprog) + // JAL runtime.morestack(SB) p = obj.Appendp(p, c.newprog) @@ -692,6 +819,7 @@ func (c *ctxt0) stacksplit(p *obj.Prog, framesize int32) *obj.Prog { } p.Mark |= BRANCH + p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog) p = c.ctxt.EndUnsafePoint(p, c.newprog, -1) // JMP 
start diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go index 13143f5beb..ab1b4eb19f 100644 --- a/src/cmd/internal/obj/ppc64/a.out.go +++ b/src/cmd/internal/obj/ppc64/a.out.go @@ -422,15 +422,18 @@ const ( C_U15CON /* 15 bit unsigned constant */ C_S16CON /* 16 bit signed constant */ C_U16CON /* 16 bit unsigned constant */ + C_16CON /* Any constant which fits into 16 bits. Can be signed or unsigned */ + C_U31CON /* 31 bit unsigned constant */ + C_S32CON /* 32 bit signed constant */ + C_U32CON /* 32 bit unsigned constant */ C_32CON /* Any constant which fits into 32 bits. Can be signed or unsigned */ C_S34CON /* 34 bit signed constant */ C_64CON /* Any constant which fits into 64 bits. Can be signed or unsigned */ C_SACON /* $n(REG) where n <= int16 */ C_LACON /* $n(REG) where n <= int32 */ C_DACON /* $n(REG) where n <= int64 */ - C_SBRA /* A short offset argument to a branching instruction */ - C_LBRA /* A long offset argument to a branching instruction */ - C_LBRAPIC /* Like C_LBRA, but requires an extra NOP for potential TOC restore by the linker. */ + C_BRA /* A short offset argument to a branching instruction */ + C_BRAPIC /* Like C_BRA, but requires an extra NOP for potential TOC restore by the linker. */ C_ZOREG /* An $0+reg memory op */ C_SOREG /* An $n+reg memory arg where n is a 16 bit signed offset */ C_LOREG /* An $n+reg memory arg where n is a 32 bit signed offset */ @@ -446,16 +449,6 @@ const ( C_TEXTSIZE /* An argument with Type obj.TYPE_TEXTSIZE */ C_NCLASS /* must be the last */ - - /* Aliased names which should be cleaned up, or integrated. */ - C_SCON = C_U15CON - C_ADDCON = C_S16CON - C_ANDCON = C_U16CON - C_LCON = C_32CON - - /* Aliased names which may be generated by ppc64map for the optab. 
*/ - C_S32CON = C_32CON - C_U32CON = C_32CON ) const ( diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go index 72d1f4915d..81f73dcea6 100644 --- a/src/cmd/internal/obj/ppc64/anames9.go +++ b/src/cmd/internal/obj/ppc64/anames9.go @@ -27,15 +27,18 @@ var cnames9 = []string{ "U15CON", "S16CON", "U16CON", + "16CON", + "U31CON", + "S32CON", + "U32CON", "32CON", "S34CON", "64CON", "SACON", "LACON", "DACON", - "SBRA", - "LBRA", - "LBRAPIC", + "BRA", + "BRAPIC", "ZOREG", "SOREG", "LOREG", diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go index 0f01dfa8db..2793600cd0 100644 --- a/src/cmd/internal/obj/ppc64/asm9.go +++ b/src/cmd/internal/obj/ppc64/asm9.go @@ -110,60 +110,58 @@ var optab []Optab var optabBase = []Optab{ {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0}, - {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, + {as: obj.ATEXT, a1: C_LOREG, a3: C_32CON, a6: C_TEXTSIZE, type_: 0, size: 0}, {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0}, - {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0}, + {as: obj.ATEXT, a1: C_ADDR, a3: C_32CON, a6: C_TEXTSIZE, type_: 0, size: 0}, /* move register */ {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4}, - {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, - {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4}, - {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, - {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, - {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, - {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8}, - {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, - {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4}, + {as: AADD, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AADD, a1: C_S16CON, a6: C_REG, 
type_: 4, size: 4}, + {as: AADD, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 22, size: 8}, + {as: AADD, a1: C_U16CON, a6: C_REG, type_: 22, size: 8}, + {as: AADDIS, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 20, size: 4}, + {as: AADDIS, a1: C_S16CON, a6: C_REG, type_: 20, size: 4}, {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4}, - {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, - {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, - {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, - {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, + {as: AADDC, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AADDC, a1: C_S16CON, a6: C_REG, type_: 4, size: 4}, + {as: AADDC, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, + {as: AADDC, a1: C_32CON, a6: C_REG, type_: 22, size: 12}, {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */ {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4}, {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4}, - {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, - {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, - {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, - {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, - {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, - {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, - {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, - {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AANDCC, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, + {as: AANDCC, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AANDCC, a1: C_S16CON, a6: C_REG, type_: 23, size: 8}, + {as: AANDCC, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, + {as: AANDCC, 
a1: C_32CON, a6: C_REG, type_: 23, size: 12}, + {as: AANDCC, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, + {as: AANDISCC, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, + {as: AANDISCC, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, - {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, - {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4}, - {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, - {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4}, - {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, - {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, + {as: AMULLW, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_S16CON, a6: C_REG, type_: 4, size: 4}, + {as: AMULLW, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, + {as: AMULLW, a1: C_32CON, a6: C_REG, type_: 22, size: 12}, {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, - {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, - {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, + {as: ASUBC, a1: C_REG, a3: C_S16CON, a6: C_REG, type_: 27, size: 4}, + {as: ASUBC, a1: C_REG, a3: C_32CON, a6: C_REG, type_: 28, size: 12}, {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, - {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, - {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, - {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, - {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, - {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, - {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, - {as: AORIS, a1: C_ANDCON, 
a6: C_REG, type_: 58, size: 4}, - {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_S16CON, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_S16CON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_U32CON, a2: C_REG, a6: C_REG, type_: 21, size: 8}, + {as: AOR, a1: C_U32CON, a6: C_REG, type_: 21, size: 8}, + {as: AOR, a1: C_32CON, a6: C_REG, type_: 23, size: 12}, + {as: AOR, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, + {as: AORIS, a1: C_U16CON, a6: C_REG, type_: 58, size: 4}, + {as: AORIS, a1: C_U16CON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ @@ -172,33 +170,33 @@ var optabBase = []Optab{ {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, - {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, - {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, - {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, - {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, - {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, - {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, + {as: ASLD, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLD, a1: C_U15CON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_U15CON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLW, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, + {as: ASLW, a1: C_U15CON, a6: C_REG, type_: 57, size: 4}, {as: ASRAW, a1: C_REG, 
a6: C_REG, type_: 6, size: 4}, {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, - {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, - {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, + {as: ASRAW, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAW, a1: C_U15CON, a6: C_REG, type_: 56, size: 4}, {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, - {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, - {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, - {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, - {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, - {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, - {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 63, size: 4}, - {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, - {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, - {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, + {as: ASRAD, a1: C_U15CON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAD, a1: C_U15CON, a6: C_REG, type_: 56, size: 4}, + {as: ARLWNM, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 63, size: 4}, + {as: ARLWNM, a1: C_U15CON, a2: C_REG, a3: C_U15CON, a4: C_U15CON, a6: C_REG, type_: 63, size: 4}, + {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 63, size: 4}, + {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_U15CON, a4: C_U15CON, a6: C_REG, type_: 63, size: 4}, + {as: ACLRLSLWI, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 62, size: 4}, + {as: ARLDMI, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 30, size: 4}, + {as: ARLDC, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 29, size: 4}, {as: ARLDC, a1: C_REG, a3: C_U8CON, a4: C_U8CON, 
a6: C_REG, type_: 9, size: 4}, - {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, - {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, - {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, - {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, - {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDCL, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 29, size: 4}, + {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_U15CON, a2: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDCL, a1: C_REG, a3: C_32CON, a6: C_REG, type_: 14, size: 4}, {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, @@ -232,8 +230,7 @@ var optabBase = []Optab{ {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, - {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_16CON, a6: C_REG, type_: 3, size: 4}, {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, @@ -245,8 +242,7 @@ var optabBase = []Optab{ {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, - {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_16CON, a6: C_REG, type_: 3, size: 4}, {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, {as: AMOVW, a1: C_SOREG, a6: 
C_REG, type_: 8, size: 4}, @@ -258,7 +254,7 @@ var optabBase = []Optab{ {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, - {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, + {as: AFMOVD, a1: C_S16CON, a6: C_FREG, type_: 24, size: 8}, {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4}, {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, @@ -275,29 +271,28 @@ var optabBase = []Optab{ {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, - {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, + {as: AMOVFL, a1: C_FREG, a3: C_32CON, a6: C_FPSCR, type_: 64, size: 4}, {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, - {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, + {as: AMOVFL, a1: C_32CON, a6: C_FPSCR, type_: 65, size: 4}, {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, - {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, + {as: AMOVFL, a1: C_REG, a6: C_32CON, type_: 69, size: 4}, {as: ASYSCALL, type_: 5, size: 4}, {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, - {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, - {as: ABEQ, a6: C_SBRA, type_: 16, size: 4}, - {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, - {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label - {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop - {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr - {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr - {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label - {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label - {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi - {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 
18, size: 4}, // bclr bo, bi, bh - {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi - {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4}, + {as: ASYSCALL, a1: C_U15CON, type_: 77, size: 12}, + {as: ABEQ, a6: C_BRA, type_: 16, size: 4}, + {as: ABEQ, a1: C_CREG, a6: C_BRA, type_: 16, size: 4}, + {as: ABR, a6: C_BRA, type_: 11, size: 4}, // b label + {as: ABR, a6: C_BRAPIC, type_: 11, size: 8}, // b label; nop + {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr + {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr + {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_BRA, type_: 16, size: 4}, // bc bo, bi, label + {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi + {as: ABC, a1: C_U15CON, a2: C_CRBIT, a3: C_U15CON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh + {as: ABC, a1: C_U15CON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi + {as: ABDNZ, a6: C_BRA, type_: 16, size: 4}, {as: ASYNC, type_: 46, size: 4}, - {as: AWORD, a1: C_LCON, type_: 40, size: 4}, + {as: AWORD, a1: C_32CON, type_: 40, size: 4}, {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, @@ -313,19 +308,19 @@ var optabBase = []Optab{ {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, - {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, + {as: AMTFSB0, a1: C_U15CON, type_: 52, size: 4}, /* Other ISA 2.05+ instructions */ {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, - {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: 
C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ - {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ - {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ - {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ - {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ - {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ - {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ + {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_U15CON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ + {as: AFTSQRT, a1: C_FREG, a6: C_U15CON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ + {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ + {as: ADARN, a1: C_U15CON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ + {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ + {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_U15CON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ + {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ /* Misc ISA 3.0 instructions */ {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4}, @@ -368,7 +363,7 @@ var optabBase = []Optab{ /* Vector shift */ {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ - {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* 
vector shift by octet immediate, va-form */ + {as: AVSOI, a1: C_U16CON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */ /* Vector count */ {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ @@ -392,10 +387,8 @@ var optabBase = []Optab{ {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ /* Vector splat */ - {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ - {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, - {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ - {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, + {as: AVSPLTB, a1: C_S16CON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, + {as: AVSPLTISB, a1: C_S16CON, a6: C_VREG, type_: 82, size: 4}, /* Vector AES */ {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ @@ -403,7 +396,7 @@ var optabBase = []Optab{ {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ /* Vector SHA */ - {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ + {as: AVSHASIGMA, a1: C_U16CON, a2: C_VREG, a3: C_U16CON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ /* VSX vector load */ {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ @@ -447,14 +440,14 @@ var optabBase = []Optab{ {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ /* VSX splat */ - {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ - {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ + {as: AXXSPLTW, a1: C_VSREG, a3: C_U15CON, a6: 
C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ + {as: AXXSPLTIB, a1: C_U15CON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ /* VSX permute */ {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ /* VSX shift */ - {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ + {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_U15CON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ /* VSX reverse bytes */ {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ @@ -479,45 +472,45 @@ var optabBase = []Optab{ {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, - {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, - {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4}, + {as: ACMP, a1: C_REG, a6: C_S16CON, type_: 71, size: 4}, + {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_S16CON, type_: 71, size: 4}, {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, - {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, - {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a6: C_U16CON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_U16CON, type_: 71, size: 4}, {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, - {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, - {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, + {as: ATW, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, + {as: ATW, a1: C_32CON, a2: C_REG, a6: C_S16CON, type_: 61, size: 4}, {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, - {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: 
C_SCON, type_: 43, size: 4}, - {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, - {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4}, + {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_U15CON, type_: 43, size: 4}, + {as: ADCBF, a1: C_SOREG, a6: C_U15CON, type_: 43, size: 4}, + {as: ADCBF, a1: C_XOREG, a6: C_U15CON, type_: 43, size: 4}, {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, - {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4}, + {as: ALDAR, a1: C_XOREG, a3: C_U16CON, a6: C_REG, type_: 45, size: 4}, {as: AEIEIO, type_: 46, size: 4}, {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, - {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4}, + {as: ATLBIE, a1: C_U15CON, a6: C_REG, type_: 49, size: 4}, {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, - {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4}, + {as: ASTSW, a1: C_REG, a3: C_32CON, a6: C_ZOREG, type_: 41, size: 4}, {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, - {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4}, + {as: ALSW, a1: C_ZOREG, a3: C_32CON, a6: C_REG, type_: 42, size: 4}, {as: obj.AUNDEF, type_: 78, size: 4}, - {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0}, - {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0}, + {as: obj.APCDATA, a1: C_32CON, a6: C_32CON, type_: 0, size: 0}, + {as: obj.AFUNCDATA, a1: C_U15CON, a6: C_ADDR, type_: 0, size: 0}, {as: obj.ANOP, type_: 0, size: 0}, - {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689 - {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior + {as: obj.ANOP, a1: C_32CON, type_: 0, size: 0}, // NOP operand variations added for #40689 + {as: 
obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0}, - {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL - {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL - {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code + {as: obj.ADUFFZERO, a6: C_BRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.ADUFFCOPY, a6: C_BRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.APCALIGN, a1: C_32CON, type_: 0, size: 0}, // align code } // These are opcodes above which may generate different sequences depending on whether prefix opcode support @@ -552,7 +545,7 @@ var prefixableOptab = []PrefixableOptab{ {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, - {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, + {Optab: Optab{as: AMOVW, a1: C_32CON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8}, @@ -574,8 +567,8 @@ var prefixableOptab = []PrefixableOptab{ {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8}, - {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, - {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, + {Optab: Optab{as: AADD, a1: C_32CON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 
8}, + {Optab: Optab{as: AADD, a1: C_32CON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AADD, a1: C_S34CON, a2: C_REG, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, {Optab: Optab{as: AADD, a1: C_S34CON, a6: C_REG, type_: 22, size: 20}, minGOPPC64: 10, pfxsize: 8}, } @@ -955,7 +948,7 @@ func (c *ctxt9) aclass(a *obj.Addr) int { f64 := a.Val.(float64) if f64 == 0 { if math.Signbit(f64) { - return C_ADDCON + return C_S16CON } return C_ZCON } @@ -1017,7 +1010,7 @@ func (c *ctxt9) aclass(a *obj.Addr) int { case sbits <= 16: return C_U16CON case sbits <= 31: - return C_U32CON + return C_U31CON case sbits <= 32: return C_U32CON case sbits <= 33: @@ -1041,9 +1034,9 @@ func (c *ctxt9) aclass(a *obj.Addr) int { case obj.TYPE_BRANCH: if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled { - return C_LBRAPIC + return C_BRAPIC } - return C_SBRA + return C_BRA } return C_GOK @@ -1114,7 +1107,7 @@ func (c *ctxt9) oplook(p *obj.Prog) *Optab { return &ops[0] } -// Compare two operand types (ex C_REG, or C_SCON) +// Compare two operand types (ex C_REG, or C_U15CON) // and return true if b is compatible with a. // // Argument comparison isn't reflexitive, so care must be taken. 
@@ -1145,13 +1138,20 @@ func cmp(a int, b int) bool { return cmp(C_U5CON, b) case C_U15CON: return cmp(C_U8CON, b) - case C_U16CON: - return cmp(C_U15CON, b) - case C_S16CON: return cmp(C_U15CON, b) - case C_32CON: + case C_U16CON: + return cmp(C_U15CON, b) + case C_16CON: return cmp(C_S16CON, b) || cmp(C_U16CON, b) + case C_U31CON: + return cmp(C_U16CON, b) + case C_U32CON: + return cmp(C_U31CON, b) + case C_S32CON: + return cmp(C_U31CON, b) || cmp(C_S16CON, b) + case C_32CON: + return cmp(C_S32CON, b) || cmp(C_U32CON, b) case C_S34CON: return cmp(C_32CON, b) case C_64CON: @@ -1160,9 +1160,6 @@ func cmp(a int, b int) bool { case C_LACON: return cmp(C_SACON, b) - case C_LBRA: - return cmp(C_SBRA, b) - case C_SOREG: return cmp(C_ZOREG, b) @@ -2289,6 +2286,8 @@ const ( OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 + OP_XORI = 26<<26 | 0<<1 | 0<<10 | 0 + OP_XORIS = 27<<26 | 0<<1 | 0<<10 | 0 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 @@ -2541,34 +2540,26 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { } o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) - case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */ + case 3: /* mov $soreg/16con, r ==> addi/ori $i,reg',r */ d := c.vregoff(&p.From) v := int32(d) r := int(p.From.Reg) - // p.From may be a constant value or an offset(reg) type argument. 
- isZeroOrR0 := r&0x1f == 0 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { c.ctxt.Diag("literal operation on R0\n%v", p) } - a := OP_ADDI - if int64(int16(d)) != d { - // Operand is 16 bit value with sign bit set - if o.a1 == C_ANDCON { - // Needs unsigned 16 bit so use ORI - if isZeroOrR0 { - o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) - break - } - // With ADDCON, needs signed 16 bit value, fall through to use ADDI - } else if o.a1 != C_ADDCON { - log.Fatalf("invalid handling of %v", p) + if int64(int16(d)) == d { + // MOVD $int16, Ry or MOVD $offset(Rx), Ry + o1 = AOP_IRR(uint32(OP_ADDI), uint32(p.To.Reg), uint32(r), uint32(v)) + } else { + // MOVD $uint16, Ry + if int64(uint16(d)) != d || (r != 0 && r != REGZERO) { + c.ctxt.Diag("Rule expects a uint16 constant load. got:\n%v", p) } + o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) } - o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) - case 4: /* add/mul $scon,[r1],r2 */ v := c.regoff(&p.From) @@ -2654,7 +2645,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { } o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) - case 11: /* br/bl lbra */ + case 11: /* br/bl bra */ v := int32(0) if p.To.Target() != nil { @@ -2688,7 +2679,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { case 13: /* mov[bhwd]{z,} r,r */ // This needs to handle "MOV* $0, Rx". This shows up because $0 also - // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON + // matches C_REG if r0iszero. This happens because C_REG sorts before C_U16CON // TODO: fix the above behavior and cleanup this exception. 
if p.From.Type == obj.TYPE_CONST { o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) @@ -2776,8 +2767,7 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { c.ctxt.Diag("unexpected op in rldc case\n%v", p) } - case 17, /* bc bo,bi,lbra (same for now) */ - 16: /* bc bo,bi,sbra */ + case 16: /* bc bo,bi,bra */ a := 0 r := int(p.Reg) @@ -2880,6 +2870,23 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { } o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + case 21: /* or $u32con,rx[,ry] => oris + ori (similar for xor) */ + var opu, opl uint32 + r := uint32(p.Reg) + if r == 0 { + r = uint32(p.To.Reg) + } + switch p.As { + case AOR: + opu, opl = OP_ORIS, OP_ORI + case AXOR: + opu, opl = OP_XORIS, OP_XORI + default: + c.ctxt.Diag("unhandled opcode.\n%v", p) + } + o1 = LOP_IRR(opu, uint32(p.To.Reg), r, uint32(p.From.Offset>>16)) + o2 = LOP_IRR(opl, uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.From.Offset)&0xFFFF) + case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add, add $s34con,r1 ==> addis+ori+slw+ori+add */ if p.To.Reg == REGTMP || p.Reg == REGTMP { c.ctxt.Diag("can't synthesize large constant\n%v", p) @@ -2921,8 +2928,8 @@ func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { r = int(p.To.Reg) } - // With ADDCON operand, generate 2 instructions using ADDI for signed value, - // with LCON operand generate 3 instructions. + // With S16CON operand, generate 2 instructions using ADDI for signed value, + // with 32CON operand generate 3 instructions. 
if o.size == 8 { o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go index 7167a6a947..0ef457e8d0 100644 --- a/src/cmd/internal/obj/ppc64/asm_test.go +++ b/src/cmd/internal/obj/ppc64/asm_test.go @@ -516,17 +516,19 @@ func TestAddrClassifier(t *testing.T) { {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 32}, C_U8CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 14}, C_U15CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 15}, C_U16CON}, - {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 + 1<<16}, C_U32CON}, + {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 + 1<<16}, C_U31CON}, + {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 31}, C_U32CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 32}, C_S34CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 33}, C_64CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -1}, C_S16CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -0x10001}, C_S32CON}, + {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 0x10001}, C_U31CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 33)}, C_S34CON}, {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 34)}, C_64CON}, // Branch like arguments - {obj.Addr{Type: obj.TYPE_BRANCH, Sym: &obj.LSym{Type: objabi.SDATA}}, cmplx{C_SBRA, C_LBRAPIC, C_LBRAPIC, C_SBRA}}, - {obj.Addr{Type: obj.TYPE_BRANCH}, C_SBRA}, + {obj.Addr{Type: obj.TYPE_BRANCH, Sym: &obj.LSym{Type: objabi.SDATA}}, cmplx{C_BRA, C_BRAPIC, C_BRAPIC, C_BRA}}, + {obj.Addr{Type: obj.TYPE_BRANCH}, C_BRA}, } pic_ctxt9 := ctxt9{ctxt: &obj.Link{Flag_shared: true, Arch: &Linkppc64}, autosize: 0} diff --git a/src/cmd/internal/obj/riscv/asm_test.go 
b/src/cmd/internal/obj/riscv/asm_test.go index afe0525532..96ea230841 100644 --- a/src/cmd/internal/obj/riscv/asm_test.go +++ b/src/cmd/internal/obj/riscv/asm_test.go @@ -9,8 +9,10 @@ import ( "fmt" "internal/testenv" "os" + "os/exec" "path/filepath" "runtime" + "strings" "testing" ) @@ -277,3 +279,33 @@ func TestBranch(t *testing.T) { t.Errorf("Branch test failed: %v\n%s", err, out) } } + +func TestPCAlign(t *testing.T) { + dir := t.TempDir() + tmpfile := filepath.Join(dir, "x.s") + asm := ` +TEXT _stub(SB),$0-0 + FENCE + PCALIGN $8 + FENCE + RET +` + if err := os.WriteFile(tmpfile, []byte(asm), 0644); err != nil { + t.Fatal(err) + } + cmd := exec.Command(testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), "-S", tmpfile) + cmd.Env = append(os.Environ(), "GOARCH=riscv64", "GOOS=linux") + out, err := cmd.CombinedOutput() + if err != nil { + t.Errorf("Failed to assemble: %v\n%s", err, out) + } + // The expected instruction sequence after alignment: + // FENCE + // NOP + // FENCE + // RET + want := "0f 00 f0 0f 13 00 00 00 0f 00 f0 0f 67 80 00 00" + if !strings.Contains(string(out), want) { + t.Errorf("PCALIGN test failed - got %s\nwant %s", out, want) + } +} diff --git a/src/cmd/internal/obj/riscv/cpu.go b/src/cmd/internal/obj/riscv/cpu.go index edd1ac820b..919f07b1a9 100644 --- a/src/cmd/internal/obj/riscv/cpu.go +++ b/src/cmd/internal/obj/riscv/cpu.go @@ -28,7 +28,12 @@ package riscv -import "cmd/internal/obj" +import ( + "errors" + "fmt" + + "cmd/internal/obj" +) //go:generate go run ../stringer.go -i $GOFILE -o anames.go -p riscv @@ -607,6 +612,50 @@ const ( ALAST ) +// opSuffix encoding to uint8 which fit into p.Scond +var rmSuffixSet = map[string]uint8{ + "RNE": RM_RNE, + "RTZ": RM_RTZ, + "RDN": RM_RDN, + "RUP": RM_RUP, + "RMM": RM_RMM, +} + +const rmSuffixBit uint8 = 1 << 7 + +func rmSuffixEncode(s string) (uint8, error) { + if s == "" { + return 0, errors.New("empty suffix") + } + enc, ok := rmSuffixSet[s] + if !ok { + return 0, 
fmt.Errorf("invalid encoding for unknown suffix:%q", s) + } + return enc | rmSuffixBit, nil +} + +func rmSuffixString(u uint8) (string, error) { + if u&rmSuffixBit == 0 { + return "", fmt.Errorf("invalid suffix, require round mode bit:%x", u) + } + + u &^= rmSuffixBit + for k, v := range rmSuffixSet { + if v == u { + return k, nil + } + } + return "", fmt.Errorf("unknown suffix:%x", u) +} + +const ( + RM_RNE uint8 = iota // Round to Nearest, ties to Even + RM_RTZ // Round towards Zero + RM_RDN // Round Down + RM_RUP // Round Up + RM_RMM // Round to Nearest, ties to Max Magnitude +) + // All unary instructions which write to their arguments (as opposed to reading // from them) go here. The assembly parser uses this information to populate // its AST in a semantically reasonable way. diff --git a/src/cmd/internal/obj/riscv/list.go b/src/cmd/internal/obj/riscv/list.go index de90961e32..bc87539f27 100644 --- a/src/cmd/internal/obj/riscv/list.go +++ b/src/cmd/internal/obj/riscv/list.go @@ -13,6 +13,7 @@ import ( func init() { obj.RegisterRegister(obj.RBaseRISCV, REG_END, RegName) obj.RegisterOpcode(obj.ABaseRISCV, Anames) + obj.RegisterOpSuffix("riscv64", opSuffixString) } func RegName(r int) string { @@ -31,3 +32,18 @@ func RegName(r int) string { return fmt.Sprintf("Rgok(%d)", r-obj.RBaseRISCV) } } + +func opSuffixString(s uint8) string { + if s&rmSuffixBit == 0 { + return "" + } + + ss, err := rmSuffixString(s) + if err != nil { + ss = fmt.Sprintf("", s) + } + if ss == "" { + return ss + } + return fmt.Sprintf(".%s", ss) +} diff --git a/src/cmd/internal/obj/riscv/obj.go b/src/cmd/internal/obj/riscv/obj.go index 93bda45096..3ec740f85a 100644 --- a/src/cmd/internal/obj/riscv/obj.go +++ b/src/cmd/internal/obj/riscv/obj.go @@ -28,6 +28,7 @@ import ( "internal/abi" "log" "math/bits" + "strings" ) func buildop(ctxt *obj.Link) {} @@ -308,6 +309,12 @@ func setPCs(p *obj.Prog, pc int64) int64 { for _, ins := range instructionsForProg(p) { pc += int64(ins.length()) } + + if 
p.As == obj.APCALIGN { + alignedValue := p.From.Offset + v := pcAlignPadLength(pc, alignedValue) + pc += int64(v) + } } return pc } @@ -733,6 +740,16 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: high, Sym: cursym} p.Link.To.Offset = low } + + case obj.APCALIGN: + alignedValue := p.From.Offset + if (alignedValue&(alignedValue-1) != 0) || 4 > alignedValue || alignedValue > 2048 { + ctxt.Diag("alignment value of an instruction must be a power of two and in the range [4, 2048], got %d\n", alignedValue) + } + // Update the current text symbol alignment value. + if int32(alignedValue) > cursym.Func().Align { + cursym.Func().Align = int32(alignedValue) + } } } @@ -744,6 +761,10 @@ func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { } } +func pcAlignPadLength(pc int64, alignedValue int64) int { + return int(-pc & (alignedValue - 1)) +} + func stacksplit(ctxt *obj.Link, p *obj.Prog, cursym *obj.LSym, newprog obj.ProgAlloc, framesize int64) *obj.Prog { // Leaf function with no frame is effectively NOSPLIT. if framesize == 0 { @@ -1708,6 +1729,7 @@ var encodings = [ALAST & obj.AMask]encoding{ obj.ANOP: pseudoOpEncoding, obj.ADUFFZERO: pseudoOpEncoding, obj.ADUFFCOPY: pseudoOpEncoding, + obj.APCALIGN: pseudoOpEncoding, } // encodingForAs returns the encoding for an obj.As. @@ -2252,8 +2274,12 @@ func instructionsForProg(p *obj.Prog) []*instruction { ins.imm = 0x0ff case AFCVTWS, AFCVTLS, AFCVTWUS, AFCVTLUS, AFCVTWD, AFCVTLD, AFCVTWUD, AFCVTLUD: - // Set the rounding mode in funct3 to round to zero. - ins.funct3 = 1 + // Set the default rounding mode in funct3 to round to zero. + if p.Scond&rmSuffixBit == 0 { + ins.funct3 = uint32(RM_RTZ) + } else { + ins.funct3 = uint32(p.Scond &^ rmSuffixBit) + } case AFNES, AFNED: // Replace FNE[SD] with FEQ[SD] and NOT. 
@@ -2425,6 +2451,17 @@ func assemble(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { rel.Sym = addr.Sym rel.Add = addr.Offset rel.Type = rt + + case obj.APCALIGN: + alignedValue := p.From.Offset + v := pcAlignPadLength(p.Pc, alignedValue) + offset := p.Pc + for ; v >= 4; v -= 4 { + // NOP + cursym.WriteBytes(ctxt, offset, []byte{0x13, 0, 0, 0}) + offset += 4 + } + continue } offset := p.Pc @@ -2446,6 +2483,14 @@ func isUnsafePoint(p *obj.Prog) bool { return p.Mark&USES_REG_TMP == USES_REG_TMP || p.From.Reg == REG_TMP || p.To.Reg == REG_TMP || p.Reg == REG_TMP } +func ParseSuffix(prog *obj.Prog, cond string) (err error) { + switch prog.As { + case AFCVTWS, AFCVTLS, AFCVTWUS, AFCVTLUS, AFCVTWD, AFCVTLD, AFCVTWUD, AFCVTLUD: + prog.Scond, err = rmSuffixEncode(strings.TrimPrefix(cond, ".")) + } + return +} + var LinkRISCV64 = obj.LinkArch{ Arch: sys.ArchRISCV64, Init: buildop, diff --git a/src/cmd/internal/obj/s390x/asmz.go b/src/cmd/internal/obj/s390x/asmz.go index bf6d48e305..7b560e0053 100644 --- a/src/cmd/internal/obj/s390x/asmz.go +++ b/src/cmd/internal/obj/s390x/asmz.go @@ -4434,7 +4434,7 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { } zRRE(op_KDSA, uint32(p.From.Reg), uint32(p.To.Reg), asm) - case 126: // KMA and KMCTR - CIPHER MESSAGE WITH AUTHENTICATION; CIPHER MESSAGE WITH + case 126: // KMA and KMCTR - CIPHER MESSAGE WITH AUTHENTICATION; CIPHER MESSAGE WITH COUNTER var opcode uint32 switch p.As { default: @@ -4458,16 +4458,13 @@ func (c *ctxtz) asmout(p *obj.Prog, asm *[]byte) { if p.Reg&1 != 0 { c.ctxt.Diag("third argument must be even register in %v", p) } - if p.Reg == p.To.Reg || p.Reg == p.From.Reg { - c.ctxt.Diag("third argument must not be input or output argument registers in %v", p) - } if p.As == AKMA { opcode = op_KMA } else if p.As == AKMCTR { opcode = op_KMCTR } } - zRRF(opcode, uint32(p.From.Reg), 0, uint32(p.Reg), uint32(p.To.Reg), asm) + zRRF(opcode, uint32(p.Reg), 0, uint32(p.From.Reg), uint32(p.To.Reg), asm) } } diff --git 
a/src/cmd/internal/objabi/funcid.go b/src/cmd/internal/objabi/funcid.go index 007107e778..d9b47f1ec9 100644 --- a/src/cmd/internal/objabi/funcid.go +++ b/src/cmd/internal/objabi/funcid.go @@ -14,6 +14,7 @@ var funcIDs = map[string]abi.FuncID{ "asmcgocall": abi.FuncID_asmcgocall, "asyncPreempt": abi.FuncID_asyncPreempt, "cgocallback": abi.FuncID_cgocallback, + "corostart": abi.FuncID_corostart, "debugCallV2": abi.FuncID_debugCallV2, "gcBgMarkWorker": abi.FuncID_gcBgMarkWorker, "rt0_go": abi.FuncID_rt0_go, diff --git a/src/cmd/internal/objabi/pkgspecial.go b/src/cmd/internal/objabi/pkgspecial.go index 9bf07153a4..6c1f460f07 100644 --- a/src/cmd/internal/objabi/pkgspecial.go +++ b/src/cmd/internal/objabi/pkgspecial.go @@ -46,10 +46,11 @@ var runtimePkgs = []string{ "runtime/internal/atomic", "runtime/internal/math", "runtime/internal/sys", - "runtime/internal/syscall", + "internal/runtime/syscall", "internal/abi", "internal/bytealg", + "internal/chacha8rand", "internal/coverage/rtcov", "internal/cpu", "internal/goarch", @@ -79,7 +80,8 @@ var allowAsmABIPkgs = []string{ "reflect", "syscall", "internal/bytealg", - "runtime/internal/syscall", + "internal/chacha8rand", + "internal/runtime/syscall", "runtime/internal/startlinetest", } diff --git a/src/cmd/internal/objabi/reloctype.go b/src/cmd/internal/objabi/reloctype.go index e3e042a511..8b9927d6eb 100644 --- a/src/cmd/internal/objabi/reloctype.go +++ b/src/cmd/internal/objabi/reloctype.go @@ -320,31 +320,34 @@ const ( // Loong64. - // R_ADDRLOONG64 resolves to the low 12 bits of an external address, by encoding - // it into the instruction. - R_ADDRLOONG64 - - // R_ADDRLOONG64U resolves to the sign-adjusted "upper" 20 bits (bit 5-24) of an + // R_LOONG64_ADDR_HI resolves to the sign-adjusted "upper" 20 bits (bit 5-24) of an // external address, by encoding it into the instruction. - R_ADDRLOONG64U + // R_LOONG64_ADDR_LO resolves to the low 12 bits of an external address, by encoding + // it into the instruction. 
+ R_LOONG64_ADDR_HI + R_LOONG64_ADDR_LO - // R_ADDRLOONG64TLS resolves to the low 12 bits of a TLS address (offset from + // R_LOONG64_TLS_LE_HI resolves to the high 20 bits of a TLS address (offset from // thread pointer), by encoding it into the instruction. - R_ADDRLOONG64TLS - - // R_ADDRLOONG64TLSU resolves to the high 20 bits of a TLS address (offset from + // R_LOONG64_TLS_LE_LO resolves to the low 12 bits of a TLS address (offset from // thread pointer), by encoding it into the instruction. - R_ADDRLOONG64TLSU + R_LOONG64_TLS_LE_HI + R_LOONG64_TLS_LE_LO // R_CALLLOONG64 resolves to non-PC-relative target address of a CALL (BL/JIRL) // instruction, by encoding the address into the instruction. R_CALLLOONG64 - // R_LOONG64_TLS_IE_PCREL_HI and R_LOONG64_TLS_IE_LO relocates a pcalau12i, ld.d + // R_LOONG64_TLS_IE_HI and R_LOONG64_TLS_IE_LO relocates a pcalau12i, ld.d // pair to compute the address of the GOT slot of the tls symbol. - R_LOONG64_TLS_IE_PCREL_HI + R_LOONG64_TLS_IE_HI R_LOONG64_TLS_IE_LO + // R_LOONG64_GOT_HI and R_LOONG64_GOT_LO resolves a GOT-relative instruction sequence, + // usually an pcalau12i followed by another ld or addi instruction. + R_LOONG64_GOT_HI + R_LOONG64_GOT_LO + // R_JMPLOONG64 resolves to non-PC-relative target address of a JMP instruction, // by encoding the address into the instruction. 
R_JMPLOONG64 diff --git a/src/cmd/internal/objabi/reloctype_string.go b/src/cmd/internal/objabi/reloctype_string.go index e8793dedc1..6d8cbb5dd2 100644 --- a/src/cmd/internal/objabi/reloctype_string.go +++ b/src/cmd/internal/objabi/reloctype_string.go @@ -82,26 +82,28 @@ func _() { _ = x[R_RISCV_RVC_BRANCH-72] _ = x[R_RISCV_RVC_JUMP-73] _ = x[R_PCRELDBL-74] - _ = x[R_ADDRLOONG64-75] - _ = x[R_ADDRLOONG64U-76] - _ = x[R_ADDRLOONG64TLS-77] - _ = x[R_ADDRLOONG64TLSU-78] + _ = x[R_LOONG64_ADDR_HI-75] + _ = x[R_LOONG64_ADDR_LO-76] + _ = x[R_LOONG64_TLS_LE_HI-77] + _ = x[R_LOONG64_TLS_LE_LO-78] _ = x[R_CALLLOONG64-79] - _ = x[R_LOONG64_TLS_IE_PCREL_HI-80] + _ = x[R_LOONG64_TLS_IE_HI-80] _ = x[R_LOONG64_TLS_IE_LO-81] - _ = x[R_JMPLOONG64-82] - _ = x[R_ADDRMIPSU-83] - _ = x[R_ADDRMIPSTLS-84] - _ = x[R_ADDRCUOFF-85] - _ = x[R_WASMIMPORT-86] - _ = x[R_XCOFFREF-87] - _ = x[R_PEIMAGEOFF-88] - _ = x[R_INITORDER-89] + _ = x[R_LOONG64_GOT_HI-82] + _ = x[R_LOONG64_GOT_LO-83] + _ = x[R_JMPLOONG64-84] + _ = x[R_ADDRMIPSU-85] + _ = x[R_ADDRMIPSTLS-86] + _ = x[R_ADDRCUOFF-87] + _ = x[R_WASMIMPORT-88] + _ = x[R_XCOFFREF-89] + _ = x[R_PEIMAGEOFF-90] + _ = x[R_INITORDER-91] } -const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_USENAMEDMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_PCREL_LDST8R_ARM64_PCREL_LDST16R_ARM64_PCREL_LDST32R_ARM64_PCREL_LDST64R_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_POWER_TLS_IE_PCREL34R_POWER_TLS_LE_TPREL34R_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_GOT_PCREL34R_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_ADDRPOWER_D34R_ADDRPOWER_PCREL34R_RISCV_JALR_RISCV_JAL_TRAMPR_RISCV_CALLR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IER_RISCV_TLS_LER_RISCV_GOT_HI20R_RISCV_PCREL_HI20R_RISCV_PCREL_LO12_IR_RISCV_PCREL_LO12_SR_RISCV_BRANCHR_RISCV_RVC_BRANCHR_RISCV_RVC_JUMPR_PCRELDBLR_ADDRLOONG64R_ADDRLOONG64UR_ADDRLOONG64TLSR_ADDRLOONG64TLSUR_CALLLOONG64R_LOONG64_TLS_IE_PCREL_HIR_LOONG64_TLS_IE_LOR_JMPLOONG64R_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREFR_PEIMAGEOFFR_INITORDER" +const _RelocType_name = 
"R_ADDRR_ADDRPOWERR_ADDRARM64R_ADDRMIPSR_ADDROFFR_SIZER_CALLR_CALLARMR_CALLARM64R_CALLINDR_CALLPOWERR_CALLMIPSR_CONSTR_PCRELR_TLS_LER_TLS_IER_GOTOFFR_PLT0R_PLT1R_PLT2R_USEFIELDR_USETYPER_USEIFACER_USEIFACEMETHODR_USENAMEDMETHODR_METHODOFFR_KEEPR_POWER_TOCR_GOTPCRELR_JMPMIPSR_DWARFSECREFR_DWARFFILEREFR_ARM64_TLS_LER_ARM64_TLS_IER_ARM64_GOTPCRELR_ARM64_GOTR_ARM64_PCRELR_ARM64_PCREL_LDST8R_ARM64_PCREL_LDST16R_ARM64_PCREL_LDST32R_ARM64_PCREL_LDST64R_ARM64_LDST8R_ARM64_LDST16R_ARM64_LDST32R_ARM64_LDST64R_ARM64_LDST128R_POWER_TLS_LER_POWER_TLS_IER_POWER_TLSR_POWER_TLS_IE_PCREL34R_POWER_TLS_LE_TPREL34R_ADDRPOWER_DSR_ADDRPOWER_GOTR_ADDRPOWER_GOT_PCREL34R_ADDRPOWER_PCRELR_ADDRPOWER_TOCRELR_ADDRPOWER_TOCREL_DSR_ADDRPOWER_D34R_ADDRPOWER_PCREL34R_RISCV_JALR_RISCV_JAL_TRAMPR_RISCV_CALLR_RISCV_PCREL_ITYPER_RISCV_PCREL_STYPER_RISCV_TLS_IER_RISCV_TLS_LER_RISCV_GOT_HI20R_RISCV_PCREL_HI20R_RISCV_PCREL_LO12_IR_RISCV_PCREL_LO12_SR_RISCV_BRANCHR_RISCV_RVC_BRANCHR_RISCV_RVC_JUMPR_PCRELDBLR_LOONG64_ADDR_HIR_LOONG64_ADDR_LOR_LOONG64_TLS_LE_HIR_LOONG64_TLS_LE_LOR_CALLLOONG64R_LOONG64_TLS_IE_HIR_LOONG64_TLS_IE_LOR_LOONG64_GOT_HIR_LOONG64_GOT_LOR_JMPLOONG64R_ADDRMIPSUR_ADDRMIPSTLSR_ADDRCUOFFR_WASMIMPORTR_XCOFFREFR_PEIMAGEOFFR_INITORDER" -var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 116, 123, 131, 139, 147, 153, 159, 165, 175, 184, 194, 210, 226, 237, 243, 254, 264, 273, 286, 300, 314, 328, 344, 355, 368, 387, 407, 427, 447, 460, 474, 488, 502, 517, 531, 545, 556, 578, 600, 614, 629, 652, 669, 687, 708, 723, 742, 753, 770, 782, 801, 820, 834, 848, 864, 882, 902, 922, 936, 954, 970, 980, 993, 1007, 1023, 1040, 1053, 1078, 1097, 1109, 1120, 1133, 1144, 1156, 1166, 1178, 1189} +var _RelocType_index = [...]uint16{0, 6, 17, 28, 38, 47, 53, 59, 68, 79, 88, 99, 109, 116, 123, 131, 139, 147, 153, 159, 165, 175, 184, 194, 210, 226, 237, 243, 254, 264, 273, 286, 300, 314, 328, 344, 355, 368, 387, 407, 427, 447, 460, 474, 488, 502, 517, 531, 545, 556, 578, 600, 
614, 629, 652, 669, 687, 708, 723, 742, 753, 770, 782, 801, 820, 834, 848, 864, 882, 902, 922, 936, 954, 970, 980, 997, 1014, 1033, 1052, 1065, 1084, 1103, 1119, 1135, 1147, 1158, 1171, 1182, 1194, 1204, 1216, 1227} func (i RelocType) String() string { i -= 1 diff --git a/src/cmd/internal/testdir/testdir_test.go b/src/cmd/internal/testdir/testdir_test.go index 1b91dbe3ce..6f1c56eb2d 100644 --- a/src/cmd/internal/testdir/testdir_test.go +++ b/src/cmd/internal/testdir/testdir_test.go @@ -62,6 +62,7 @@ var ( goarch string // Target GOARCH cgoEnabled bool goExperiment string + goDebug string // dirs are the directories to look for *.go files in. // TODO(bradfitz): just use all directories? @@ -100,6 +101,7 @@ func Test(t *testing.T) { GOOS string GOARCH string GOEXPERIMENT string + GODEBUG string CGO_ENABLED string } if err := json.NewDecoder(stdout).Decode(&env); err != nil { @@ -112,6 +114,7 @@ func Test(t *testing.T) { goarch = env.GOARCH cgoEnabled, _ = strconv.ParseBool(env.CGO_ENABLED) goExperiment = env.GOEXPERIMENT + goDebug = env.GODEBUG common := testCommon{ gorootTestDir: filepath.Join(testenv.GOROOT(t), "test"), @@ -537,6 +540,7 @@ func (t test) run() error { } goexp := goExperiment + godebug := goDebug // collect flags for len(args) > 0 && strings.HasPrefix(args[0], "-") { @@ -569,6 +573,14 @@ func (t test) run() error { goexp += args[0] runenv = append(runenv, "GOEXPERIMENT="+goexp) + case "-godebug": // set GODEBUG environment + args = args[1:] + if godebug != "" { + godebug += "," + } + godebug += args[0] + runenv = append(runenv, "GODEBUG="+godebug) + default: flags = append(flags, args[0]) } @@ -1014,7 +1026,7 @@ func (t test) run() error { runInDir = "" var out []byte var err error - if len(flags)+len(args) == 0 && t.goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS && goexp == goExperiment { + if len(flags)+len(args) == 0 && t.goGcflagsIsEmpty() && !*linkshared && goarch == runtime.GOARCH && goos == runtime.GOOS 
&& goexp == goExperiment && godebug == goDebug { // If we're not using special go command flags, // skip all the go command machinery. // This avoids any time the go command would @@ -1458,7 +1470,7 @@ var ( archVariants = map[string][]string{ "386": {"GO386", "sse2", "softfloat"}, "amd64": {"GOAMD64", "v1", "v2", "v3", "v4"}, - "arm": {"GOARM", "5", "6", "7"}, + "arm": {"GOARM", "5", "6", "7", "7,softfloat"}, "arm64": {}, "loong64": {}, "mips": {"GOMIPS", "hardfloat", "softfloat"}, @@ -1468,7 +1480,7 @@ var ( "ppc64x": {}, // A pseudo-arch representing both ppc64 and ppc64le "s390x": {}, "wasm": {}, - "riscv64": {}, + "riscv64": {"GORISCV64", "rva20u64", "rva22u64"}, } ) diff --git a/src/cmd/link/doc.go b/src/cmd/link/doc.go index c5f43a2954..bd620f9878 100644 --- a/src/cmd/link/doc.go +++ b/src/cmd/link/doc.go @@ -43,10 +43,12 @@ Flags: or initialized to a constant string expression. -X will not work if the initializer makes a function call or refers to other variables. Note that before Go 1.5 this option took two separate arguments. - -a - Disassemble output. -asan Link with C/C++ address sanitizer support. + -aslr + Enable ASLR for buildmode=c-shared on windows (default true). + -bindnow + Mark a dynamically linked ELF object for immediate function binding (default false). -buildid id Record id as Go toolchain build id. -buildmode mode @@ -64,8 +66,6 @@ Flags: The dynamic header is on by default, even without any references to dynamic libraries, because many common system tools now assume the presence of the header. - -debugtramp int - Debug trampolines. -dumpdep Dump symbol dependency graph. -extar ar @@ -104,8 +104,6 @@ Flags: Set runtime.MemProfileRate to rate. -msan Link with C/C++ memory sanitizer support. - -n - Dump symbol table. -o file Write output to file (default a.out, or a.out.exe on Windows). -pluginpath path @@ -116,13 +114,9 @@ Flags: Link with race detection libraries. -s Omit the symbol table and debug information. 
- -shared - Generated shared object (implies -linkmode external; experimental). -tmpdir dir Write temporary files to dir. Temporary files are only used in external linking mode. - -u - Reject unsafe packages. -v Print trace of linker operations. -w diff --git a/src/cmd/link/internal/arm64/asm.go b/src/cmd/link/internal/arm64/asm.go index 6645795506..7b85bb3e26 100644 --- a/src/cmd/link/internal/arm64/asm.go +++ b/src/cmd/link/internal/arm64/asm.go @@ -1222,7 +1222,7 @@ func gensymlate(ctxt *ld.Link, ldr *loader.Loader) { // that relocations can target them with smaller addends. // On Windows, we only get 21 bits, again (presumably) signed. // Also, on Windows (always) and Darwin (for very large binaries), the external - // linker does't support CALL relocations with addend, so we generate "label" + // linker doesn't support CALL relocations with addend, so we generate "label" // symbols for functions of which we can target the middle (Duff's devices). if !ctxt.IsDarwin() && !ctxt.IsWindows() || !ctxt.IsExternal() { return diff --git a/src/cmd/link/internal/dwtest/dwtest.go b/src/cmd/link/internal/dwtest/dwtest.go index c68edf4187..c69a5aa4fc 100644 --- a/src/cmd/link/internal/dwtest/dwtest.go +++ b/src/cmd/link/internal/dwtest/dwtest.go @@ -69,8 +69,8 @@ func (ex *Examiner) Populate(rdr *dwarf.Reader) error { return nil } -func (e *Examiner) DIEs() []*dwarf.Entry { - return e.dies +func (ex *Examiner) DIEs() []*dwarf.Entry { + return ex.dies } func indent(ilevel int) { @@ -90,7 +90,7 @@ func (ex *Examiner) DumpEntry(idx int, dumpKids bool, ilevel int) { fmt.Printf("0x%x: %v\n", idx, entry.Tag) for _, f := range entry.Field { indent(ilevel) - fmt.Printf("at=%v val=0x%x\n", f.Attr, f.Val) + fmt.Printf("at=%v val=%v\n", f.Attr, f.Val) } if dumpKids { ksl := ex.kids[idx] diff --git a/src/cmd/link/internal/ld/data.go b/src/cmd/link/internal/ld/data.go index 2d761c7ee7..b4930277e4 100644 --- a/src/cmd/link/internal/ld/data.go +++ b/src/cmd/link/internal/ld/data.go @@ 
-43,7 +43,9 @@ import ( "debug/elf" "encoding/binary" "fmt" + "internal/abi" "log" + "math/rand" "os" "sort" "strconv" @@ -56,10 +58,11 @@ import ( func isRuntimeDepPkg(pkg string) bool { switch pkg { case "runtime", - "sync/atomic", // runtime may call to sync/atomic, due to go:linkname - "internal/abi", // used by reflectcall (and maybe more) - "internal/bytealg", // for IndexByte - "internal/cpu": // for cpu features + "sync/atomic", // runtime may call to sync/atomic, due to go:linkname + "internal/abi", // used by reflectcall (and maybe more) + "internal/bytealg", // for IndexByte + "internal/chacha8rand", // for rand + "internal/cpu": // for cpu features return true } return strings.HasPrefix(pkg, "runtime/internal/") && !strings.HasSuffix(pkg, "_test") @@ -120,10 +123,11 @@ func trampoline(ctxt *Link, s loader.Sym) { } if ldr.SymValue(rs) == 0 && ldr.SymType(rs) != sym.SDYNIMPORT && ldr.SymType(rs) != sym.SUNDEFEXT { - // Symbols in the same package are laid out together. + // Symbols in the same package are laid out together (if we + // don't randomize the function order). // Except that if SymPkg(s) == "", it is a host object symbol // which may call an external symbol via PLT. - if ldr.SymPkg(s) != "" && ldr.SymPkg(rs) == ldr.SymPkg(s) { + if ldr.SymPkg(s) != "" && ldr.SymPkg(rs) == ldr.SymPkg(s) && *flagRandLayout == 0 { // RISC-V is only able to reach +/-1MiB via a JAL instruction. // We need to generate a trampoline when an address is // currently unknown. @@ -132,7 +136,7 @@ func trampoline(ctxt *Link, s loader.Sym) { } } // Runtime packages are laid out together. 
- if isRuntimeDepPkg(ldr.SymPkg(s)) && isRuntimeDepPkg(ldr.SymPkg(rs)) { + if isRuntimeDepPkg(ldr.SymPkg(s)) && isRuntimeDepPkg(ldr.SymPkg(rs)) && *flagRandLayout == 0 { continue } } @@ -2395,6 +2399,26 @@ func (ctxt *Link) textaddress() { ldr := ctxt.loader + if *flagRandLayout != 0 { + r := rand.New(rand.NewSource(*flagRandLayout)) + textp := ctxt.Textp + i := 0 + // don't move the buildid symbol + if len(textp) > 0 && ldr.SymName(textp[0]) == "go:buildid" { + i++ + } + // Skip over C symbols, as functions in a (C object) section must stay together. + // TODO: maybe we can move a section as a whole. + // Note: we load C symbols before Go symbols, so we can scan from the start. + for i < len(textp) && (ldr.SubSym(textp[i]) != 0 || ldr.AttrSubSymbol(textp[i])) { + i++ + } + textp = textp[i:] + r.Shuffle(len(textp), func(i, j int) { + textp[i], textp[j] = textp[j], textp[i] + }) + } + text := ctxt.xdefine("runtime.text", sym.STEXT, 0) etext := ctxt.xdefine("runtime.etext", sym.STEXT, 0) ldr.SetSymSect(text, sect) @@ -2555,8 +2579,8 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64 sect.Align = align } - funcsize := uint64(MINFUNC) // spacing required for findfunctab - if ldr.SymSize(s) > MINFUNC { + funcsize := uint64(abi.MINFUNC) // spacing required for findfunctab + if ldr.SymSize(s) > abi.MINFUNC { funcsize = uint64(ldr.SymSize(s)) } @@ -2610,7 +2634,7 @@ func assignAddress(ctxt *Link, sect *sym.Section, n int, s loader.Sym, va uint64 // Assign its address directly in order to be the // first symbol of this new section. 
ntext.SetType(sym.STEXT) - ntext.SetSize(int64(MINFUNC)) + ntext.SetSize(int64(abi.MINFUNC)) ntext.SetOnList(true) ntext.SetAlign(sectAlign) ctxt.tramps = append(ctxt.tramps, ntext.Sym()) diff --git a/src/cmd/link/internal/ld/deadcode.go b/src/cmd/link/internal/ld/deadcode.go index 70b4a7ca30..de4395d5df 100644 --- a/src/cmd/link/internal/ld/deadcode.go +++ b/src/cmd/link/internal/ld/deadcode.go @@ -201,7 +201,7 @@ func (d *deadcodePass) flood() { rs := r.Sym() if d.ldr.IsItab(rs) { // This relocation can also point at an itab, in which case it - // means "the _type field of that itab". + // means "the Type field of that itab". rs = decodeItabType(d.ldr, d.ctxt.Arch, rs) } if !d.ldr.IsGoType(rs) && !d.ctxt.linkShared { diff --git a/src/cmd/link/internal/ld/decodesym.go b/src/cmd/link/internal/ld/decodesym.go index 05da11ec1e..aa40496492 100644 --- a/src/cmd/link/internal/ld/decodesym.go +++ b/src/cmd/link/internal/ld/decodesym.go @@ -301,8 +301,8 @@ func decodetypeGcprogShlib(ctxt *Link, data []byte) uint64 { return decodeInuxi(ctxt.Arch, data[2*int32(ctxt.Arch.PtrSize)+8+1*int32(ctxt.Arch.PtrSize):], ctxt.Arch.PtrSize) } -// decodeItabType returns the itab._type field from an itab. +// decodeItabType returns the itab.Type field from an itab. func decodeItabType(ldr *loader.Loader, arch *sys.Arch, symIdx loader.Sym) loader.Sym { relocs := ldr.Relocs(symIdx) - return decodeRelocSym(ldr, symIdx, &relocs, int32(arch.PtrSize)) + return decodeRelocSym(ldr, symIdx, &relocs, int32(abi.ITabTypeOff(arch.PtrSize))) } diff --git a/src/cmd/link/internal/ld/dwarf.go b/src/cmd/link/internal/ld/dwarf.go index dcbe719c96..e2bb3f45f9 100644 --- a/src/cmd/link/internal/ld/dwarf.go +++ b/src/cmd/link/internal/ld/dwarf.go @@ -748,6 +748,7 @@ func (d *dwctxt) defptrto(dwtype loader.Sym) loader.Sym { // pointers of slices. Link to the ones we can find. 
gts := d.ldr.Lookup("type:"+ptrname, 0) if gts != 0 && d.ldr.AttrReachable(gts) { + newattr(pdie, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, int64(objabi.KindPtr), 0) newattr(pdie, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(gts)) } @@ -849,13 +850,6 @@ func mkinternaltypename(base string, arg1 string, arg2 string) string { return fmt.Sprintf("%s<%s,%s>", base, arg1, arg2) } -// synthesizemaptypes is way too closely married to runtime/hashmap.c -const ( - MaxKeySize = abi.MapMaxKeyBytes - MaxValSize = abi.MapMaxElemBytes - BucketSize = abi.MapBucketCount -) - func (d *dwctxt) mkinternaltype(ctxt *Link, abbrev int, typename, keyname, valname string, f func(*dwarf.DWDie)) loader.Sym { name := mkinternaltypename(typename, keyname, valname) symname := dwarf.InfoPrefix + name @@ -890,11 +884,11 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { // compute size info like hashmap.c does. indirectKey, indirectVal := false, false - if keysize > MaxKeySize { + if keysize > abi.MapMaxKeyBytes { keysize = int64(d.arch.PtrSize) indirectKey = true } - if valsize > MaxValSize { + if valsize > abi.MapMaxElemBytes { valsize = int64(d.arch.PtrSize) indirectVal = true } @@ -902,28 +896,28 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { // Construct type to represent an array of BucketSize keys keyname := d.nameFromDIESym(keytype) dwhks := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]key", keyname, "", func(dwhk *dwarf.DWDie) { - newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*keysize, 0) + newattr(dwhk, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount*keysize, 0) t := keytype if indirectKey { t = d.defptrto(keytype) } d.newrefattr(dwhk, dwarf.DW_AT_type, t) fld := d.newdie(dwhk, dwarf.DW_ABRV_ARRAYRANGE, "size") - newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0) + newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount, 0) 
d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) }) // Construct type to represent an array of BucketSize values valname := d.nameFromDIESym(valtype) dwhvs := d.mkinternaltype(ctxt, dwarf.DW_ABRV_ARRAYTYPE, "[]val", valname, "", func(dwhv *dwarf.DWDie) { - newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize*valsize, 0) + newattr(dwhv, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount*valsize, 0) t := valtype if indirectVal { t = d.defptrto(valtype) } d.newrefattr(dwhv, dwarf.DW_AT_type, t) fld := d.newdie(dwhv, dwarf.DW_ABRV_ARRAYRANGE, "size") - newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, BucketSize, 0) + newattr(fld, dwarf.DW_AT_count, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount, 0) d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) }) @@ -935,20 +929,20 @@ func (d *dwctxt) synthesizemaptypes(ctxt *Link, die *dwarf.DWDie) { fld := d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "keys") d.newrefattr(fld, dwarf.DW_AT_type, dwhks) - newmemberoffsetattr(fld, BucketSize) + newmemberoffsetattr(fld, abi.MapBucketCount) fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "values") d.newrefattr(fld, dwarf.DW_AT_type, dwhvs) - newmemberoffsetattr(fld, BucketSize+BucketSize*int32(keysize)) + newmemberoffsetattr(fld, abi.MapBucketCount+abi.MapBucketCount*int32(keysize)) fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "overflow") d.newrefattr(fld, dwarf.DW_AT_type, d.defptrto(d.dtolsym(dwhb.Sym))) - newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))) + newmemberoffsetattr(fld, abi.MapBucketCount+abi.MapBucketCount*(int32(keysize)+int32(valsize))) if d.arch.RegSize > d.arch.PtrSize { fld = d.newdie(dwhb, dwarf.DW_ABRV_STRUCTFIELD, "pad") d.newrefattr(fld, dwarf.DW_AT_type, d.uintptrInfoSym) - newmemberoffsetattr(fld, BucketSize+BucketSize*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize)) + newmemberoffsetattr(fld, 
abi.MapBucketCount+abi.MapBucketCount*(int32(keysize)+int32(valsize))+int32(d.arch.PtrSize)) } - newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, BucketSize+BucketSize*keysize+BucketSize*valsize+int64(d.arch.RegSize), 0) + newattr(dwhb, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, abi.MapBucketCount+abi.MapBucketCount*keysize+abi.MapBucketCount*valsize+int64(d.arch.RegSize), 0) }) // Construct hash @@ -1760,12 +1754,13 @@ func dwarfGenerateDebugInfo(ctxt *Link) { // Some types that must exist to define other ones (uintptr in particular // is needed for array size) - d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer") - die := d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BASETYPE, "uintptr") - newattr(die, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0) - newattr(die, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(d.arch.PtrSize), 0) - newattr(die, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, objabi.KindUintptr, 0) - newattr(die, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_ADDRESS, 0, dwSym(d.lookupOrDiag("type:uintptr"))) + unsafeptrDie := d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BARE_PTRTYPE, "unsafe.Pointer") + newattr(unsafeptrDie, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(d.lookupOrDiag("type:unsafe.Pointer"))) + uintptrDie := d.mkBuiltinType(ctxt, dwarf.DW_ABRV_BASETYPE, "uintptr") + newattr(uintptrDie, dwarf.DW_AT_encoding, dwarf.DW_CLS_CONSTANT, dwarf.DW_ATE_unsigned, 0) + newattr(uintptrDie, dwarf.DW_AT_byte_size, dwarf.DW_CLS_CONSTANT, int64(d.arch.PtrSize), 0) + newattr(uintptrDie, dwarf.DW_AT_go_kind, dwarf.DW_CLS_CONSTANT, objabi.KindUintptr, 0) + newattr(uintptrDie, dwarf.DW_AT_go_runtime_type, dwarf.DW_CLS_GO_TYPEREF, 0, dwSym(d.lookupOrDiag("type:uintptr"))) d.uintptrInfoSym = d.mustFind("uintptr") @@ -1791,7 +1786,7 @@ func dwarfGenerateDebugInfo(ctxt *Link) { "type:internal/abi.SliceType", "type:internal/abi.StructType", "type:internal/abi.InterfaceType", - "type:runtime.itab", + 
"type:internal/abi.ITab", "type:internal/abi.Imethod"} { d.defgotype(d.lookupOrDiag(typ)) } diff --git a/src/cmd/link/internal/ld/dwarf_test.go b/src/cmd/link/internal/ld/dwarf_test.go index 6ca2a844f5..8cea573999 100644 --- a/src/cmd/link/internal/ld/dwarf_test.go +++ b/src/cmd/link/internal/ld/dwarf_test.go @@ -65,7 +65,7 @@ func TestRuntimeTypesPresent(t *testing.T) { "internal/abi.SliceType": true, "internal/abi.StructType": true, "internal/abi.InterfaceType": true, - "runtime.itab": true, + "internal/abi.ITab": true, } found := findTypes(t, dwarf, want) @@ -1985,17 +1985,7 @@ func main() { } } -func TestZeroSizedVariable(t *testing.T) { - testenv.MustHaveGoBuild(t) - - mustHaveDWARF(t) - t.Parallel() - - // This test verifies that the compiler emits DIEs for zero sized variables - // (for example variables of type 'struct {}'). - // See go.dev/issues/54615. - - const prog = ` +const zeroSizedVarProg = ` package main import ( @@ -2008,10 +1998,24 @@ func main() { } ` +func TestZeroSizedVariable(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // This test verifies that the compiler emits DIEs for zero sized variables + // (for example variables of type 'struct {}'). + // See go.dev/issues/54615. + for _, opt := range []string{NoOpt, DefaultOpt} { opt := opt t.Run(opt, func(t *testing.T) { - _, ex := gobuildAndExamine(t, prog, opt) + _, ex := gobuildAndExamine(t, zeroSizedVarProg, opt) // Locate the main.zeroSizedVariable DIE abcs := ex.Named("zeroSizedVariable") @@ -2024,3 +2028,50 @@ func main() { }) } } + +func TestConsistentGoKindAndRuntimeType(t *testing.T) { + testenv.MustHaveGoBuild(t) + + mustHaveDWARF(t) + t.Parallel() + + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + // Ensure that if we emit a "go runtime type" attr on a type DIE, + // we also include the "go kind" attribute. See issue #64231. 
+ _, ex := gobuildAndExamine(t, zeroSizedVarProg, DefaultOpt) + + // Walk all dies. + typesChecked := 0 + failures := 0 + for _, die := range ex.DIEs() { + // For any type DIE with DW_AT_go_runtime_type set... + rtt, hasRT := die.Val(intdwarf.DW_AT_go_runtime_type).(uint64) + if !hasRT || rtt == 0 { + continue + } + // ... except unsafe.Pointer... + if name, _ := die.Val(intdwarf.DW_AT_name).(string); name == "unsafe.Pointer" { + continue + } + typesChecked++ + // ... we want to see a meaningful DW_AT_go_kind value. + if val, ok := die.Val(intdwarf.DW_AT_go_kind).(int64); !ok || val == 0 { + failures++ + // dump DIEs for first 10 failures. + if failures <= 10 { + idx := ex.IdxFromOffset(die.Offset) + t.Logf("type DIE has DW_AT_go_runtime_type but invalid DW_AT_go_kind:\n") + ex.DumpEntry(idx, false, 0) + } + t.Errorf("bad type DIE at offset %d\n", die.Offset) + } + } + if typesChecked == 0 { + t.Fatalf("something went wrong, 0 types checked") + } else { + t.Logf("%d types checked\n", typesChecked) + } +} diff --git a/src/cmd/link/internal/ld/elf.go b/src/cmd/link/internal/ld/elf.go index be9e22946a..7c035df97e 100644 --- a/src/cmd/link/internal/ld/elf.go +++ b/src/cmd/link/internal/ld/elf.go @@ -1056,11 +1056,17 @@ func elfdynhash(ctxt *Link) { } s = ldr.CreateSymForUpdate(".dynamic", 0) - if ctxt.BuildMode == BuildModePIE { - // https://github.com/bminor/glibc/blob/895ef79e04a953cac1493863bcae29ad85657ee1/elf/elf.h#L986 - const DTFLAGS_1_PIE = 0x08000000 - Elfwritedynent(ctxt.Arch, s, elf.DT_FLAGS_1, uint64(DTFLAGS_1_PIE)) + + var dtFlags1 elf.DynFlag1 + if *flagBindNow { + dtFlags1 |= elf.DF_1_NOW + Elfwritedynent(ctxt.Arch, s, elf.DT_FLAGS, uint64(elf.DF_BIND_NOW)) } + if ctxt.BuildMode == BuildModePIE { + dtFlags1 |= elf.DF_1_PIE + } + Elfwritedynent(ctxt.Arch, s, elf.DT_FLAGS_1, uint64(dtFlags1)) + elfverneed = nfile if elfverneed != 0 { elfWriteDynEntSym(ctxt, s, elf.DT_VERNEED, gnuVersionR.Sym()) @@ -1107,6 +1113,7 @@ func elfphload(seg *sym.Segment) 
*ElfPhdr { func elfphrelro(seg *sym.Segment) { ph := newElfPhdr() ph.Type = elf.PT_GNU_RELRO + ph.Flags = elf.PF_R ph.Vaddr = seg.Vaddr ph.Paddr = seg.Vaddr ph.Memsz = seg.Length @@ -1556,7 +1563,11 @@ func (ctxt *Link) doelf() { /* global offset table */ got := ldr.CreateSymForUpdate(".got", 0) - got.SetType(sym.SELFGOT) // writable + if ctxt.UseRelro() { + got.SetType(sym.SRODATARELRO) + } else { + got.SetType(sym.SELFGOT) // writable + } /* ppc64 glink resolver */ if ctxt.IsPPC64() { @@ -1569,7 +1580,11 @@ func (ctxt *Link) doelf() { hash.SetType(sym.SELFROSECT) gotplt := ldr.CreateSymForUpdate(".got.plt", 0) - gotplt.SetType(sym.SELFSECT) // writable + if ctxt.UseRelro() && *flagBindNow { + gotplt.SetType(sym.SRODATARELRO) + } else { + gotplt.SetType(sym.SELFSECT) // writable + } plt := ldr.CreateSymForUpdate(".plt", 0) if ctxt.IsPPC64() { @@ -1591,9 +1606,12 @@ func (ctxt *Link) doelf() { /* define dynamic elf table */ dynamic := ldr.CreateSymForUpdate(".dynamic", 0) - if thearch.ELF.DynamicReadOnly { + switch { + case thearch.ELF.DynamicReadOnly: dynamic.SetType(sym.SELFROSECT) - } else { + case ctxt.UseRelro(): + dynamic.SetType(sym.SRODATARELRO) + default: dynamic.SetType(sym.SELFSECT) } diff --git a/src/cmd/link/internal/ld/elf_test.go b/src/cmd/link/internal/ld/elf_test.go index e535af6a1c..16bf4039b1 100644 --- a/src/cmd/link/internal/ld/elf_test.go +++ b/src/cmd/link/internal/ld/elf_test.go @@ -8,6 +8,7 @@ package ld import ( "debug/elf" + "fmt" "internal/testenv" "os" "path/filepath" @@ -182,3 +183,152 @@ func main() { } } } + +func TestElfBindNow(t *testing.T) { + t.Parallel() + testenv.MustHaveGoBuild(t) + + const ( + prog = `package main; func main() {}` + // with default buildmode code compiles in a statically linked binary, hence CGO + progC = `package main; import "C"; func main() {}` + ) + + tests := []struct { + name string + args []string + prog string + mustHaveBuildModePIE bool + mustHaveCGO bool + mustInternalLink bool + wantDfBindNow bool 
+ wantDf1Now bool + wantDf1Pie bool + }{ + {name: "default", prog: prog}, + { + name: "pie-linkmode-internal", + args: []string{"-buildmode=pie", "-ldflags", "-linkmode=internal"}, + prog: prog, + mustHaveBuildModePIE: true, + mustInternalLink: true, + wantDf1Pie: true, + }, + { + name: "bindnow-linkmode-internal", + args: []string{"-ldflags", "-bindnow -linkmode=internal"}, + prog: progC, + mustHaveCGO: true, + mustInternalLink: true, + wantDfBindNow: true, + wantDf1Now: true, + }, + { + name: "bindnow-pie-linkmode-internal", + args: []string{"-buildmode=pie", "-ldflags", "-bindnow -linkmode=internal"}, + prog: prog, + mustHaveBuildModePIE: true, + mustInternalLink: true, + wantDfBindNow: true, + wantDf1Now: true, + wantDf1Pie: true, + }, + { + name: "bindnow-pie-linkmode-external", + args: []string{"-buildmode=pie", "-ldflags", "-bindnow -linkmode=external"}, + prog: prog, + mustHaveBuildModePIE: true, + mustHaveCGO: true, + wantDfBindNow: true, + wantDf1Now: true, + wantDf1Pie: true, + }, + } + + gotDynFlag := func(flags []uint64, dynFlag uint64) bool { + for _, flag := range flags { + if gotFlag := dynFlag&flag != 0; gotFlag { + return true + } + } + + return false + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + if test.mustInternalLink { + testenv.MustInternalLink(t, test.mustHaveCGO) + } + if test.mustHaveCGO { + testenv.MustHaveCGO(t) + } + if test.mustHaveBuildModePIE { + testenv.MustHaveBuildMode(t, "pie") + } + if test.mustHaveBuildModePIE && test.mustInternalLink { + testenv.MustInternalLinkPIE(t) + } + + var ( + dir = t.TempDir() + src = filepath.Join(dir, fmt.Sprintf("elf_%s.go", test.name)) + binFile = filepath.Join(dir, test.name) + ) + + if err := os.WriteFile(src, []byte(test.prog), 0666); err != nil { + t.Fatal(err) + } + + cmdArgs := append([]string{"build", "-o", binFile}, append(test.args, src)...) + cmd := testenv.Command(t, testenv.GoToolPath(t), cmdArgs...) 
+ + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("failed to build %v: %v:\n%s", cmd.Args, err, out) + } + + fi, err := os.Open(binFile) + if err != nil { + t.Fatalf("failed to open built file: %v", err) + } + defer fi.Close() + + elfFile, err := elf.NewFile(fi) + if err != nil { + t.Skip("The system may not support ELF, skipped.") + } + defer elfFile.Close() + + flags, err := elfFile.DynValue(elf.DT_FLAGS) + if err != nil { + t.Fatalf("failed to get DT_FLAGS: %v", err) + } + + flags1, err := elfFile.DynValue(elf.DT_FLAGS_1) + if err != nil { + t.Fatalf("failed to get DT_FLAGS_1: %v", err) + } + + gotDfBindNow := gotDynFlag(flags, uint64(elf.DF_BIND_NOW)) + gotDf1Now := gotDynFlag(flags1, uint64(elf.DF_1_NOW)) + + bindNowFlagsMatch := gotDfBindNow == test.wantDfBindNow && gotDf1Now == test.wantDf1Now + + // some external linkers may set one of the two flags but not both. + if !test.mustInternalLink { + bindNowFlagsMatch = gotDfBindNow == test.wantDfBindNow || gotDf1Now == test.wantDf1Now + } + + if !bindNowFlagsMatch { + t.Fatalf("Dynamic flags mismatch:\n"+ + "DT_FLAGS BIND_NOW got: %v, want: %v\n"+ + "DT_FLAGS_1 DF_1_NOW got: %v, want: %v", + gotDfBindNow, test.wantDfBindNow, gotDf1Now, test.wantDf1Now) + } + + if gotDf1Pie := gotDynFlag(flags1, uint64(elf.DF_1_PIE)); gotDf1Pie != test.wantDf1Pie { + t.Fatalf("DT_FLAGS_1 DF_1_PIE got: %v, want: %v", gotDf1Pie, test.wantDf1Pie) + } + }) + } +} diff --git a/src/cmd/link/internal/ld/inittask.go b/src/cmd/link/internal/ld/inittask.go index c4c5beb55e..ccf90b8b8c 100644 --- a/src/cmd/link/internal/ld/inittask.go +++ b/src/cmd/link/internal/ld/inittask.go @@ -152,7 +152,7 @@ func (ctxt *Link) inittaskSym(rootNames []string, symName string) loader.Sym { // Figure out the schedule. sched := ldr.MakeSymbolBuilder(symName) - sched.SetType(sym.SNOPTRDATA) // Could be SRODATA, but see isue 58857. + sched.SetType(sym.SNOPTRDATA) // Could be SRODATA, but see issue 58857. 
for !h.empty() { // Pick the lexicographically first initializable package. s := h.pop(ldr) diff --git a/src/cmd/link/internal/ld/lib.go b/src/cmd/link/internal/ld/lib.go index b603fba6c7..97f3ed37e3 100644 --- a/src/cmd/link/internal/ld/lib.go +++ b/src/cmd/link/internal/ld/lib.go @@ -273,10 +273,6 @@ var ( symSize int32 ) -const ( - MINFUNC = 16 // minimum size for a function -) - // Symbol version of ABIInternal symbols. It is sym.SymVerABIInternal if ABI wrappers // are used, 0 otherwise. var abiInternalVer = sym.SymVerABIInternal @@ -878,7 +874,17 @@ func (ctxt *Link) linksetup() { sb := ctxt.loader.MakeSymbolUpdater(goarm) sb.SetType(sym.SDATA) sb.SetSize(0) - sb.AddUint8(uint8(buildcfg.GOARM)) + sb.AddUint8(uint8(buildcfg.GOARM.Version)) + + goarmsoftfp := ctxt.loader.LookupOrCreateSym("runtime.goarmsoftfp", 0) + sb2 := ctxt.loader.MakeSymbolUpdater(goarmsoftfp) + sb2.SetType(sym.SDATA) + sb2.SetSize(0) + if buildcfg.GOARM.SoftFloat { + sb2.AddUint8(1) + } else { + sb2.AddUint8(0) + } } // Set runtime.disableMemoryProfiling bool if @@ -1593,12 +1599,16 @@ func (ctxt *Link) hostlink() { } var altLinker string - if ctxt.IsELF && ctxt.DynlinkingGo() { - // We force all symbol resolution to be done at program startup + if ctxt.IsELF && (ctxt.DynlinkingGo() || *flagBindNow) { + // For ELF targets, when producing dynamically linked Go code + // or when immediate binding is explicitly requested, + // we force all symbol resolution to be done at program startup // because lazy PLT resolution can use large amounts of stack at // times we cannot allow it to do so. argv = append(argv, "-Wl,-z,now") + } + if ctxt.IsELF && ctxt.DynlinkingGo() { // Do not let the host linker generate COPY relocations. These // can move symbols out of sections that rely on stable offsets // from the beginning of the section (like sym.STYPE). 
@@ -1868,9 +1878,10 @@ func (ctxt *Link) hostlink() { ctxt.Logf("\n") } - out, err := exec.Command(argv[0], argv[1:]...).CombinedOutput() + cmd := exec.Command(argv[0], argv[1:]...) + out, err := cmd.CombinedOutput() if err != nil { - Exitf("running %s failed: %v\n%s", argv[0], err, out) + Exitf("running %s failed: %v\n%s\n%s", argv[0], err, cmd, out) } // Filter out useless linker warnings caused by bugs outside Go. @@ -1953,7 +1964,7 @@ func (ctxt *Link) hostlink() { ctxt.Logf("\n") } if out, err := cmd.CombinedOutput(); err != nil { - Exitf("%s: running dsymutil failed: %v\n%s", os.Args[0], err, out) + Exitf("%s: running dsymutil failed: %v\n%s\n%s", os.Args[0], err, cmd, out) } // Remove STAB (symbolic debugging) symbols after we are done with them (by dsymutil). // They contain temporary file paths and make the build not reproducible. @@ -1972,8 +1983,9 @@ func (ctxt *Link) hostlink() { } ctxt.Logf("\n") } - if out, err := exec.Command(stripCmd, stripArgs...).CombinedOutput(); err != nil { - Exitf("%s: running strip failed: %v\n%s", os.Args[0], err, out) + cmd = exec.Command(stripCmd, stripArgs...) + if out, err := cmd.CombinedOutput(); err != nil { + Exitf("%s: running strip failed: %v\n%s\n%s", os.Args[0], err, cmd, out) } // Skip combining if `dsymutil` didn't generate a file. See #11994. if _, err := os.Stat(dsym); os.IsNotExist(err) { diff --git a/src/cmd/link/internal/ld/macho.go b/src/cmd/link/internal/ld/macho.go index fc38b0d99d..91e908c97f 100644 --- a/src/cmd/link/internal/ld/macho.go +++ b/src/cmd/link/internal/ld/macho.go @@ -478,13 +478,11 @@ func (ctxt *Link) domacho() { if ctxt.LinkMode == LinkInternal && machoPlatform == PLATFORM_MACOS { var version uint32 switch ctxt.Arch.Family { - case sys.AMD64: + case sys.ARM64, sys.AMD64: // This must be fairly recent for Apple signing (go.dev/issue/30488). // Having too old a version here was also implicated in some problems // calling into macOS libraries (go.dev/issue/56784). 
// In general this can be the most recent supported macOS version. - version = 10<<16 | 13<<8 | 0<<0 // 10.13.0 - case sys.ARM64: version = 11<<16 | 0<<8 | 0<<0 // 11.0.0 } ml := newMachoLoad(ctxt.Arch, LC_BUILD_VERSION, 4) diff --git a/src/cmd/link/internal/ld/main.go b/src/cmd/link/internal/ld/main.go index e120f90a22..13077668e7 100644 --- a/src/cmd/link/internal/ld/main.go +++ b/src/cmd/link/internal/ld/main.go @@ -63,6 +63,7 @@ func init() { // Flags used by the linker. The exported flags are used by the architecture-specific packages. var ( flagBuildid = flag.String("buildid", "", "record `id` as Go toolchain build id") + flagBindNow = flag.Bool("bindnow", false, "mark a dynamically linked ELF object for immediate function binding") flagOutfile = flag.String("o", "", "write output to `file`") flagPluginPath = flag.String("pluginpath", "", "full path name for plugin") @@ -90,7 +91,7 @@ var ( flagF = flag.Bool("f", false, "ignore version mismatch") flagG = flag.Bool("g", false, "disable go package data checks") flagH = flag.Bool("h", false, "halt on error") - flagN = flag.Bool("n", false, "dump symbol table") + flagN = flag.Bool("n", false, "no-op (deprecated)") FlagS = flag.Bool("s", false, "disable symbol table") flag8 bool // use 64-bit addresses in symbol table flagInterpreter = flag.String("I", "", "use `linker` as ELF dynamic linker") @@ -102,6 +103,7 @@ var ( FlagTextAddr = flag.Int64("T", -1, "set the start address of text symbols") flagEntrySymbol = flag.String("E", "", "set `entry` symbol name") flagPruneWeakMap = flag.Bool("pruneweakmap", true, "prune weak mapinit refs") + flagRandLayout = flag.Int64("randlayout", 0, "randomize function layout") cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") memprofile = flag.String("memprofile", "", "write memory profile to `file`") memprofilerate = flag.Int64("memprofilerate", 0, "set runtime.MemProfileRate to `rate`") @@ -167,12 +169,12 @@ func Main(arch *sys.Arch, theArch Arch) { } } - 
if final := gorootFinal(); final == "$GOROOT" { - // cmd/go sets GOROOT_FINAL to the dummy value "$GOROOT" when -trimpath is set, - // but runtime.GOROOT() should return the empty string, not a bogus value. - // (See https://go.dev/issue/51461.) + if buildcfg.GOROOT == "" { + // cmd/go clears the GOROOT variable when -trimpath is set, + // so omit it from the binary even if cmd/link itself has an + // embedded GOROOT value reported by runtime.GOROOT. } else { - addstrdata1(ctxt, "runtime.defaultGOROOT="+final) + addstrdata1(ctxt, "runtime.defaultGOROOT="+buildcfg.GOROOT) } buildVersion := buildcfg.Version diff --git a/src/cmd/link/internal/ld/pcln.go b/src/cmd/link/internal/ld/pcln.go index 5734b92507..57c88c03af 100644 --- a/src/cmd/link/internal/ld/pcln.go +++ b/src/cmd/link/internal/ld/pcln.go @@ -13,7 +13,6 @@ import ( "fmt" "internal/abi" "internal/buildcfg" - "os" "path/filepath" "strings" ) @@ -808,18 +807,10 @@ func (ctxt *Link) pclntab(container loader.Bitmap) *pclntab { return state } -func gorootFinal() string { - root := buildcfg.GOROOT - if final := os.Getenv("GOROOT_FINAL"); final != "" { - root = final - } - return root -} - func expandGoroot(s string) string { const n = len("$GOROOT") if len(s) >= n+1 && s[:n] == "$GOROOT" && (s[n] == '/' || s[n] == '\\') { - if final := gorootFinal(); final != "" { + if final := buildcfg.GOROOT; final != "" { return filepath.ToSlash(filepath.Join(final, s[n:])) } } @@ -827,9 +818,8 @@ func expandGoroot(s string) string { } const ( - BUCKETSIZE = 256 * MINFUNC SUBBUCKETS = 16 - SUBBUCKETSIZE = BUCKETSIZE / SUBBUCKETS + SUBBUCKETSIZE = abi.FuncTabBucketSize / SUBBUCKETS NOIDX = 0x7fffffff ) @@ -847,7 +837,7 @@ func (ctxt *Link) findfunctab(state *pclntab, container loader.Bitmap) { // that map to that subbucket. 
n := int32((max - min + SUBBUCKETSIZE - 1) / SUBBUCKETSIZE) - nbuckets := int32((max - min + BUCKETSIZE - 1) / BUCKETSIZE) + nbuckets := int32((max - min + abi.FuncTabBucketSize - 1) / abi.FuncTabBucketSize) size := 4*int64(nbuckets) + int64(n) @@ -878,7 +868,7 @@ func (ctxt *Link) findfunctab(state *pclntab, container loader.Bitmap) { q = ldr.SymValue(e) } - //print("%d: [%lld %lld] %s\n", idx, p, q, s->name); + //fmt.Printf("%d: [%x %x] %s\n", idx, p, q, ldr.SymName(s)) for ; p < q; p += SUBBUCKETSIZE { i = int((p - min) / SUBBUCKETSIZE) if indexes[i] > idx { diff --git a/src/cmd/link/internal/ld/xcoff.go b/src/cmd/link/internal/ld/xcoff.go index 2f887366b7..d915ab393b 100644 --- a/src/cmd/link/internal/ld/xcoff.go +++ b/src/cmd/link/internal/ld/xcoff.go @@ -1140,7 +1140,7 @@ func (f *xcoffFile) asmaixsym(ctxt *Link) { putaixsym(ctxt, s, TextSym) } - if ctxt.Debugvlog != 0 || *flagN { + if ctxt.Debugvlog != 0 { ctxt.Logf("symsize = %d\n", uint32(symSize)) } xfile.updatePreviousFile(ctxt, true) diff --git a/src/cmd/link/internal/loadelf/ldelf.go b/src/cmd/link/internal/loadelf/ldelf.go index 82e7dc30b7..c5ea6f7f89 100644 --- a/src/cmd/link/internal/loadelf/ldelf.go +++ b/src/cmd/link/internal/loadelf/ldelf.go @@ -242,10 +242,6 @@ func parseArmAttributes(e binary.ByteOrder, data []byte) (found bool, ehdrFlags // object, and the returned ehdrFlags contains what this Load function computes. // TODO: find a better place for this logic. 
func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, pkg string, length int64, pn string, initEhdrFlags uint32) (textp []loader.Sym, ehdrFlags uint32, err error) { - newSym := func(name string, version int) loader.Sym { - return l.CreateStaticSym(name) - } - lookup := l.LookupOrCreateCgoExport errorf := func(str string, args ...interface{}) ([]loader.Sym, uint32, error) { return nil, 0, fmt.Errorf("loadelf: %s: %v", pn, fmt.Sprintf(str, args...)) } @@ -515,7 +511,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, } sectsymNames[name] = true - sb := l.MakeSymbolUpdater(lookup(name, localSymVersion)) + sb := l.MakeSymbolUpdater(l.LookupOrCreateCgoExport(name, localSymVersion)) switch sect.flags & (elf.SHF_ALLOC | elf.SHF_WRITE | elf.SHF_EXECINSTR) { default: @@ -556,7 +552,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, for i := 1; i < elfobj.nsymtab; i++ { var elfsym ElfSym - if err := readelfsym(newSym, lookup, l, arch, elfobj, i, &elfsym, 1, localSymVersion); err != nil { + if err := readelfsym(l, arch, elfobj, i, &elfsym, 1, localSymVersion); err != nil { return errorf("%s: malformed elf file: %v", pn, err) } symbols[i] = elfsym.sym @@ -770,7 +766,7 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, f *bio.Reader, rSym = 0 } else { var elfsym ElfSym - if err := readelfsym(newSym, lookup, l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil { + if err := readelfsym(l, arch, elfobj, int(symIdx), &elfsym, 0, 0); err != nil { return errorf("malformed elf file: %v", err) } elfsym.sym = symbols[symIdx] @@ -847,7 +843,7 @@ func elfmap(elfobj *ElfObj, sect *ElfSect) (err error) { return nil } -func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, arch *sys.Arch, elfobj *ElfObj, i int, elfsym *ElfSym, needSym int, localSymVersion int) (err error) { +func readelfsym(l *loader.Loader, arch *sys.Arch, elfobj *ElfObj, i int, elfsym 
*ElfSym, needSym int, localSymVersion int) (err error) { if i >= elfobj.nsymtab || i < 0 { err = fmt.Errorf("invalid elf symbol index") return err @@ -898,7 +894,7 @@ func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, a switch elfsym.bind { case elf.STB_GLOBAL: if needSym != 0 { - s = lookup(elfsym.name, 0) + s = l.LookupOrCreateCgoExport(elfsym.name, 0) // for global scoped hidden symbols we should insert it into // symbol hash table, but mark them as hidden. @@ -927,7 +923,7 @@ func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, a // We need to be able to look this up, // so put it in the hash table. if needSym != 0 { - s = lookup(elfsym.name, localSymVersion) + s = l.LookupOrCreateCgoExport(elfsym.name, localSymVersion) l.SetAttrVisibilityHidden(s, true) } break @@ -940,13 +936,13 @@ func readelfsym(newSym, lookup func(string, int) loader.Sym, l *loader.Loader, a // FIXME: pass empty string here for name? This would // reduce mem use, but also (possibly) make it harder // to debug problems. - s = newSym(elfsym.name, localSymVersion) + s = l.CreateStaticSym(elfsym.name) l.SetAttrVisibilityHidden(s, true) } case elf.STB_WEAK: if needSym != 0 { - s = lookup(elfsym.name, 0) + s = l.LookupOrCreateCgoExport(elfsym.name, 0) if elfsym.other == 2 { l.SetAttrVisibilityHidden(s, true) } diff --git a/src/cmd/link/internal/loader/loader.go b/src/cmd/link/internal/loader/loader.go index 5dd657b4d7..3edb5e2f6f 100644 --- a/src/cmd/link/internal/loader/loader.go +++ b/src/cmd/link/internal/loader/loader.go @@ -988,7 +988,7 @@ func (l *Loader) AttrExternal(i Sym) bool { return l.attrExternal.Has(l.extIndex(i)) } -// SetAttrExternal sets the "external" property for an host object +// SetAttrExternal sets the "external" property for a host object // symbol (see AttrExternal). 
func (l *Loader) SetAttrExternal(i Sym, v bool) { if !l.IsExternal(i) { diff --git a/src/cmd/link/internal/loadpe/ldpe.go b/src/cmd/link/internal/loadpe/ldpe.go index e4134607c4..1ba6debb4e 100644 --- a/src/cmd/link/internal/loadpe/ldpe.go +++ b/src/cmd/link/internal/loadpe/ldpe.go @@ -493,17 +493,10 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read continue } if pesym.SectionNumber == IMAGE_SYM_ABSOLUTE && bytes.Equal(pesym.Name[:], []byte("@feat.00")) { - // Microsoft's linker looks at whether all input objects have an empty - // section called @feat.00. If all of them do, then it enables SEH; - // otherwise it doesn't enable that feature. So, since around the Windows - // XP SP2 era, most tools that make PE objects just tack on that section, - // so that it won't gimp Microsoft's linker logic. Go doesn't support SEH, - // so in theory, none of this really matters to us. But actually, if the - // linker tries to ingest an object with @feat.00 -- which are produced by - // LLVM's resource compiler, for example -- it chokes because of the - // IMAGE_SYM_ABSOLUTE section that it doesn't know how to deal with. Since - // @feat.00 is just a marking anyway, skip IMAGE_SYM_ABSOLUTE sections that - // are called @feat.00. + // The PE documentation says that, on x86 platforms, the absolute symbol named @feat.00 + // is used to indicate that the COFF object supports SEH. + // Go doesn't support SEH on windows/386, so we can ignore this symbol. 
+ // See https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#the-sxdata-section continue } var sect *pe.Section diff --git a/src/cmd/link/internal/loadpe/seh.go b/src/cmd/link/internal/loadpe/seh.go index 0e2cda21dd..545958f1d6 100644 --- a/src/cmd/link/internal/loadpe/seh.go +++ b/src/cmd/link/internal/loadpe/seh.go @@ -16,8 +16,9 @@ import ( const ( UNW_FLAG_EHANDLER = 1 << 3 UNW_FLAG_UHANDLER = 2 << 3 - UNW_FLAG_CHAININFO = 3 << 3 - unwStaticDataSize = 8 + UNW_FLAG_CHAININFO = 4 << 3 + unwStaticDataSize = 4 // Bytes of unwind data before the variable length part. + unwCodeSize = 2 // Bytes per unwind code. ) // processSEH walks all pdata relocations looking for exception handler function symbols. @@ -81,14 +82,14 @@ func findHandlerInXDataAMD64(ldr *loader.Loader, xsym sym.LoaderSym, add int64) // Nothing to do. return 0 } - codes := data[3] + codes := data[2] if codes%2 != 0 { // There are always an even number of unwind codes, even if the last one is unused. codes += 1 } // The exception handler relocation is the first relocation after the unwind codes, // unless it is chained, but we will handle this case later. 
- targetOff := add + unwStaticDataSize*(1+int64(codes)) + targetOff := add + unwStaticDataSize + unwCodeSize*int64(codes) xrels := ldr.Relocs(xsym) xrelsCount := xrels.Count() idx := sort.Search(xrelsCount, func(i int) bool { diff --git a/src/cmd/link/internal/loadxcoff/ldxcoff.go b/src/cmd/link/internal/loadxcoff/ldxcoff.go index 920e1c85fd..29d162596a 100644 --- a/src/cmd/link/internal/loadxcoff/ldxcoff.go +++ b/src/cmd/link/internal/loadxcoff/ldxcoff.go @@ -155,7 +155,6 @@ func Load(l *loader.Loader, arch *sys.Arch, localSymVersion int, input *bio.Read } } return textp, nil - } // Convert symbol xcoff type to sym.SymKind diff --git a/src/cmd/link/internal/loong64/asm.go b/src/cmd/link/internal/loong64/asm.go index 3c58c27d82..cb1805ccd7 100644 --- a/src/cmd/link/internal/loong64/asm.go +++ b/src/cmd/link/internal/loong64/asm.go @@ -14,7 +14,47 @@ import ( "log" ) -func gentext(ctxt *ld.Link, ldr *loader.Loader) {} +func gentext(ctxt *ld.Link, ldr *loader.Loader) { + initfunc, addmoduledata := ld.PrepareAddmoduledata(ctxt) + if initfunc == nil { + return + } + + o := func(op uint32) { + initfunc.AddUint32(ctxt.Arch, op) + } + + // Emit the following function: + // + // local.dso_init: + // la.pcrel $a0, local.moduledata + // b runtime.addmoduledata + + // 0000000000000000 : + // 0: 1a000004 pcalau12i $a0, 0 + // 0: R_LARCH_PCALA_HI20 local.moduledata + o(0x1a000004) + rel, _ := initfunc.AddRel(objabi.R_LOONG64_ADDR_HI) + rel.SetOff(0) + rel.SetSiz(4) + rel.SetSym(ctxt.Moduledata) + + // 4: 02c00084 addi.d $a0, $a0, 0 + // 4: R_LARCH_PCALA_LO12 local.moduledata + o(0x02c00084) + rel2, _ := initfunc.AddRel(objabi.R_LOONG64_ADDR_LO) + rel2.SetOff(4) + rel2.SetSiz(4) + rel2.SetSym(ctxt.Moduledata) + + // 8: 50000000 b 0 + // 8: R_LARCH_B26 runtime.addmoduledata + o(0x50000000) + rel3, _ := initfunc.AddRel(objabi.R_CALLLOONG64) + rel3.SetOff(8) + rel3.SetSiz(4) + rel3.SetSym(addmoduledata) +} func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s 
loader.Sym, r loader.Reloc, rIdx int) bool { log.Fatalf("adddynrel not implemented") @@ -44,12 +84,12 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, default: return false } - case objabi.R_ADDRLOONG64TLS: + case objabi.R_LOONG64_TLS_LE_LO: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_TLS_LE_LO12) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) - case objabi.R_ADDRLOONG64TLSU: + case objabi.R_LOONG64_TLS_LE_HI: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_TLS_LE_HI20) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) @@ -59,7 +99,7 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, out.Write64(uint64(elf.R_LARCH_B26) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) - case objabi.R_LOONG64_TLS_IE_PCREL_HI: + case objabi.R_LOONG64_TLS_IE_HI: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_TLS_IE_PC_HI20) | uint64(elfsym)<<32) out.Write64(uint64(0x0)) @@ -69,15 +109,25 @@ func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, out.Write64(uint64(elf.R_LARCH_TLS_IE_PC_LO12) | uint64(elfsym)<<32) out.Write64(uint64(0x0)) - case objabi.R_ADDRLOONG64: + case objabi.R_LOONG64_ADDR_LO: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_PCALA_LO12) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) - case objabi.R_ADDRLOONG64U: + case objabi.R_LOONG64_ADDR_HI: out.Write64(uint64(sectoff)) out.Write64(uint64(elf.R_LARCH_PCALA_HI20) | uint64(elfsym)<<32) out.Write64(uint64(r.Xadd)) + + case objabi.R_LOONG64_GOT_HI: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_GOT_PC_HI20) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) + + case objabi.R_LOONG64_GOT_LO: + out.Write64(uint64(sectoff)) + out.Write64(uint64(elf.R_LARCH_GOT_PC_LO12) | uint64(elfsym)<<32) + out.Write64(uint64(0x0)) } return true @@ -97,8 +147,8 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade switch r.Type() { default: 
return val, 0, false - case objabi.R_ADDRLOONG64, - objabi.R_ADDRLOONG64U: + case objabi.R_LOONG64_ADDR_HI, + objabi.R_LOONG64_ADDR_LO: // set up addend for eventual relocation via outer symbol. rs, _ := ld.FoldSubSymbolOffset(ldr, rs) rst := ldr.SymType(rs) @@ -106,12 +156,14 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade ldr.Errorf(s, "missing section for %s", ldr.SymName(rs)) } return val, 1, true - case objabi.R_ADDRLOONG64TLS, - objabi.R_ADDRLOONG64TLSU, + case objabi.R_LOONG64_TLS_LE_HI, + objabi.R_LOONG64_TLS_LE_LO, objabi.R_CALLLOONG64, objabi.R_JMPLOONG64, - objabi.R_LOONG64_TLS_IE_PCREL_HI, - objabi.R_LOONG64_TLS_IE_LO: + objabi.R_LOONG64_TLS_IE_HI, + objabi.R_LOONG64_TLS_IE_LO, + objabi.R_LOONG64_GOT_HI, + objabi.R_LOONG64_GOT_LO: return val, 1, true } } @@ -124,18 +176,18 @@ func archreloc(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, r loade return r.Add(), noExtReloc, isOk case objabi.R_GOTOFF: return ldr.SymValue(r.Sym()) + r.Add() - ldr.SymValue(syms.GOT), noExtReloc, isOk - case objabi.R_ADDRLOONG64, - objabi.R_ADDRLOONG64U: + case objabi.R_LOONG64_ADDR_HI, + objabi.R_LOONG64_ADDR_LO: pc := ldr.SymValue(s) + int64(r.Off()) t := calculatePCAlignedReloc(r.Type(), ldr.SymAddr(rs)+r.Add(), pc) - if r.Type() == objabi.R_ADDRLOONG64 { + if r.Type() == objabi.R_LOONG64_ADDR_LO { return int64(val&0xffc003ff | (t << 10)), noExtReloc, isOk } return int64(val&0xfe00001f | (t << 5)), noExtReloc, isOk - case objabi.R_ADDRLOONG64TLS, - objabi.R_ADDRLOONG64TLSU: + case objabi.R_LOONG64_TLS_LE_HI, + objabi.R_LOONG64_TLS_LE_LO: t := ldr.SymAddr(rs) + r.Add() - if r.Type() == objabi.R_ADDRLOONG64TLS { + if r.Type() == objabi.R_LOONG64_TLS_LE_LO { return int64(val&0xffc003ff | ((t & 0xfff) << 10)), noExtReloc, isOk } return int64(val&0xfe00001f | (((t) >> 12 << 5) & 0x1ffffe0)), noExtReloc, isOk @@ -155,17 +207,19 @@ func archrelocvariant(*ld.Target, *loader.Loader, loader.Reloc, sym.RelocVariant func extreloc(target 
*ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sym) (loader.ExtReloc, bool) { switch r.Type() { - case objabi.R_ADDRLOONG64, - objabi.R_ADDRLOONG64U: + case objabi.R_LOONG64_ADDR_HI, + objabi.R_LOONG64_ADDR_LO, + objabi.R_LOONG64_GOT_HI, + objabi.R_LOONG64_GOT_LO: return ld.ExtrelocViaOuterSym(ldr, r, s), true - case objabi.R_ADDRLOONG64TLS, - objabi.R_ADDRLOONG64TLSU, + case objabi.R_LOONG64_TLS_LE_HI, + objabi.R_LOONG64_TLS_LE_LO, objabi.R_CONST, objabi.R_GOTOFF, objabi.R_CALLLOONG64, objabi.R_JMPLOONG64, - objabi.R_LOONG64_TLS_IE_PCREL_HI, + objabi.R_LOONG64_TLS_IE_HI, objabi.R_LOONG64_TLS_IE_LO: return ld.ExtrelocSimple(ldr, r), true } @@ -174,7 +228,7 @@ func extreloc(target *ld.Target, ldr *loader.Loader, r loader.Reloc, s loader.Sy func isRequestingLowPageBits(t objabi.RelocType) bool { switch t { - case objabi.R_ADDRLOONG64: + case objabi.R_LOONG64_ADDR_LO: return true } return false diff --git a/src/cmd/link/internal/mips/obj.go b/src/cmd/link/internal/mips/obj.go index e07ee0b208..c76e267cc2 100644 --- a/src/cmd/link/internal/mips/obj.go +++ b/src/cmd/link/internal/mips/obj.go @@ -103,5 +103,4 @@ func archinit(ctxt *ld.Link) { func adddynrel(target *ld.Target, ldr *loader.Loader, syms *ld.ArchSyms, s loader.Sym, r loader.Reloc, rIdx int) bool { ld.Exitf("adddynrel currently unimplemented for MIPS") return false - } diff --git a/src/cmd/link/internal/ppc64/asm.go b/src/cmd/link/internal/ppc64/asm.go index 91eef5e461..09647d84b1 100644 --- a/src/cmd/link/internal/ppc64/asm.go +++ b/src/cmd/link/internal/ppc64/asm.go @@ -917,7 +917,6 @@ func xcoffreloc1(arch *sys.Arch, out *ld.OutBuf, ldr *loader.Loader, s loader.Sy emitReloc(ld.XCOFF_R_REF|0x3F<<8, 0) } return true - } func elfreloc1(ctxt *ld.Link, out *ld.OutBuf, ldr *loader.Loader, s loader.Sym, r loader.ExtReloc, ri int, sectoff int64) bool { diff --git a/src/cmd/link/internal/riscv64/asm.go b/src/cmd/link/internal/riscv64/asm.go index d95de6cb36..6a4dd01240 100644 --- 
a/src/cmd/link/internal/riscv64/asm.go +++ b/src/cmd/link/internal/riscv64/asm.go @@ -170,8 +170,8 @@ func genSymsLate(ctxt *ld.Link, ldr *loader.Loader) { relocs := ldr.Relocs(s) for ri := 0; ri < relocs.Count(); ri++ { r := relocs.At(ri) - if r.Type() != objabi.R_RISCV_PCREL_ITYPE && r.Type() != objabi.R_RISCV_PCREL_STYPE && - r.Type() != objabi.R_RISCV_TLS_IE { + if r.Type() != objabi.R_RISCV_CALL && r.Type() != objabi.R_RISCV_PCREL_ITYPE && + r.Type() != objabi.R_RISCV_PCREL_STYPE && r.Type() != objabi.R_RISCV_TLS_IE { continue } if r.Off() == 0 && ldr.SymType(s) == sym.STEXT { diff --git a/src/cmd/link/internal/wasm/asm.go b/src/cmd/link/internal/wasm/asm.go index 413a809414..2f511b97c7 100644 --- a/src/cmd/link/internal/wasm/asm.go +++ b/src/cmd/link/internal/wasm/asm.go @@ -14,6 +14,7 @@ import ( "cmd/link/internal/sym" "encoding/binary" "fmt" + "internal/abi" "internal/buildcfg" "io" "regexp" @@ -154,8 +155,8 @@ func assignAddress(ldr *loader.Loader, sect *sym.Section, n int, s loader.Sym, v // However, there is no PC register, only PC_F and PC_B. PC_F denotes the function, // PC_B the resume point inside of that function. The entry of the function has PC_B = 0. 
ldr.SetSymSect(s, sect) - ldr.SetSymValue(s, int64(funcValueOffset+va/ld.MINFUNC)<<16) // va starts at zero - va += uint64(ld.MINFUNC) + ldr.SetSymValue(s, int64(funcValueOffset+va/abi.MINFUNC)<<16) // va starts at zero + va += uint64(abi.MINFUNC) return sect, n, va } diff --git a/src/cmd/link/link_test.go b/src/cmd/link/link_test.go index 897607c4fa..6afde4b085 100644 --- a/src/cmd/link/link_test.go +++ b/src/cmd/link/link_test.go @@ -348,7 +348,7 @@ func TestXFlag(t *testing.T) { } } -var testMachOBuildVersionSrc = ` +var trivialSrc = ` package main func main() { } ` @@ -361,7 +361,7 @@ func TestMachOBuildVersion(t *testing.T) { tmpdir := t.TempDir() src := filepath.Join(tmpdir, "main.go") - err := os.WriteFile(src, []byte(testMachOBuildVersionSrc), 0666) + err := os.WriteFile(src, []byte(trivialSrc), 0666) if err != nil { t.Fatal(err) } @@ -388,9 +388,9 @@ func TestMachOBuildVersion(t *testing.T) { found := false const LC_BUILD_VERSION = 0x32 checkMin := func(ver uint32) { - major, minor := (ver>>16)&0xff, (ver>>8)&0xff - if major != 10 || minor < 9 { - t.Errorf("LC_BUILD_VERSION version %d.%d < 10.9", major, minor) + major, minor, patch := (ver>>16)&0xff, (ver>>8)&0xff, (ver>>0)&0xff + if major < 11 { + t.Errorf("LC_BUILD_VERSION version %d.%d.%d < 11.0.0", major, minor, patch) } } for _, cmd := range exem.Loads { @@ -1375,3 +1375,43 @@ func TestFlagS(t *testing.T) { } } } + +func TestRandLayout(t *testing.T) { + // Test that the -randlayout flag randomizes function order and + // generates a working binary. 
+ testenv.MustHaveGoBuild(t) + + t.Parallel() + + tmpdir := t.TempDir() + + src := filepath.Join(tmpdir, "hello.go") + err := os.WriteFile(src, []byte(trivialSrc), 0666) + if err != nil { + t.Fatal(err) + } + + var syms [2]string + for i, seed := range []string{"123", "456"} { + exe := filepath.Join(tmpdir, "hello"+seed+".exe") + cmd := testenv.Command(t, testenv.GoToolPath(t), "build", "-ldflags=-randlayout="+seed, "-o", exe, src) + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("build failed: %v\n%s", err, out) + } + cmd = testenv.Command(t, exe) + err = cmd.Run() + if err != nil { + t.Fatalf("executable failed to run: %v\n%s", err, out) + } + cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "nm", exe) + out, err = cmd.CombinedOutput() + if err != nil { + t.Fatalf("fail to run \"go tool nm\": %v\n%s", err, out) + } + syms[i] = string(out) + } + if syms[0] == syms[1] { + t.Errorf("randlayout with different seeds produced same layout:\n%s\n===\n\n%s", syms[0], syms[1]) + } +} diff --git a/src/cmd/objdump/objdump_test.go b/src/cmd/objdump/objdump_test.go index 6e781c924d..a755ec3b64 100644 --- a/src/cmd/objdump/objdump_test.go +++ b/src/cmd/objdump/objdump_test.go @@ -152,9 +152,6 @@ func testDisasm(t *testing.T, srcfname string, printCode bool, printGnuAsm bool, cmd := testenv.Command(t, testenv.GoToolPath(t), args...) // "Bad line" bug #36683 is sensitive to being run in the source directory. cmd.Dir = "testdata" - // Ensure that the source file location embedded in the binary matches our - // actual current GOROOT, instead of GOROOT_FINAL if set. - cmd.Env = append(os.Environ(), "GOROOT_FINAL=") t.Logf("Running %v", cmd.Args) out, err := cmd.CombinedOutput() if err != nil { diff --git a/src/cmd/preprofile/main.go b/src/cmd/preprofile/main.go new file mode 100644 index 0000000000..806f25fee8 --- /dev/null +++ b/src/cmd/preprofile/main.go @@ -0,0 +1,187 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Preprofile handles pprof files. +// +// Usage: +// +// go tool preprofile [-v] [-o output] [-i (pprof)input] +// +// + +package main + +import ( + "bufio" + "flag" + "fmt" + "internal/profile" + "log" + "os" + "path/filepath" + "strconv" +) + +// The current Go Compiler consumes significantly long compilation time when the PGO +// is enabled. To optimize the existing flow and reduce build time of multiple Go +// services, we create a standalone tool, PGO preprocessor, to extract information +// from collected profiling files and to cache the WeightedCallGraph in one time +// fashion. By adding the new tool to the Go compiler, it will reduce the time +// of repeated profiling file parsing and avoid WeightedCallGraph reconstruction +// in current Go Compiler. +// The format of the pre-processed output is as follows. +// +// Header +// caller_name +// callee_name +// "call site offset" "call edge weight" +// ... +// caller_name +// callee_name +// "call site offset" "call edge weight" + +func usage() { + fmt.Fprintf(os.Stderr, "MUST have (pprof) input file \n") + fmt.Fprintf(os.Stderr, "usage: go tool preprofile [-v] [-o output] [-i (pprof)input] \n\n") + flag.PrintDefaults() + os.Exit(2) +} + +type NodeMapKey struct { + CallerName string + CalleeName string + CallSiteOffset int // Line offset from function start line. +} + +func readPprofFile(profileFile string, outputFile string, verbose bool) bool { + // open the pprof profile file + f, err := os.Open(profileFile) + if err != nil { + log.Fatal("failed to open file " + profileFile) + return false + } + defer f.Close() + p, err := profile.Parse(f) + if err != nil { + log.Fatal("failed to Parse profile file.") + return false + } + + if len(p.Sample) == 0 { + // We accept empty profiles, but there is nothing to do. 
+ return false + } + + valueIndex := -1 + for i, s := range p.SampleType { + // Samples count is the raw data collected, and CPU nanoseconds is just + // a scaled version of it, so either one we can find is fine. + if (s.Type == "samples" && s.Unit == "count") || + (s.Type == "cpu" && s.Unit == "nanoseconds") { + valueIndex = i + break + } + } + + if valueIndex == -1 { + log.Fatal("failed to find CPU samples count or CPU nanoseconds value-types in profile.") + return false + } + + // The processing here is equivalent to cmd/compile/internal/pgo.createNamedEdgeMap. + g := profile.NewGraph(p, &profile.Options{ + SampleValue: func(v []int64) int64 { return v[valueIndex] }, + }) + + TotalEdgeWeight := int64(0) + + NodeMap := make(map[NodeMapKey]int64) + + for _, n := range g.Nodes { + canonicalName := n.Info.Name + // Create the key to the nodeMapKey. + nodeinfo := NodeMapKey{ + CallerName: canonicalName, + CallSiteOffset: n.Info.Lineno - n.Info.StartLine, + } + + if n.Info.StartLine == 0 { + if verbose { + log.Println("[PGO] warning: " + canonicalName + " relative line number is missing from the profile") + } + } + + for _, e := range n.Out { + TotalEdgeWeight += e.WeightValue() + nodeinfo.CalleeName = e.Dest.Info.Name + if w, ok := NodeMap[nodeinfo]; ok { + w += e.WeightValue() + } else { + w = e.WeightValue() + NodeMap[nodeinfo] = w + } + } + } + + var fNodeMap *os.File + if outputFile == "" { + fNodeMap = os.Stdout + } else { + dirPath := filepath.Dir(outputFile) + _, err := os.Stat(dirPath) + if err != nil { + log.Fatal("Directory does not exist: ", dirPath) + } + base := filepath.Base(outputFile) + outputFile = filepath.Join(dirPath, base) + + // write out NodeMap to a file + fNodeMap, err = os.Create(outputFile) + if err != nil { + log.Fatal("Error creating output file:", err) + return false + } + + defer fNodeMap.Close() // Close the file when done writing + } + + w := bufio.NewWriter(fNodeMap) + w.WriteString("GO PREPROFILE V1\n") + count := 1 + separator := " 
" + for key, element := range NodeMap { + line := key.CallerName + "\n" + w.WriteString(line) + line = key.CalleeName + "\n" + w.WriteString(line) + line = strconv.Itoa(key.CallSiteOffset) + line = line + separator + strconv.FormatInt(element, 10) + "\n" + w.WriteString(line) + w.Flush() + count += 1 + } + + if TotalEdgeWeight == 0 { + return false + } + + return true +} + +var dumpCode = flag.String("o", "", "dump output file ") +var input = flag.String("i", "", "input pprof file ") +var verbose = flag.Bool("v", false, "verbose log") + +func main() { + log.SetFlags(0) + log.SetPrefix("preprofile: ") + + flag.Usage = usage + flag.Parse() + if *input == "" { + usage() + } else { + readPprofFile(*input, *dumpCode, *verbose) + } +} diff --git a/src/cmd/relnote/relnote_test.go b/src/cmd/relnote/relnote_test.go new file mode 100644 index 0000000000..c20f80efc4 --- /dev/null +++ b/src/cmd/relnote/relnote_test.go @@ -0,0 +1,39 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "flag" + "internal/testenv" + "io/fs" + "os" + "path/filepath" + "testing" + + "golang.org/x/build/relnote" +) + +var flagCheck = flag.Bool("check", false, "run API release note checks") + +// Check that each file in api/next has corresponding release note files in doc/next. +func TestCheckAPIFragments(t *testing.T) { + if !*flagCheck { + t.Skip("-check not specified") + } + root := testenv.GOROOT(t) + rootFS := os.DirFS(root) + files, err := fs.Glob(rootFS, "api/next/*.txt") + if err != nil { + t.Fatal(err) + } + t.Logf("checking release notes for %d files in api/next", len(files)) + docFS := os.DirFS(filepath.Join(root, "doc", "next")) + // Check that each api/next file has a corresponding release note fragment. 
+ for _, apiFile := range files { + if err := relnote.CheckAPIFile(rootFS, apiFile, docFS, "doc/next"); err != nil { + t.Errorf("%s: %v", apiFile, err) + } + } +} diff --git a/src/cmd/trace/annotations.go b/src/cmd/trace/annotations.go index 0addc240be..df194a7598 100644 --- a/src/cmd/trace/annotations.go +++ b/src/cmd/trace/annotations.go @@ -9,8 +9,8 @@ import ( "fmt" "html/template" "internal/trace" + "internal/trace/traceviewer" "log" - "math" "net/http" "net/url" "reflect" @@ -808,122 +808,9 @@ func newRegionFilter(r *http.Request) (*regionFilter, error) { }, nil } -type durationHistogram struct { - Count int - Buckets []int - MinBucket, MaxBucket int -} - -// Five buckets for every power of 10. -var logDiv = math.Log(math.Pow(10, 1.0/5)) - -func (h *durationHistogram) add(d time.Duration) { - var bucket int - if d > 0 { - bucket = int(math.Log(float64(d)) / logDiv) - } - if len(h.Buckets) <= bucket { - h.Buckets = append(h.Buckets, make([]int, bucket-len(h.Buckets)+1)...) - h.Buckets = h.Buckets[:cap(h.Buckets)] - } - h.Buckets[bucket]++ - if bucket < h.MinBucket || h.MaxBucket == 0 { - h.MinBucket = bucket - } - if bucket > h.MaxBucket { - h.MaxBucket = bucket - } - h.Count++ -} - -func (h *durationHistogram) BucketMin(bucket int) time.Duration { - return time.Duration(math.Exp(float64(bucket) * logDiv)) -} - -func niceDuration(d time.Duration) string { - var rnd time.Duration - var unit string - switch { - case d < 10*time.Microsecond: - rnd, unit = time.Nanosecond, "ns" - case d < 10*time.Millisecond: - rnd, unit = time.Microsecond, "µs" - case d < 10*time.Second: - rnd, unit = time.Millisecond, "ms" - default: - rnd, unit = time.Second, "s " - } - return fmt.Sprintf("%d%s", d/rnd, unit) -} - -func (h *durationHistogram) ToHTML(urlmaker func(min, max time.Duration) string) template.HTML { - if h == nil || h.Count == 0 { - return template.HTML("") - } - - const barWidth = 400 - - maxCount := 0 - for _, count := range h.Buckets { - if count > maxCount { - 
maxCount = count - } - } - - w := new(strings.Builder) - fmt.Fprintf(w, ``) - for i := h.MinBucket; i <= h.MaxBucket; i++ { - // Tick label. - if h.Buckets[i] > 0 { - fmt.Fprintf(w, ``, urlmaker(h.BucketMin(i), h.BucketMin(i+1)), niceDuration(h.BucketMin(i))) - } else { - fmt.Fprintf(w, ``, niceDuration(h.BucketMin(i))) - } - // Bucket bar. - width := h.Buckets[i] * barWidth / maxCount - fmt.Fprintf(w, ``, width) - // Bucket count. - fmt.Fprintf(w, ``, h.Buckets[i]) - fmt.Fprintf(w, "\n") - - } - // Final tick label. - fmt.Fprintf(w, ``, niceDuration(h.BucketMin(h.MaxBucket+1))) - fmt.Fprintf(w, `
    %s
    %s
     
    %d
    %s
    `) - return template.HTML(w.String()) -} - -func (h *durationHistogram) String() string { - const barWidth = 40 - - labels := []string{} - maxLabel := 0 - maxCount := 0 - for i := h.MinBucket; i <= h.MaxBucket; i++ { - // TODO: This formatting is pretty awful. - label := fmt.Sprintf("[%-12s%-11s)", h.BucketMin(i).String()+",", h.BucketMin(i+1)) - labels = append(labels, label) - if len(label) > maxLabel { - maxLabel = len(label) - } - count := h.Buckets[i] - if count > maxCount { - maxCount = count - } - } - - w := new(strings.Builder) - for i := h.MinBucket; i <= h.MaxBucket; i++ { - count := h.Buckets[i] - bar := count * barWidth / maxCount - fmt.Fprintf(w, "%*s %-*s %d\n", maxLabel, labels[i-h.MinBucket], barWidth, strings.Repeat("█", bar), count) - } - return w.String() -} - type regionStats struct { regionTypeID - Histogram durationHistogram + Histogram traceviewer.TimeHistogram } func (s *regionStats) UserRegionURL() func(min, max time.Duration) string { @@ -933,7 +820,7 @@ func (s *regionStats) UserRegionURL() func(min, max time.Duration) string { } func (s *regionStats) add(region regionDesc) { - s.Histogram.add(region.duration()) + s.Histogram.Add(region.duration()) } var templUserRegionTypes = template.Must(template.New("").Parse(` @@ -966,8 +853,8 @@ var templUserRegionTypes = template.Must(template.New("").Parse(` type taskStats struct { Type string - Count int // Complete + incomplete tasks - Histogram durationHistogram // Complete tasks only + Count int // Complete + incomplete tasks + Histogram traceviewer.TimeHistogram // Complete tasks only } func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string { @@ -979,7 +866,7 @@ func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) stri func (s *taskStats) add(task *taskDesc) { s.Count++ if task.complete() { - s.Histogram.add(task.duration()) + s.Histogram.Add(task.duration()) } } @@ -1169,7 +1056,7 @@ func isUserAnnotationEvent(ev *trace.Event) (taskID 
uint64, ok bool) { var templUserRegionType = template.Must(template.New("").Funcs(template.FuncMap{ "prettyDuration": func(nsec int64) template.HTML { d := time.Duration(nsec) * time.Nanosecond - return template.HTML(niceDuration(d)) + return template.HTML(d.String()) }, "percent": func(dividend, divisor int64) template.HTML { if divisor == 0 { diff --git a/src/cmd/trace/goroutines.go b/src/cmd/trace/goroutines.go index 7850fc0ff1..28eace82fa 100644 --- a/src/cmd/trace/goroutines.go +++ b/src/cmd/trace/goroutines.go @@ -169,7 +169,7 @@ func httpGoroutine(w http.ResponseWriter, r *http.Request) { var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{ "prettyDuration": func(nsec int64) template.HTML { d := time.Duration(nsec) * time.Nanosecond - return template.HTML(niceDuration(d)) + return template.HTML(d.String()) }, "percent": func(dividend, divisor int64) template.HTML { if divisor == 0 { diff --git a/src/cmd/trace/main.go b/src/cmd/trace/main.go index 9e9e7f3e49..5f0d6f612b 100644 --- a/src/cmd/trace/main.go +++ b/src/cmd/trace/main.go @@ -7,11 +7,11 @@ package main import ( "bufio" "cmd/internal/browser" + cmdv2 "cmd/trace/v2" "flag" "fmt" - "html/template" "internal/trace" - "io" + "internal/trace/traceviewer" "log" "net" "net/http" @@ -46,7 +46,7 @@ Supported profile types are: Flags: -http=addr: HTTP service address (e.g., ':6060') -pprof=type: print a pprof-like profile instead - -d: print debug info such as parsed events + -d=int: print debug info such as parsed events (1 for high-level, 2 for low-level) Note that while the various profiles available when launching 'go tool trace' work on every browser, the trace viewer itself @@ -57,7 +57,7 @@ and is only actively tested on that browser. 
var ( httpFlag = flag.String("http", "localhost:0", "HTTP service address (e.g., ':6060')") pprofFlag = flag.String("pprof", "", "print a pprof-like profile instead") - debugFlag = flag.Bool("d", false, "print debug information such as parsed events list") + debugFlag = flag.Int("d", 0, "print debug information (1 for basic debug info, 2 for lower-level info)") // The binary file name, left here for serveSVGProfile. programBinary string @@ -83,7 +83,14 @@ func main() { flag.Usage() } - var pprofFunc func(io.Writer, *http.Request) error + if isTraceV2(traceFile) { + if err := cmdv2.Main(traceFile, *httpFlag, *pprofFlag, *debugFlag); err != nil { + dief("%s\n", err) + } + return + } + + var pprofFunc traceviewer.ProfileFunc switch *pprofFlag { case "net": pprofFunc = pprofByGoroutine(computePprofIO) @@ -95,7 +102,11 @@ func main() { pprofFunc = pprofByGoroutine(computePprofSched) } if pprofFunc != nil { - if err := pprofFunc(os.Stdout, &http.Request{}); err != nil { + records, err := pprofFunc(&http.Request{}) + if err != nil { + dief("failed to generate pprof: %v\n", err) + } + if err := traceviewer.BuildProfile(records).Write(os.Stdout); err != nil { dief("failed to generate pprof: %v\n", err) } os.Exit(0) @@ -115,7 +126,7 @@ func main() { dief("%v\n", err) } - if *debugFlag { + if *debugFlag != 0 { trace.Print(res.Events) os.Exit(0) } @@ -131,13 +142,35 @@ func main() { log.Printf("Opening browser. Trace viewer is listening on %s", addr) browser.Open(addr) + // Install MMU handler. + http.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil)) + + // Install main handler. + http.Handle("/", traceviewer.MainHandler([]traceviewer.View{ + {Type: traceviewer.ViewProc, Ranges: ranges}, + })) + // Start http server. - http.HandleFunc("/", httpMain) err = http.Serve(ln, nil) dief("failed to start http server: %v\n", err) } -var ranges []Range +// isTraceV2 returns true if filename holds a v2 trace. 
+func isTraceV2(filename string) bool { + file, err := os.Open(filename) + if err != nil { + return false + } + defer file.Close() + + ver, _, err := trace.ReadVersion(file) + if err != nil { + return false + } + return ver >= 1022 +} + +var ranges []traceviewer.Range var loader struct { once sync.Once @@ -175,209 +208,6 @@ func parseTrace() (trace.ParseResult, error) { return loader.res, loader.err } -// httpMain serves the starting page. -func httpMain(w http.ResponseWriter, r *http.Request) { - if err := templMain.Execute(w, ranges); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } -} - -var templMain = template.Must(template.New("").Parse(` - - - -

    cmd/trace: the Go trace event viewer

    -

    - This web server provides various visualizations of an event log gathered during - the execution of a Go program that uses the runtime/trace package. -

    - -

    Event timelines for running goroutines

    -{{if $}} -

    - Large traces are split into multiple sections of equal data size - (not duration) to avoid overwhelming the visualizer. -

    - -{{else}} - -{{end}} -

    - This view displays a timeline for each of the GOMAXPROCS logical - processors, showing which goroutine (if any) was running on that - logical processor at each moment. - - Each goroutine has an identifying number (e.g. G123), main function, - and color. - - A colored bar represents an uninterrupted span of execution. - - Execution of a goroutine may migrate from one logical processor to another, - causing a single colored bar to be horizontally continuous but - vertically displaced. -

    -

    - Clicking on a span reveals information about it, such as its - duration, its causal predecessors and successors, and the stack trace - at the final moment when it yielded the logical processor, for example - because it made a system call or tried to acquire a mutex. - - Directly underneath each bar, a smaller bar or more commonly a fine - vertical line indicates an event occurring during its execution. - Some of these are related to garbage collection; most indicate that - a goroutine yielded its logical processor but then immediately resumed execution - on the same logical processor. Clicking on the event displays the stack trace - at the moment it occurred. -

    -

    - The causal relationships between spans of goroutine execution - can be displayed by clicking the Flow Events button at the top. -

    -

    - At the top ("STATS"), there are three additional timelines that - display statistical information. - - "Goroutines" is a time series of the count of existing goroutines; - clicking on it displays their breakdown by state at that moment: - running, runnable, or waiting. - - "Heap" is a time series of the amount of heap memory allocated (in orange) - and (in green) the allocation limit at which the next GC cycle will begin. - - "Threads" shows the number of kernel threads in existence: there is - always one kernel thread per logical processor, and additional threads - are created for calls to non-Go code such as a system call or a - function written in C. -

    -

    - Above the event trace for the first logical processor are - traces for various runtime-internal events. - - The "GC" bar shows when the garbage collector is running, and in which stage. - Garbage collection may temporarily affect all the logical processors - and the other metrics. - - The "Network", "Timers", and "Syscalls" traces indicate events in - the runtime that cause goroutines to wake up. -

    -

    - The visualization allows you to navigate events at scales ranging from several - seconds to a handful of nanoseconds. - - Consult the documentation for the Chromium Trace Event Profiling Tool - for help navigating the view. -

    - - -

    - This view displays information about each set of goroutines that - shares the same main function. - - Clicking on a main function shows links to the four types of - blocking profile (see below) applied to that subset of goroutines. - - It also shows a table of specific goroutine instances, with various - execution statistics and a link to the event timeline for each one. - - The timeline displays only the selected goroutine and any others it - interacts with via block/unblock events. (The timeline is - goroutine-oriented rather than logical processor-oriented.) -

    - -

    Profiles

    -

    - Each link below displays a global profile in zoomable graph form as - produced by pprof's "web" command. - - In addition there is a link to download the profile for offline - analysis with pprof. - - All four profiles represent causes of delay that prevent a goroutine - from running on a logical processor: because it was waiting for the network, - for a synchronization operation on a mutex or channel, for a system call, - or for a logical processor to become available. -

    - - -

    User-defined tasks and regions

    -

    - The trace API allows a target program to annotate a region of code - within a goroutine, such as a key function, so that its performance - can be analyzed. - - Log events may be - associated with a region to record progress and relevant values. - - The API also allows annotation of higher-level - tasks, - which may involve work across many goroutines. -

    -

    - The links below display, for each region and task, a histogram of its execution times. - - Each histogram bucket contains a sample trace that records the - sequence of events such as goroutine creations, log events, and - subregion start/end times. - - For each task, you can click through to a logical-processor or - goroutine-oriented view showing the tasks and regions on the - timeline. - - Such information may help uncover which steps in a region are - unexpectedly slow, or reveal relationships between the data values - logged in a request and its running time. -

    - - -

    Garbage collection metrics

    - -

    - This chart indicates the maximum GC pause time (the largest x value - for which y is zero), and more generally, the fraction of time that - the processors are available to application goroutines ("mutators"), - for any time window of a specified size, in the worst case. -

    - - -`)) - func dief(msg string, args ...any) { fmt.Fprintf(os.Stderr, msg, args...) os.Exit(1) @@ -408,3 +238,11 @@ func reportMemoryUsage(msg string) { fmt.Printf("Enter to continue...") fmt.Scanf("%s", &dummy) } + +func mutatorUtil(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) { + events, err := parseEvents() + if err != nil { + return nil, err + } + return trace.MutatorUtilization(events, flags), nil +} diff --git a/src/cmd/trace/pprof.go b/src/cmd/trace/pprof.go index a73ff5336a..3722b37ab8 100644 --- a/src/cmd/trace/pprof.go +++ b/src/cmd/trace/pprof.go @@ -7,51 +7,25 @@ package main import ( - "bufio" "fmt" "internal/trace" - "io" + "internal/trace/traceviewer" "net/http" - "os" - "os/exec" - "path/filepath" - "runtime" "sort" "strconv" "time" - - "github.com/google/pprof/profile" ) -func goCmd() string { - var exeSuffix string - if runtime.GOOS == "windows" { - exeSuffix = ".exe" - } - path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) - if _, err := os.Stat(path); err == nil { - return path - } - return "go" -} - func init() { - http.HandleFunc("/io", serveSVGProfile(pprofByGoroutine(computePprofIO))) - http.HandleFunc("/block", serveSVGProfile(pprofByGoroutine(computePprofBlock))) - http.HandleFunc("/syscall", serveSVGProfile(pprofByGoroutine(computePprofSyscall))) - http.HandleFunc("/sched", serveSVGProfile(pprofByGoroutine(computePprofSched))) + http.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO))) + http.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock))) + http.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall))) + http.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched))) - http.HandleFunc("/regionio", serveSVGProfile(pprofByRegion(computePprofIO))) - http.HandleFunc("/regionblock", serveSVGProfile(pprofByRegion(computePprofBlock))) - http.HandleFunc("/regionsyscall", 
serveSVGProfile(pprofByRegion(computePprofSyscall))) - http.HandleFunc("/regionsched", serveSVGProfile(pprofByRegion(computePprofSched))) -} - -// Record represents one entry in pprof-like profiles. -type Record struct { - stk []*trace.Frame - n uint64 - time int64 + http.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO))) + http.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock))) + http.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall))) + http.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched))) } // interval represents a time interval in the trace. @@ -59,34 +33,34 @@ type interval struct { begin, end int64 // nanoseconds. } -func pprofByGoroutine(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error { - return func(w io.Writer, r *http.Request) error { +func pprofByGoroutine(compute computePprofFunc) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { id := r.FormValue("id") events, err := parseEvents() if err != nil { - return err + return nil, err } gToIntervals, err := pprofMatchingGoroutines(id, events) if err != nil { - return err + return nil, err } - return compute(w, gToIntervals, events) + return compute(gToIntervals, events) } } -func pprofByRegion(compute func(io.Writer, map[uint64][]interval, []*trace.Event) error) func(w io.Writer, r *http.Request) error { - return func(w io.Writer, r *http.Request) error { +func pprofByRegion(compute computePprofFunc) traceviewer.ProfileFunc { + return func(r *http.Request) ([]traceviewer.ProfileRecord, error) { filter, err := newRegionFilter(r) if err != nil { - return err + return nil, err } gToIntervals, err := pprofMatchingRegions(filter) if err != nil { - return err + return nil, err } events, _ := parseEvents() - return compute(w, 
gToIntervals, events) + return compute(gToIntervals, events) } } @@ -170,9 +144,11 @@ func pprofMatchingRegions(filter *regionFilter) (map[uint64][]interval, error) { return gToIntervals, nil } +type computePprofFunc func(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) + // computePprofIO generates IO pprof-like profile (time spent in IO wait, currently only network blocking event). -func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error { - prof := make(map[uint64]Record) +func computePprofIO(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) for _, ev := range events { if ev.Type != trace.EvGoBlockNet || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue @@ -180,18 +156,18 @@ func computePprofIO(w io.Writer, gToIntervals map[uint64][]interval, events []*t overlapping := pprofOverlappingDuration(gToIntervals, ev) if overlapping > 0 { rec := prof[ev.StkID] - rec.stk = ev.Stk - rec.n++ - rec.time += overlapping.Nanoseconds() + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping prof[ev.StkID] = rec } } - return buildProfile(prof).Write(w) + return recordsOf(prof), nil } // computePprofBlock generates blocking pprof-like profile (time spent blocked on synchronization primitives). 
-func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error { - prof := make(map[uint64]Record) +func computePprofBlock(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) for _, ev := range events { switch ev.Type { case trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect, @@ -208,18 +184,18 @@ func computePprofBlock(w io.Writer, gToIntervals map[uint64][]interval, events [ overlapping := pprofOverlappingDuration(gToIntervals, ev) if overlapping > 0 { rec := prof[ev.StkID] - rec.stk = ev.Stk - rec.n++ - rec.time += overlapping.Nanoseconds() + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping prof[ev.StkID] = rec } } - return buildProfile(prof).Write(w) + return recordsOf(prof), nil } // computePprofSyscall generates syscall pprof-like profile (time spent blocked in syscalls). -func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error { - prof := make(map[uint64]Record) +func computePprofSyscall(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) for _, ev := range events { if ev.Type != trace.EvGoSysCall || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { continue @@ -227,19 +203,19 @@ func computePprofSyscall(w io.Writer, gToIntervals map[uint64][]interval, events overlapping := pprofOverlappingDuration(gToIntervals, ev) if overlapping > 0 { rec := prof[ev.StkID] - rec.stk = ev.Stk - rec.n++ - rec.time += overlapping.Nanoseconds() + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping prof[ev.StkID] = rec } } - return buildProfile(prof).Write(w) + return recordsOf(prof), nil } // computePprofSched generates scheduler latency pprof-like profile // (time between a goroutine become runnable and actually scheduled for execution). 
-func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, events []*trace.Event) error { - prof := make(map[uint64]Record) +func computePprofSched(gToIntervals map[uint64][]interval, events []*trace.Event) ([]traceviewer.ProfileRecord, error) { + prof := make(map[uint64]traceviewer.ProfileRecord) for _, ev := range events { if (ev.Type != trace.EvGoUnblock && ev.Type != trace.EvGoCreate) || ev.Link == nil || ev.StkID == 0 || len(ev.Stk) == 0 { @@ -248,13 +224,13 @@ func computePprofSched(w io.Writer, gToIntervals map[uint64][]interval, events [ overlapping := pprofOverlappingDuration(gToIntervals, ev) if overlapping > 0 { rec := prof[ev.StkID] - rec.stk = ev.Stk - rec.n++ - rec.time += overlapping.Nanoseconds() + rec.Stack = ev.Stk + rec.Count++ + rec.Time += overlapping prof[ev.StkID] = rec } } - return buildProfile(prof).Write(w) + return recordsOf(prof), nil } // pprofOverlappingDuration returns the overlapping duration between @@ -278,100 +254,10 @@ func pprofOverlappingDuration(gToIntervals map[uint64][]interval, ev *trace.Even return overlapping } -// serveSVGProfile serves pprof-like profile generated by prof as svg. 
-func serveSVGProfile(prof func(w io.Writer, r *http.Request) error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - - if r.FormValue("raw") != "" { - w.Header().Set("Content-Type", "application/octet-stream") - if err := prof(w, r); err != nil { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("X-Go-Pprof", "1") - http.Error(w, fmt.Sprintf("failed to get profile: %v", err), http.StatusInternalServerError) - return - } - return - } - - blockf, err := os.CreateTemp("", "block") - if err != nil { - http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError) - return - } - defer func() { - blockf.Close() - os.Remove(blockf.Name()) - }() - blockb := bufio.NewWriter(blockf) - if err := prof(blockb, r); err != nil { - http.Error(w, fmt.Sprintf("failed to generate profile: %v", err), http.StatusInternalServerError) - return - } - if err := blockb.Flush(); err != nil { - http.Error(w, fmt.Sprintf("failed to flush temp file: %v", err), http.StatusInternalServerError) - return - } - if err := blockf.Close(); err != nil { - http.Error(w, fmt.Sprintf("failed to close temp file: %v", err), http.StatusInternalServerError) - return - } - svgFilename := blockf.Name() + ".svg" - if output, err := exec.Command(goCmd(), "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil { - http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v\n%s", err, output), http.StatusInternalServerError) - return - } - defer os.Remove(svgFilename) - w.Header().Set("Content-Type", "image/svg+xml") - http.ServeFile(w, r, svgFilename) +func recordsOf(records map[uint64]traceviewer.ProfileRecord) []traceviewer.ProfileRecord { + result := make([]traceviewer.ProfileRecord, 0, len(records)) + for _, record := range records { + result = append(result, record) } -} - -func buildProfile(prof map[uint64]Record) *profile.Profile { - p := &profile.Profile{ - PeriodType: 
&profile.ValueType{Type: "trace", Unit: "count"}, - Period: 1, - SampleType: []*profile.ValueType{ - {Type: "contentions", Unit: "count"}, - {Type: "delay", Unit: "nanoseconds"}, - }, - } - locs := make(map[uint64]*profile.Location) - funcs := make(map[string]*profile.Function) - for _, rec := range prof { - var sloc []*profile.Location - for _, frame := range rec.stk { - loc := locs[frame.PC] - if loc == nil { - fn := funcs[frame.File+frame.Fn] - if fn == nil { - fn = &profile.Function{ - ID: uint64(len(p.Function) + 1), - Name: frame.Fn, - SystemName: frame.Fn, - Filename: frame.File, - } - p.Function = append(p.Function, fn) - funcs[frame.File+frame.Fn] = fn - } - loc = &profile.Location{ - ID: uint64(len(p.Location) + 1), - Address: frame.PC, - Line: []profile.Line{ - { - Function: fn, - Line: int64(frame.Line), - }, - }, - } - p.Location = append(p.Location, loc) - locs[frame.PC] = loc - } - sloc = append(sloc, loc) - } - p.Sample = append(p.Sample, &profile.Sample{ - Value: []int64{int64(rec.n), rec.time}, - Location: sloc, - }) - } - return p + return result } diff --git a/src/cmd/trace/trace.go b/src/cmd/trace/trace.go index 618df42033..438b8dd328 100644 --- a/src/cmd/trace/trace.go +++ b/src/cmd/trace/trace.go @@ -5,29 +5,24 @@ package main import ( - "cmd/internal/traceviewer" - "embed" - "encoding/json" "fmt" "internal/trace" - "io" + "internal/trace/traceviewer" "log" "math" "net/http" "runtime/debug" "sort" "strconv" - "strings" "time" -) -//go:embed static/trace_viewer_full.html static/webcomponents.min.js -var staticContent embed.FS + "internal/trace/traceviewer/format" +) func init() { http.HandleFunc("/trace", httpTrace) http.HandleFunc("/jsontrace", httpJsonTrace) - http.Handle("/static/", http.FileServer(http.FS(staticContent))) + http.Handle("/static/", traceviewer.StaticHandler()) } // httpTrace serves either whole trace (goid==0) or trace for goid goroutine. 
@@ -37,143 +32,9 @@ func httpTrace(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if err := r.ParseForm(); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode()) - w.Write([]byte(html)) - + traceviewer.TraceHandler().ServeHTTP(w, r) } -// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md -// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html -var templTrace = ` - - - - - - - - - - - - - -` - // httpJsonTrace serves json trace, requested from within templTrace HTML. func httpJsonTrace(w http.ResponseWriter, r *http.Request) { defer debug.FreeOSMemory() @@ -203,7 +64,7 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { log.Printf("failed to find goroutine %d", goid) return } - params.mode = modeGoroutineOriented + params.mode = traceviewer.ModeGoroutineOriented params.startTime = g.StartTime if g.EndTime != 0 { params.endTime = g.EndTime @@ -225,7 +86,7 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { return } goid := task.events[0].G - params.mode = modeGoroutineOriented | modeTaskOriented + params.mode = traceviewer.ModeGoroutineOriented | traceviewer.ModeTaskOriented params.startTime = task.firstTimestamp() - 1 params.endTime = task.lastTimestamp() + 1 params.maing = goid @@ -250,7 +111,7 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { log.Printf("failed to find task with id %d", taskid) return } - params.mode = modeTaskOriented + params.mode = traceviewer.ModeTaskOriented params.startTime = task.firstTimestamp() - 1 params.endTime = task.lastTimestamp() + 1 params.tasks = task.descendants() @@ -272,247 +133,31 @@ func httpJsonTrace(w http.ResponseWriter, r *http.Request) { } } - c := viewerDataTraceConsumer(w, 
start, end) + c := traceviewer.ViewerDataTraceConsumer(w, start, end) if err := generateTrace(params, c); err != nil { log.Printf("failed to generate trace: %v", err) return } } -type Range struct { - Name string - Start int - End int - StartTime int64 - EndTime int64 -} - -func (r Range) URL() string { - return fmt.Sprintf("/trace?start=%d&end=%d", r.Start, r.End) -} - // splitTrace splits the trace into a number of ranges, // each resulting in approx 100MB of json output // (trace viewer can hardly handle more). -func splitTrace(res trace.ParseResult) []Range { +func splitTrace(res trace.ParseResult) []traceviewer.Range { params := &traceParams{ parsed: res, endTime: math.MaxInt64, } - s, c := splittingTraceConsumer(100 << 20) // 100M + s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100M if err := generateTrace(params, c); err != nil { dief("%v\n", err) } return s.Ranges } -type splitter struct { - Ranges []Range -} - -// walkStackFrames calls fn for id and all of its parent frames from allFrames. -func walkStackFrames(allFrames map[string]traceviewer.Frame, id int, fn func(id int)) { - for id != 0 { - f, ok := allFrames[strconv.Itoa(id)] - if !ok { - break - } - fn(id) - id = f.Parent - } -} - -func stackFrameEncodedSize(id uint, f traceviewer.Frame) int { - // We want to know the marginal size of traceviewer.Data.Frames for - // each event. Running full JSON encoding of the map for each event is - // far too slow. - // - // Since the format is fixed, we can easily compute the size without - // encoding. - // - // A single entry looks like one of the following: - // - // "1":{"name":"main.main:30"}, - // "10":{"name":"pkg.NewSession:173","parent":9}, - // - // The parent is omitted if 0. The trailing comma is omitted from the - // last entry, but we don't need that much precision. - const ( - baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`) - - // Don't count the trailing quote on the name, as that is - // counted in baseSize. 
- parentBaseSize = len(`,"parent":`) - ) - - size := baseSize - - size += len(f.Name) - - // Bytes for id (always positive). - for id > 0 { - size += 1 - id /= 10 - } - - if f.Parent > 0 { - size += parentBaseSize - // Bytes for parent (always positive). - for f.Parent > 0 { - size += 1 - f.Parent /= 10 - } - } - - return size -} - -func splittingTraceConsumer(max int) (*splitter, traceConsumer) { - type eventSz struct { - Time float64 - Sz int - Frames []int - } - - var ( - // data.Frames contains only the frames for required events. - data = traceviewer.Data{Frames: make(map[string]traceviewer.Frame)} - - allFrames = make(map[string]traceviewer.Frame) - - sizes []eventSz - cw countingWriter - ) - - s := new(splitter) - - return s, traceConsumer{ - consumeTimeUnit: func(unit string) { - data.TimeUnit = unit - }, - consumeViewerEvent: func(v *traceviewer.Event, required bool) { - if required { - // Store required events inside data so flush - // can include them in the required part of the - // trace. - data.Events = append(data.Events, v) - walkStackFrames(allFrames, v.Stack, func(id int) { - s := strconv.Itoa(id) - data.Frames[s] = allFrames[s] - }) - walkStackFrames(allFrames, v.EndStack, func(id int) { - s := strconv.Itoa(id) - data.Frames[s] = allFrames[s] - }) - return - } - enc := json.NewEncoder(&cw) - enc.Encode(v) - size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",". - // Add referenced stack frames. Their size is computed - // in flush, where we can dedup across events. - walkStackFrames(allFrames, v.Stack, func(id int) { - size.Frames = append(size.Frames, id) - }) - walkStackFrames(allFrames, v.EndStack, func(id int) { - size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later. - }) - sizes = append(sizes, size) - cw.size = 0 - }, - consumeViewerFrame: func(k string, v traceviewer.Frame) { - allFrames[k] = v - }, - flush: func() { - // Calculate size of the mandatory part of the trace. 
- // This includes thread names and stack frames for - // required events. - cw.size = 0 - enc := json.NewEncoder(&cw) - enc.Encode(data) - requiredSize := cw.size - - // Then calculate size of each individual event and - // their stack frames, grouping them into ranges. We - // only include stack frames relevant to the events in - // the range to reduce overhead. - - var ( - start = 0 - - eventsSize = 0 - - frames = make(map[string]traceviewer.Frame) - framesSize = 0 - ) - for i, ev := range sizes { - eventsSize += ev.Sz - - // Add required stack frames. Note that they - // may already be in the map. - for _, id := range ev.Frames { - s := strconv.Itoa(id) - _, ok := frames[s] - if ok { - continue - } - f := allFrames[s] - frames[s] = f - framesSize += stackFrameEncodedSize(uint(id), f) - } - - total := requiredSize + framesSize + eventsSize - if total < max { - continue - } - - // Reached max size, commit this range and - // start a new range. - startTime := time.Duration(sizes[start].Time * 1000) - endTime := time.Duration(ev.Time * 1000) - ranges = append(ranges, Range{ - Name: fmt.Sprintf("%v-%v", startTime, endTime), - Start: start, - End: i + 1, - StartTime: int64(startTime), - EndTime: int64(endTime), - }) - start = i + 1 - frames = make(map[string]traceviewer.Frame) - framesSize = 0 - eventsSize = 0 - } - if len(ranges) <= 1 { - s.Ranges = nil - return - } - - if end := len(sizes) - 1; start < end { - ranges = append(ranges, Range{ - Name: fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)), - Start: start, - End: end, - StartTime: int64(sizes[start].Time * 1000), - EndTime: int64(sizes[end].Time * 1000), - }) - } - s.Ranges = ranges - }, - } -} - -type countingWriter struct { - size int -} - -func (cw *countingWriter) Write(data []byte) (int, error) { - cw.size += len(data) - return len(data), nil -} - type traceParams struct { parsed trace.ParseResult - mode traceviewMode + mode traceviewer.Mode startTime 
int64 endTime int64 maing uint64 // for goroutine-oriented view, place this goroutine on the top row @@ -520,59 +165,18 @@ type traceParams struct { tasks []*taskDesc // Tasks to be displayed. tasks[0] is the top-most task } -type traceviewMode uint - -const ( - modeGoroutineOriented traceviewMode = 1 << iota - modeTaskOriented -) - type traceContext struct { *traceParams - consumer traceConsumer - frameTree frameNode - frameSeq int - arrowSeq uint64 - gcount uint64 - - heapStats, prevHeapStats heapStats - threadStats, prevThreadStats threadStats - gstates, prevGstates [gStateCount]int64 - + consumer traceviewer.TraceConsumer + emitter *traceviewer.Emitter + arrowSeq uint64 + gcount uint64 regionID int // last emitted region id. incremented in each emitRegion call. } -type heapStats struct { - heapAlloc uint64 - nextGC uint64 -} - -type threadStats struct { - insyscallRuntime int64 // system goroutine in syscall - insyscall int64 // user goroutine in syscall - prunning int64 // thread running P -} - -type frameNode struct { - id int - children map[uint64]frameNode -} - -type gState int - -const ( - gDead gState = iota - gRunnable - gRunning - gWaiting - gWaitingGC - - gStateCount -) - type gInfo struct { - state gState // current state - name string // name chosen for this goroutine at first EvGoStart + state traceviewer.GState // current state + name string // name chosen for this goroutine at first EvGoStart isSystemG bool start *trace.Event // most recent EvGoStart markAssist *trace.Event // if non-nil, the mark assist currently running. @@ -596,19 +200,6 @@ type SortIndexArg struct { Index int `json:"sort_index"` } -type traceConsumer struct { - consumeTimeUnit func(unit string) - consumeViewerEvent func(v *traceviewer.Event, required bool) - consumeViewerFrame func(key string, f traceviewer.Frame) - flush func() -} - -const ( - procsSection = 0 // where Goroutines or per-P timelines are presented. - statsSection = 1 // where counters are presented. 
- tasksSection = 2 // where Task hierarchy & timeline is presented. -) - // generateTrace generates json trace for trace-viewer: // https://github.com/google/trace-viewer // Trace format is described at: @@ -616,14 +207,22 @@ const ( // If mode==goroutineMode, generate trace for goroutine goid, otherwise whole trace. // startTime, endTime determine part of the trace that we are interested in. // gset restricts goroutines that are included in the resulting trace. -func generateTrace(params *traceParams, consumer traceConsumer) error { - defer consumer.flush() +func generateTrace(params *traceParams, consumer traceviewer.TraceConsumer) error { + emitter := traceviewer.NewEmitter( + consumer, + time.Duration(params.startTime), + time.Duration(params.endTime), + ) + if params.mode&traceviewer.ModeGoroutineOriented != 0 { + emitter.SetResourceType("G") + } else { + emitter.SetResourceType("PROCS") + } + defer emitter.Flush() - ctx := &traceContext{traceParams: params} - ctx.frameTree.children = make(map[uint64]frameNode) + ctx := &traceContext{traceParams: params, emitter: emitter} ctx.consumer = consumer - ctx.consumer.consumeTimeUnit("ns") maxProc := 0 ginfos := make(map[uint64]*gInfo) stacks := params.parsed.Stacks @@ -640,17 +239,17 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { // Since we make many calls to setGState, we record a sticky // error in setGStateErr and check it after every event. var setGStateErr error - setGState := func(ev *trace.Event, g uint64, oldState, newState gState) { + setGState := func(ev *trace.Event, g uint64, oldState, newState traceviewer.GState) { info := getGInfo(g) - if oldState == gWaiting && info.state == gWaitingGC { - // For checking, gWaiting counts as any gWaiting*. + if oldState == traceviewer.GWaiting && info.state == traceviewer.GWaitingGC { + // For checking, traceviewer.GWaiting counts as any traceviewer.GWaiting*. 
oldState = info.state } if info.state != oldState && setGStateErr == nil { setGStateErr = fmt.Errorf("expected G %d to be in state %d, but got state %d", g, oldState, info.state) } - ctx.gstates[info.state]-- - ctx.gstates[newState]++ + + emitter.GoroutineTransition(time.Duration(ev.Ts), info.state, newState) info.state = newState } @@ -658,13 +257,13 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { // Handle state transitions before we filter out events. switch ev.Type { case trace.EvGoStart, trace.EvGoStartLabel: - setGState(ev, ev.G, gRunnable, gRunning) + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GRunning) info := getGInfo(ev.G) info.start = ev case trace.EvProcStart: - ctx.threadStats.prunning++ + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, 1) case trace.EvProcStop: - ctx.threadStats.prunning-- + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateRunning, -1) case trace.EvGoCreate: newG := ev.Args[0] info := getGInfo(newG) @@ -682,58 +281,59 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { info.isSystemG = trace.IsSystemGoroutine(fname) ctx.gcount++ - setGState(ev, newG, gDead, gRunnable) + setGState(ev, newG, traceviewer.GDead, traceviewer.GRunnable) case trace.EvGoEnd: ctx.gcount-- - setGState(ev, ev.G, gRunning, gDead) + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GDead) case trace.EvGoUnblock: - setGState(ev, ev.Args[0], gWaiting, gRunnable) + setGState(ev, ev.Args[0], traceviewer.GWaiting, traceviewer.GRunnable) case trace.EvGoSysExit: - setGState(ev, ev.G, gWaiting, gRunnable) + setGState(ev, ev.G, traceviewer.GWaiting, traceviewer.GRunnable) if getGInfo(ev.G).isSystemG { - ctx.threadStats.insyscallRuntime-- + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, -1) } else { - ctx.threadStats.insyscall-- + emitter.IncThreadStateCount(time.Duration(ev.Ts), 
traceviewer.ThreadStateInSyscall, -1) } case trace.EvGoSysBlock: - setGState(ev, ev.G, gRunning, gWaiting) + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting) if getGInfo(ev.G).isSystemG { - ctx.threadStats.insyscallRuntime++ + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1) } else { - ctx.threadStats.insyscall++ + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1) } case trace.EvGoSched, trace.EvGoPreempt: - setGState(ev, ev.G, gRunning, gRunnable) + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GRunnable) case trace.EvGoStop, trace.EvGoSleep, trace.EvGoBlock, trace.EvGoBlockSend, trace.EvGoBlockRecv, trace.EvGoBlockSelect, trace.EvGoBlockSync, trace.EvGoBlockCond, trace.EvGoBlockNet: - setGState(ev, ev.G, gRunning, gWaiting) + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaiting) case trace.EvGoBlockGC: - setGState(ev, ev.G, gRunning, gWaitingGC) + setGState(ev, ev.G, traceviewer.GRunning, traceviewer.GWaitingGC) case trace.EvGCMarkAssistStart: getGInfo(ev.G).markAssist = ev case trace.EvGCMarkAssistDone: getGInfo(ev.G).markAssist = nil case trace.EvGoWaiting: - setGState(ev, ev.G, gRunnable, gWaiting) + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting) case trace.EvGoInSyscall: // Cancel out the effect of EvGoCreate at the beginning. 
- setGState(ev, ev.G, gRunnable, gWaiting) + setGState(ev, ev.G, traceviewer.GRunnable, traceviewer.GWaiting) if getGInfo(ev.G).isSystemG { - ctx.threadStats.insyscallRuntime++ + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscallRuntime, 1) } else { - ctx.threadStats.insyscall++ + emitter.IncThreadStateCount(time.Duration(ev.Ts), traceviewer.ThreadStateInSyscall, 1) } case trace.EvHeapAlloc: - ctx.heapStats.heapAlloc = ev.Args[0] + emitter.HeapAlloc(time.Duration(ev.Ts), ev.Args[0]) case trace.EvHeapGoal: - ctx.heapStats.nextGC = ev.Args[0] + emitter.HeapGoal(time.Duration(ev.Ts), ev.Args[0]) } if setGStateErr != nil { return setGStateErr } - if ctx.gstates[gRunnable] < 0 || ctx.gstates[gRunning] < 0 || ctx.threadStats.insyscall < 0 || ctx.threadStats.insyscallRuntime < 0 { - return fmt.Errorf("invalid state after processing %v: runnable=%d running=%d insyscall=%d insyscallRuntime=%d", ev, ctx.gstates[gRunnable], ctx.gstates[gRunning], ctx.threadStats.insyscall, ctx.threadStats.insyscallRuntime) + + if err := emitter.Err(); err != nil { + return fmt.Errorf("invalid state after processing %v: %s", ev, err) } // Ignore events that are from uninteresting goroutines @@ -752,12 +352,12 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { // Emit trace objects. 
switch ev.Type { case trace.EvProcStart: - if ctx.mode&modeGoroutineOriented != 0 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { continue } ctx.emitInstant(ev, "proc start", "") case trace.EvProcStop: - if ctx.mode&modeGoroutineOriented != 0 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { continue } ctx.emitInstant(ev, "proc stop", "") @@ -765,7 +365,7 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { ctx.emitSlice(ev, "GC") case trace.EvGCDone: case trace.EvSTWStart: - if ctx.mode&modeGoroutineOriented != 0 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { continue } ctx.emitSlice(ev, fmt.Sprintf("STW (%s)", ev.SArgs[0])) @@ -832,46 +432,10 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { ctx.emitInstant(ev, "CPU profile sample", "") } } - // Emit any counter updates. - ctx.emitThreadCounters(ev) - ctx.emitHeapCounters(ev) - ctx.emitGoroutineCounters(ev) - } - - ctx.emitSectionFooter(statsSection, "STATS", 0) - - if ctx.mode&modeTaskOriented != 0 { - ctx.emitSectionFooter(tasksSection, "TASKS", 1) - } - - if ctx.mode&modeGoroutineOriented != 0 { - ctx.emitSectionFooter(procsSection, "G", 2) - } else { - ctx.emitSectionFooter(procsSection, "PROCS", 2) - } - - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.GCP, Arg: &NameArg{"GC"}}) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.GCP, Arg: &SortIndexArg{-6}}) - - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.NetpollP, Arg: &NameArg{"Network"}}) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.NetpollP, Arg: &SortIndexArg{-5}}) - - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.TimerP, Arg: &NameArg{"Timers"}}) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: 
"M", PID: procsSection, TID: trace.TimerP, Arg: &SortIndexArg{-4}}) - - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: trace.SyscallP, Arg: &NameArg{"Syscalls"}}) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: trace.SyscallP, Arg: &SortIndexArg{-3}}) - - // Display rows for Ps if we are in the default trace view mode (not goroutine-oriented presentation) - if ctx.mode&modeGoroutineOriented == 0 { - for i := 0; i <= maxProc; i++ { - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: uint64(i), Arg: &NameArg{fmt.Sprintf("Proc %v", i)}}) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: uint64(i), Arg: &SortIndexArg{i}}) - } } // Display task and its regions if we are in task-oriented presentation mode. - if ctx.mode&modeTaskOriented != 0 { + if ctx.mode&traceviewer.ModeTaskOriented != 0 { // sort tasks based on the task start time. sortedTask := make([]*taskDesc, len(ctx.tasks)) copy(sortedTask, ctx.tasks) @@ -888,7 +452,7 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { // If we are in goroutine-oriented mode, we draw regions. // TODO(hyangah): add this for task/P-oriented mode (i.e., focustask view) too. - if ctx.mode&modeGoroutineOriented != 0 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { for _, s := range task.regions { ctx.emitRegion(s) } @@ -897,34 +461,34 @@ func generateTrace(params *traceParams, consumer traceConsumer) error { } // Display goroutine rows if we are either in goroutine-oriented mode. 
- if ctx.mode&modeGoroutineOriented != 0 { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 { for k, v := range ginfos { if !ctx.gs[k] { continue } - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: procsSection, TID: k, Arg: &NameArg{v.name}}) + emitter.Resource(k, v.name) } - // Row for the main goroutine (maing) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: ctx.maing, Arg: &SortIndexArg{-2}}) + emitter.Focus(ctx.maing) + // Row for GC or global state (specified with G=0) - ctx.emitFooter(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: procsSection, TID: 0, Arg: &SortIndexArg{-1}}) + ctx.emitFooter(&format.Event{Name: "thread_sort_index", Phase: "M", PID: format.ProcsSection, TID: 0, Arg: &SortIndexArg{-1}}) + } else { + // Display rows for Ps if we are in the default trace view mode. + for i := 0; i <= maxProc; i++ { + emitter.Resource(uint64(i), fmt.Sprintf("Proc %v", i)) + } } return nil } -func (ctx *traceContext) emit(e *traceviewer.Event) { - ctx.consumer.consumeViewerEvent(e, false) +func (ctx *traceContext) emit(e *format.Event) { + ctx.consumer.ConsumeViewerEvent(e, false) } -func (ctx *traceContext) emitFooter(e *traceviewer.Event) { - ctx.consumer.consumeViewerEvent(e, true) +func (ctx *traceContext) emitFooter(e *format.Event) { + ctx.consumer.ConsumeViewerEvent(e, true) } -func (ctx *traceContext) emitSectionFooter(sectionID uint64, name string, priority int) { - ctx.emitFooter(&traceviewer.Event{Name: "process_name", Phase: "M", PID: sectionID, Arg: &NameArg{name}}) - ctx.emitFooter(&traceviewer.Event{Name: "process_sort_index", Phase: "M", PID: sectionID, Arg: &SortIndexArg{priority}}) -} - func (ctx *traceContext) time(ev *trace.Event) float64 { // Trace viewer wants timestamps in microseconds. 
return float64(ev.Ts) / 1000 @@ -942,7 +506,7 @@ func tsWithinRange(ts, s, e int64) bool { } func (ctx *traceContext) proc(ev *trace.Event) uint64 { - if ctx.mode&modeGoroutineOriented != 0 && ev.P < trace.FakeP { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && ev.P < trace.FakeP { return ev.G } else { return uint64(ev.P) @@ -953,7 +517,7 @@ func (ctx *traceContext) emitSlice(ev *trace.Event, name string) { ctx.emit(ctx.makeSlice(ev, name)) } -func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *traceviewer.Event { +func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *format.Event { // If ViewerEvent.Dur is not a positive value, // trace viewer handles it as a non-terminating time interval. // Avoid it by setting the field with a small value. @@ -961,18 +525,18 @@ func (ctx *traceContext) makeSlice(ev *trace.Event, name string) *traceviewer.Ev if ev.Link.Ts-ev.Ts <= 0 { durationUsec = 0.0001 // 0.1 nanoseconds } - sl := &traceviewer.Event{ + sl := &format.Event{ Name: name, Phase: "X", Time: ctx.time(ev), Dur: durationUsec, TID: ctx.proc(ev), - Stack: ctx.stack(ev.Stk), - EndStack: ctx.stack(ev.Link.Stk), + Stack: ctx.emitter.Stack(ev.Stk), + EndStack: ctx.emitter.Stack(ev.Link.Stk), } // grey out non-overlapping events if the event is not a global event (ev.G == 0) - if ctx.mode&modeTaskOriented != 0 && ev.G != 0 { + if ctx.mode&traceviewer.ModeTaskOriented != 0 && ev.G != 0 { // include P information. 
if t := ev.Type; t == trace.EvGoStart || t == trace.EvGoStartLabel { type Arg struct { @@ -1000,25 +564,24 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) { taskName := task.name durationUsec := float64(task.lastTimestamp()-task.firstTimestamp()) / 1e3 - ctx.emitFooter(&traceviewer.Event{Name: "thread_name", Phase: "M", PID: tasksSection, TID: taskRow, Arg: &NameArg{fmt.Sprintf("T%d %s", task.id, taskName)}}) - ctx.emit(&traceviewer.Event{Name: "thread_sort_index", Phase: "M", PID: tasksSection, TID: taskRow, Arg: &SortIndexArg{sortIndex}}) + ctx.emitter.Task(taskRow, taskName, sortIndex) ts := float64(task.firstTimestamp()) / 1e3 - sl := &traceviewer.Event{ + sl := &format.Event{ Name: taskName, Phase: "X", Time: ts, Dur: durationUsec, - PID: tasksSection, + PID: format.TasksSection, TID: taskRow, Cname: pickTaskColor(task.id), } targ := TaskArg{ID: task.id} if task.create != nil { - sl.Stack = ctx.stack(task.create.Stk) + sl.Stack = ctx.emitter.Stack(task.create.Stk) targ.StartG = task.create.G } if task.end != nil { - sl.EndStack = ctx.stack(task.end.Stk) + sl.EndStack = ctx.emitter.Stack(task.end.Stk) targ.EndG = task.end.G } sl.Arg = targ @@ -1026,8 +589,8 @@ func (ctx *traceContext) emitTask(task *taskDesc, sortIndex int) { if task.create != nil && task.create.Type == trace.EvUserTaskCreate && task.create.Args[1] != 0 { ctx.arrowSeq++ - ctx.emit(&traceviewer.Event{Name: "newTask", Phase: "s", TID: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, PID: tasksSection}) - ctx.emit(&traceviewer.Event{Name: "newTask", Phase: "t", TID: taskRow, ID: ctx.arrowSeq, Time: ts, PID: tasksSection}) + ctx.emit(&format.Event{Name: "newTask", Phase: "s", TID: task.create.Args[1], ID: ctx.arrowSeq, Time: ts, PID: format.TasksSection}) + ctx.emit(&format.Event{Name: "newTask", Phase: "t", TID: taskRow, ID: ctx.arrowSeq, Time: ts, PID: format.TasksSection}) } } @@ -1048,7 +611,7 @@ func (ctx *traceContext) emitRegion(s regionDesc) { scopeID := 
fmt.Sprintf("%x", id) name := s.Name - sl0 := &traceviewer.Event{ + sl0 := &format.Event{ Category: "Region", Name: name, Phase: "b", @@ -1059,11 +622,11 @@ func (ctx *traceContext) emitRegion(s regionDesc) { Cname: pickTaskColor(s.TaskID), } if s.Start != nil { - sl0.Stack = ctx.stack(s.Start.Stk) + sl0.Stack = ctx.emitter.Stack(s.Start.Stk) } ctx.emit(sl0) - sl1 := &traceviewer.Event{ + sl1 := &format.Event{ Category: "Region", Name: name, Phase: "e", @@ -1075,70 +638,18 @@ func (ctx *traceContext) emitRegion(s regionDesc) { Arg: RegionArg{TaskID: s.TaskID}, } if s.End != nil { - sl1.Stack = ctx.stack(s.End.Stk) + sl1.Stack = ctx.emitter.Stack(s.End.Stk) } ctx.emit(sl1) } -type heapCountersArg struct { - Allocated uint64 - NextGC uint64 -} - -func (ctx *traceContext) emitHeapCounters(ev *trace.Event) { - if ctx.prevHeapStats == ctx.heapStats { - return - } - diff := uint64(0) - if ctx.heapStats.nextGC > ctx.heapStats.heapAlloc { - diff = ctx.heapStats.nextGC - ctx.heapStats.heapAlloc - } - if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) { - ctx.emit(&traceviewer.Event{Name: "Heap", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &heapCountersArg{ctx.heapStats.heapAlloc, diff}}) - } - ctx.prevHeapStats = ctx.heapStats -} - -type goroutineCountersArg struct { - Running uint64 - Runnable uint64 - GCWaiting uint64 -} - -func (ctx *traceContext) emitGoroutineCounters(ev *trace.Event) { - if ctx.prevGstates == ctx.gstates { - return - } - if tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) { - ctx.emit(&traceviewer.Event{Name: "Goroutines", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &goroutineCountersArg{uint64(ctx.gstates[gRunning]), uint64(ctx.gstates[gRunnable]), uint64(ctx.gstates[gWaitingGC])}}) - } - ctx.prevGstates = ctx.gstates -} - -type threadCountersArg struct { - Running int64 - InSyscall int64 -} - -func (ctx *traceContext) emitThreadCounters(ev *trace.Event) { - if ctx.prevThreadStats == ctx.threadStats { - return - } - if tsWithinRange(ev.Ts, 
ctx.startTime, ctx.endTime) { - ctx.emit(&traceviewer.Event{Name: "Threads", Phase: "C", Time: ctx.time(ev), PID: 1, Arg: &threadCountersArg{ - Running: ctx.threadStats.prunning, - InSyscall: ctx.threadStats.insyscall}}) - } - ctx.prevThreadStats = ctx.threadStats -} - func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) { if !tsWithinRange(ev.Ts, ctx.startTime, ctx.endTime) { return } cname := "" - if ctx.mode&modeTaskOriented != 0 { + if ctx.mode&traceviewer.ModeTaskOriented != 0 { taskID, isUserAnnotation := isUserAnnotationEvent(ev) show := false @@ -1163,14 +674,14 @@ func (ctx *traceContext) emitInstant(ev *trace.Event, name, category string) { } arg = &Arg{ev.Args[0]} } - ctx.emit(&traceviewer.Event{ + ctx.emit(&format.Event{ Name: name, Category: category, Phase: "I", Scope: "t", Time: ctx.time(ev), TID: ctx.proc(ev), - Stack: ctx.stack(ev.Stk), + Stack: ctx.emitter.Stack(ev.Stk), Cname: cname, Arg: arg}) } @@ -1181,7 +692,7 @@ func (ctx *traceContext) emitArrow(ev *trace.Event, name string) { // For example, a goroutine was unblocked but was not scheduled before trace stop. return } - if ctx.mode&modeGoroutineOriented != 0 && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) { + if ctx.mode&traceviewer.ModeGoroutineOriented != 0 && (!ctx.gs[ev.Link.G] || ev.Link.Ts < ctx.startTime || ev.Link.Ts > ctx.endTime) { return } @@ -1192,7 +703,7 @@ func (ctx *traceContext) emitArrow(ev *trace.Event, name string) { } color := "" - if ctx.mode&modeTaskOriented != 0 { + if ctx.mode&traceviewer.ModeTaskOriented != 0 { overlapping := false // skip non-overlapping arrows. 
for _, task := range ctx.tasks { @@ -1207,32 +718,8 @@ func (ctx *traceContext) emitArrow(ev *trace.Event, name string) { } ctx.arrowSeq++ - ctx.emit(&traceviewer.Event{Name: name, Phase: "s", TID: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.stack(ev.Stk), Cname: color}) - ctx.emit(&traceviewer.Event{Name: name, Phase: "t", TID: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color}) -} - -func (ctx *traceContext) stack(stk []*trace.Frame) int { - return ctx.buildBranch(ctx.frameTree, stk) -} - -// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree. -func (ctx *traceContext) buildBranch(parent frameNode, stk []*trace.Frame) int { - if len(stk) == 0 { - return parent.id - } - last := len(stk) - 1 - frame := stk[last] - stk = stk[:last] - - node, ok := parent.children[frame.PC] - if !ok { - ctx.frameSeq++ - node.id = ctx.frameSeq - node.children = make(map[uint64]frameNode) - parent.children[frame.PC] = node - ctx.consumer.consumeViewerFrame(strconv.Itoa(node.id), traceviewer.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id}) - } - return ctx.buildBranch(node, stk) + ctx.emit(&format.Event{Name: name, Phase: "s", TID: ctx.proc(ev), ID: ctx.arrowSeq, Time: ctx.time(ev), Stack: ctx.emitter.Stack(ev.Stk), Cname: color}) + ctx.emit(&format.Event{Name: name, Phase: "t", TID: ctx.proc(ev.Link), ID: ctx.arrowSeq, Time: ctx.time(ev.Link), Cname: color}) } // firstTimestamp returns the timestamp of the first event record. 
@@ -1253,61 +740,6 @@ func lastTimestamp() int64 { return 0 } -type jsonWriter struct { - w io.Writer - enc *json.Encoder -} - -func viewerDataTraceConsumer(w io.Writer, start, end int64) traceConsumer { - allFrames := make(map[string]traceviewer.Frame) - requiredFrames := make(map[string]traceviewer.Frame) - enc := json.NewEncoder(w) - written := 0 - index := int64(-1) - - io.WriteString(w, "{") - return traceConsumer{ - consumeTimeUnit: func(unit string) { - io.WriteString(w, `"displayTimeUnit":`) - enc.Encode(unit) - io.WriteString(w, ",") - }, - consumeViewerEvent: func(v *traceviewer.Event, required bool) { - index++ - if !required && (index < start || index > end) { - // not in the range. Skip! - return - } - walkStackFrames(allFrames, v.Stack, func(id int) { - s := strconv.Itoa(id) - requiredFrames[s] = allFrames[s] - }) - walkStackFrames(allFrames, v.EndStack, func(id int) { - s := strconv.Itoa(id) - requiredFrames[s] = allFrames[s] - }) - if written == 0 { - io.WriteString(w, `"traceEvents": [`) - } - if written > 0 { - io.WriteString(w, ",") - } - enc.Encode(v) - // TODO: get rid of the extra \n inserted by enc.Encode. - // Same should be applied to splittingTraceConsumer. - written++ - }, - consumeViewerFrame: func(k string, v traceviewer.Frame) { - allFrames[k] = v - }, - flush: func() { - io.WriteString(w, `], "stackFrames":`) - enc.Encode(requiredFrames) - io.WriteString(w, `}`) - }, - } -} - // Mapping from more reasonable color names to the reserved color names in // https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50 // The chrome trace viewer allows only those as cname values. 
diff --git a/src/cmd/trace/trace_test.go b/src/cmd/trace/trace_test.go index 87fd3a3515..d315fad471 100644 --- a/src/cmd/trace/trace_test.go +++ b/src/cmd/trace/trace_test.go @@ -7,9 +7,10 @@ package main import ( - "cmd/internal/traceviewer" "context" "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" "io" rtrace "runtime/trace" "strings" @@ -78,10 +79,10 @@ func TestGoroutineCount(t *testing.T) { // Use the default viewerDataTraceConsumer but replace // consumeViewerEvent to intercept the ViewerEvents for testing. - c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1) - c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) { + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { if ev.Name == "Goroutines" { - cnt := ev.Arg.(*goroutineCountersArg) + cnt := ev.Arg.(*format.GoroutineCountersArg) if cnt.Runnable+cnt.Running > 2 { t.Errorf("goroutine count=%+v; want no more than 2 goroutines in runnable/running state", cnt) } @@ -131,7 +132,7 @@ func TestGoroutineFilter(t *testing.T) { gs: map[uint64]bool{10: true}, } - c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) if err := generateTrace(params, c); err != nil { t.Fatalf("generateTrace failed: %v", err) } @@ -163,10 +164,10 @@ func TestPreemptedMarkAssist(t *testing.T) { endTime: int64(1<<63 - 1), } - c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) marks := 0 - c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) { + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { if strings.Contains(ev.Name, "MARK ASSIST") { marks++ } @@ -208,16 +209,16 @@ func TestFoo(t *testing.T) { params := &traceParams{ parsed: res, - mode: modeTaskOriented, + mode: traceviewer.ModeTaskOriented, startTime: task.firstTimestamp() - 1, endTime: task.lastTimestamp() + 1, tasks: 
[]*taskDesc{task}, } - c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) var logBeforeTaskEnd, logAfterTaskEnd bool - c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) { + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { if ev.Name == "log before task ends" { logBeforeTaskEnd = true } diff --git a/src/cmd/trace/trace_unix_test.go b/src/cmd/trace/trace_unix_test.go index 87ad86fce8..e634635427 100644 --- a/src/cmd/trace/trace_unix_test.go +++ b/src/cmd/trace/trace_unix_test.go @@ -8,9 +8,10 @@ package main import ( "bytes" - "cmd/internal/traceviewer" "internal/goexperiment" traceparser "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" "io" "runtime" "runtime/trace" @@ -87,10 +88,10 @@ func TestGoroutineInSyscall(t *testing.T) { // Check only one thread for the pipe read goroutine is // considered in-syscall. - c := viewerDataTraceConsumer(io.Discard, 0, 1<<63-1) - c.consumeViewerEvent = func(ev *traceviewer.Event, _ bool) { + c := traceviewer.ViewerDataTraceConsumer(io.Discard, 0, 1<<63-1) + c.ConsumeViewerEvent = func(ev *format.Event, _ bool) { if ev.Name == "Threads" { - arg := ev.Arg.(*threadCountersArg) + arg := ev.Arg.(*format.ThreadCountersArg) if arg.InSyscall > 1 { t.Errorf("%d threads in syscall at time %v; want less than 1 thread in syscall", arg.InSyscall, ev.Time) } diff --git a/src/cmd/trace/v2/gen.go b/src/cmd/trace/v2/gen.go new file mode 100644 index 0000000000..f6a4bb643b --- /dev/null +++ b/src/cmd/trace/v2/gen.go @@ -0,0 +1,394 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "strings" +) + +// generator is an interface for generating a JSON trace for the trace viewer +// from a trace. 
Each method in this interface is a handler for a kind of event +// that is interesting to render in the UI via the JSON trace. +type generator interface { + // Global parts. + Sync() // Notifies the generator of an EventSync event. + StackSample(ctx *traceContext, ev *tracev2.Event) + GlobalRange(ctx *traceContext, ev *tracev2.Event) + GlobalMetric(ctx *traceContext, ev *tracev2.Event) + + // Goroutine parts. + GoroutineLabel(ctx *traceContext, ev *tracev2.Event) + GoroutineRange(ctx *traceContext, ev *tracev2.Event) + GoroutineTransition(ctx *traceContext, ev *tracev2.Event) + + // Proc parts. + ProcRange(ctx *traceContext, ev *tracev2.Event) + ProcTransition(ctx *traceContext, ev *tracev2.Event) + + // User annotations. + Log(ctx *traceContext, ev *tracev2.Event) + + // Finish indicates the end of the trace and finalizes generation. + Finish(ctx *traceContext) +} + +// runGenerator produces a trace into ctx by running the generator over the parsed trace. +func runGenerator(ctx *traceContext, g generator, parsed *parsedTrace, opts *genOpts) { + for i := range parsed.events { + ev := &parsed.events[i] + + switch ev.Kind() { + case tracev2.EventSync: + g.Sync() + case tracev2.EventStackSample: + g.StackSample(ctx, ev) + case tracev2.EventRangeBegin, tracev2.EventRangeActive, tracev2.EventRangeEnd: + r := ev.Range() + switch r.Scope.Kind { + case tracev2.ResourceGoroutine: + g.GoroutineRange(ctx, ev) + case tracev2.ResourceProc: + g.ProcRange(ctx, ev) + case tracev2.ResourceNone: + g.GlobalRange(ctx, ev) + } + case tracev2.EventMetric: + g.GlobalMetric(ctx, ev) + case tracev2.EventLabel: + l := ev.Label() + if l.Resource.Kind == tracev2.ResourceGoroutine { + g.GoroutineLabel(ctx, ev) + } + case tracev2.EventStateTransition: + switch ev.StateTransition().Resource.Kind { + case tracev2.ResourceProc: + g.ProcTransition(ctx, ev) + case tracev2.ResourceGoroutine: + g.GoroutineTransition(ctx, ev) + } + case tracev2.EventLog: + g.Log(ctx, ev) + } + } + for i, task := range 
opts.tasks { + emitTask(ctx, task, i) + if opts.mode&traceviewer.ModeGoroutineOriented != 0 { + for _, region := range task.Regions { + emitRegion(ctx, region) + } + } + } + g.Finish(ctx) +} + +// emitTask emits information about a task into the trace viewer's event stream. +// +// sortIndex sets the order in which this task will appear related to other tasks, +// lowest first. +func emitTask(ctx *traceContext, task *trace.UserTaskSummary, sortIndex int) { + // Collect information about the task. + var startStack, endStack tracev2.Stack + var startG, endG tracev2.GoID + startTime, endTime := ctx.startTime, ctx.endTime + if task.Start != nil { + startStack = task.Start.Stack() + startG = task.Start.Goroutine() + startTime = task.Start.Time() + } + if task.End != nil { + endStack = task.End.Stack() + endG = task.End.Goroutine() + endTime = task.End.Time() + } + arg := struct { + ID uint64 `json:"id"` + StartG uint64 `json:"start_g,omitempty"` + EndG uint64 `json:"end_g,omitempty"` + }{ + ID: uint64(task.ID), + StartG: uint64(startG), + EndG: uint64(endG), + } + + // Emit the task slice and notify the emitter of the task. + ctx.Task(uint64(task.ID), fmt.Sprintf("T%d %s", task.ID, task.Name), sortIndex) + ctx.TaskSlice(traceviewer.SliceEvent{ + Name: task.Name, + Ts: ctx.elapsed(startTime), + Dur: endTime.Sub(startTime), + Resource: uint64(task.ID), + Stack: ctx.Stack(viewerFrames(startStack)), + EndStack: ctx.Stack(viewerFrames(endStack)), + Arg: arg, + }) + // Emit an arrow from the parent to the child. + if task.Parent != nil && task.Start != nil && task.Start.Kind() == tracev2.EventTaskBegin { + ctx.TaskArrow(traceviewer.ArrowEvent{ + Name: "newTask", + Start: ctx.elapsed(task.Start.Time()), + End: ctx.elapsed(task.Start.Time()), + FromResource: uint64(task.Parent.ID), + ToResource: uint64(task.ID), + FromStack: ctx.Stack(viewerFrames(task.Start.Stack())), + }) + } +} + +// emitRegion emits goroutine-based slice events to the UI. 
The caller +// must be emitting for a goroutine-oriented trace. +// +// TODO(mknyszek): Make regions part of the regular generator loop and +// treat them like ranges so that we can emit regions in traces oriented +// by proc or thread. +func emitRegion(ctx *traceContext, region *trace.UserRegionSummary) { + if region.Name == "" { + return + } + // Collect information about the region. + var startStack, endStack tracev2.Stack + goroutine := tracev2.NoGoroutine + startTime, endTime := ctx.startTime, ctx.endTime + if region.Start != nil { + startStack = region.Start.Stack() + startTime = region.Start.Time() + goroutine = region.Start.Goroutine() + } + if region.End != nil { + endStack = region.End.Stack() + endTime = region.End.Time() + goroutine = region.End.Goroutine() + } + if goroutine == tracev2.NoGoroutine { + return + } + arg := struct { + TaskID uint64 `json:"taskid"` + }{ + TaskID: uint64(region.TaskID), + } + ctx.AsyncSlice(traceviewer.AsyncSliceEvent{ + SliceEvent: traceviewer.SliceEvent{ + Name: region.Name, + Ts: ctx.elapsed(startTime), + Dur: endTime.Sub(startTime), + Resource: uint64(goroutine), + Stack: ctx.Stack(viewerFrames(startStack)), + EndStack: ctx.Stack(viewerFrames(endStack)), + Arg: arg, + }, + Category: "Region", + Scope: fmt.Sprintf("%x", region.TaskID), + TaskColorIndex: uint64(region.TaskID), + }) +} + +// Building blocks for generators. + +// stackSampleGenerator implements a generic handler for stack sample events. +// The provided resource is the resource the stack sample should count against. +type stackSampleGenerator[R resource] struct { + // getResource is a function to extract a resource ID from a stack sample event. + getResource func(*tracev2.Event) R +} + +// StackSample implements a stack sample event handler. It expects ev to be one such event. +func (g *stackSampleGenerator[R]) StackSample(ctx *traceContext, ev *tracev2.Event) { + id := g.getResource(ev) + if id == R(noResource) { + // We have nowhere to put this in the UI. 
+ return + } + ctx.Instant(traceviewer.InstantEvent{ + Name: "CPU profile sample", + Ts: ctx.elapsed(ev.Time()), + Resource: uint64(id), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + }) +} + +// globalRangeGenerator implements a generic handler for EventRange* events that pertain +// to tracev2.ResourceNone (the global scope). +type globalRangeGenerator struct { + ranges map[string]activeRange + seenSync bool +} + +// Sync notifies the generator of an EventSync event. +func (g *globalRangeGenerator) Sync() { + g.seenSync = true +} + +// GlobalRange implements a handler for EventRange* events whose Scope.Kind is ResourceNone. +// It expects ev to be one such event. +func (g *globalRangeGenerator) GlobalRange(ctx *traceContext, ev *tracev2.Event) { + if g.ranges == nil { + g.ranges = make(map[string]activeRange) + } + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.ranges[r.Name] = activeRange{ev.Time(), ev.Stack()} + case tracev2.EventRangeActive: + // If we've seen a Sync event, then Active events are always redundant. + if !g.seenSync { + // Otherwise, they extend back to the start of the trace. + g.ranges[r.Name] = activeRange{ctx.startTime, ev.Stack()} + } + case tracev2.EventRangeEnd: + // Only emit GC events, because we have nowhere to + // put other events. + ar := g.ranges[r.Name] + if strings.Contains(r.Name, "GC") { + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ev.Time().Sub(ar.time), + Resource: trace.GCP, + Stack: ctx.Stack(viewerFrames(ar.stack)), + EndStack: ctx.Stack(viewerFrames(ev.Stack())), + }) + } + delete(g.ranges, r.Name) + } +} + +// Finish flushes any outstanding ranges at the end of the trace. 
+func (g *globalRangeGenerator) Finish(ctx *traceContext) { + for name, ar := range g.ranges { + if !strings.Contains(name, "GC") { + continue + } + ctx.Slice(traceviewer.SliceEvent{ + Name: name, + Ts: ctx.elapsed(ar.time), + Dur: ctx.endTime.Sub(ar.time), + Resource: trace.GCP, + Stack: ctx.Stack(viewerFrames(ar.stack)), + }) + } +} + +// globalMetricGenerator implements a generic handler for Metric events. +type globalMetricGenerator struct { +} + +// GlobalMetric implements an event handler for EventMetric events. ev must be one such event. +func (g *globalMetricGenerator) GlobalMetric(ctx *traceContext, ev *tracev2.Event) { + m := ev.Metric() + switch m.Name { + case "/memory/classes/heap/objects:bytes": + ctx.HeapAlloc(ctx.elapsed(ev.Time()), m.Value.Uint64()) + case "/gc/heap/goal:bytes": + ctx.HeapGoal(ctx.elapsed(ev.Time()), m.Value.Uint64()) + case "/sched/gomaxprocs:threads": + ctx.Gomaxprocs(m.Value.Uint64()) + } +} + +// procRangeGenerator implements a generic handler for EventRange* events whose Scope.Kind is +// ResourceProc. +type procRangeGenerator struct { + ranges map[tracev2.Range]activeRange + seenSync bool +} + +// Sync notifies the generator of an EventSync event. +func (g *procRangeGenerator) Sync() { + g.seenSync = true +} + +// ProcRange implements a handler for EventRange* events whose Scope.Kind is ResourceProc. +// It expects ev to be one such event. +func (g *procRangeGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + if g.ranges == nil { + g.ranges = make(map[tracev2.Range]activeRange) + } + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.ranges[r] = activeRange{ev.Time(), ev.Stack()} + case tracev2.EventRangeActive: + // If we've seen a Sync event, then Active events are always redundant. + if !g.seenSync { + // Otherwise, they extend back to the start of the trace. + g.ranges[r] = activeRange{ctx.startTime, ev.Stack()} + } + case tracev2.EventRangeEnd: + // Emit proc-based ranges. 
+ ar := g.ranges[r] + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ev.Time().Sub(ar.time), + Resource: uint64(r.Scope.Proc()), + Stack: ctx.Stack(viewerFrames(ar.stack)), + EndStack: ctx.Stack(viewerFrames(ev.Stack())), + }) + delete(g.ranges, r) + } +} + +// Finish flushes any outstanding ranges at the end of the trace. +func (g *procRangeGenerator) Finish(ctx *traceContext) { + for r, ar := range g.ranges { + ctx.Slice(traceviewer.SliceEvent{ + Name: r.Name, + Ts: ctx.elapsed(ar.time), + Dur: ctx.endTime.Sub(ar.time), + Resource: uint64(r.Scope.Proc()), + Stack: ctx.Stack(viewerFrames(ar.stack)), + }) + } +} + +// activeRange represents an active EventRange* range. +type activeRange struct { + time tracev2.Time + stack tracev2.Stack +} + +// completedRange represents a completed EventRange* range. +type completedRange struct { + name string + startTime tracev2.Time + endTime tracev2.Time + startStack tracev2.Stack + endStack tracev2.Stack + arg any +} + +type logEventGenerator[R resource] struct { + // getResource is a function to extract a resource ID from a Log event. + getResource func(*tracev2.Event) R +} + +// Log implements a log event handler. It expects ev to be one such event. +func (g *logEventGenerator[R]) Log(ctx *traceContext, ev *tracev2.Event) { + id := g.getResource(ev) + if id == R(noResource) { + // We have nowhere to put this in the UI. + return + } + + // Construct the name to present. + log := ev.Log() + name := log.Message + if log.Category != "" { + name = "[" + log.Category + "] " + name + } + + // Emit an instant event. 
+ ctx.Instant(traceviewer.InstantEvent{ + Name: name, + Ts: ctx.elapsed(ev.Time()), + Category: "user event", + Resource: uint64(id), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + }) +} diff --git a/src/cmd/trace/v2/goroutinegen.go b/src/cmd/trace/v2/goroutinegen.go new file mode 100644 index 0000000000..c76bd8487a --- /dev/null +++ b/src/cmd/trace/v2/goroutinegen.go @@ -0,0 +1,167 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + tracev2 "internal/trace/v2" +) + +var _ generator = &goroutineGenerator{} + +type goroutineGenerator struct { + globalRangeGenerator + globalMetricGenerator + stackSampleGenerator[tracev2.GoID] + logEventGenerator[tracev2.GoID] + + gStates map[tracev2.GoID]*gState[tracev2.GoID] + focus tracev2.GoID + filter map[tracev2.GoID]struct{} +} + +func newGoroutineGenerator(ctx *traceContext, focus tracev2.GoID, filter map[tracev2.GoID]struct{}) *goroutineGenerator { + gg := new(goroutineGenerator) + rg := func(ev *tracev2.Event) tracev2.GoID { + return ev.Goroutine() + } + gg.stackSampleGenerator.getResource = rg + gg.logEventGenerator.getResource = rg + gg.gStates = make(map[tracev2.GoID]*gState[tracev2.GoID]) + gg.focus = focus + gg.filter = filter + + // Enable a filter on the emitter. 
+ if filter != nil { + ctx.SetResourceFilter(func(resource uint64) bool { + _, ok := filter[tracev2.GoID(resource)] + return ok + }) + } + return gg +} + +func (g *goroutineGenerator) Sync() { + g.globalRangeGenerator.Sync() +} + +func (g *goroutineGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *goroutineGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *goroutineGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. + gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.GoID](goID) + g.gStates[goID] = gs + } + + // Try to augment the name of the goroutine. + gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from.Executing() && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to.Executing() { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, goID, ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine unblocked. 
+ gs.unblock(ev.Time(), ev.Stack(), ev.Goroutine(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Goroutine(), ev.Stack()) + } + if from == tracev2.GoSyscall && to != tracev2.GoRunning { + // Exiting blocked syscall. + gs.syscallEnd(ev.Time(), true, ctx) + gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx) + } else if from == tracev2.GoSyscall { + // Check if we're exiting a syscall in a non-blocking way. + gs.syscallEnd(ev.Time(), false, ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no G or P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, goID, ev.Stack()) + } + + // Note down the goroutine transition. + _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *goroutineGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + // TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges + // that overlap with a goroutine's execution. +} + +func (g *goroutineGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + // Not needed. All relevant information for goroutines can be derived from goroutine transitions. +} + +func (g *goroutineGenerator) Finish(ctx *traceContext) { + ctx.SetResourceType("G") + + // Finish off global ranges. + g.globalRangeGenerator.Finish(ctx) + + // Finish off all the goroutine slices. + for id, gs := range g.gStates { + gs.finish(ctx) + + // Tell the emitter about the goroutines we want to render. + ctx.Resource(uint64(id), gs.name()) + } + + // Set the goroutine to focus on. 
+ if g.focus != tracev2.NoGoroutine { + ctx.Focus(uint64(g.focus)) + } +} diff --git a/src/cmd/trace/v2/goroutines.go b/src/cmd/trace/v2/goroutines.go new file mode 100644 index 0000000000..3cf366635a --- /dev/null +++ b/src/cmd/trace/v2/goroutines.go @@ -0,0 +1,420 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Goroutine-related profiles. + +package trace + +import ( + "cmp" + "fmt" + "html/template" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "log" + "net/http" + "slices" + "sort" + "strings" + "time" +) + +// GoroutinesHandlerFunc returns a HandlerFunc that serves list of goroutine groups. +func GoroutinesHandlerFunc(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // goroutineGroup describes a group of goroutines grouped by name. + type goroutineGroup struct { + Name string // Start function. + N int // Total number of goroutines in this group. + ExecTime time.Duration // Total execution time of all goroutines in this group. + } + // Accumulate groups by Name. + groupsByName := make(map[string]goroutineGroup) + for _, summary := range summaries { + group := groupsByName[summary.Name] + group.Name = summary.Name + group.N++ + group.ExecTime += summary.ExecTime + groupsByName[summary.Name] = group + } + var groups []goroutineGroup + for _, group := range groupsByName { + groups = append(groups, group) + } + slices.SortFunc(groups, func(a, b goroutineGroup) int { + return cmp.Compare(b.ExecTime, a.ExecTime) + }) + w.Header().Set("Content-Type", "text/html;charset=utf-8") + if err := templGoroutines.Execute(w, groups); err != nil { + log.Printf("failed to execute template: %v", err) + return + } + } +} + +var templGoroutines = template.Must(template.New("").Parse(` + + + +

    Goroutines

    +Below is a table of all goroutines in the trace grouped by start location and sorted by the total execution time of the group.
    +
    +Click a start location to view more details about that group.
    +
    + + + + + + +{{range $}} + + + + + +{{end}} +
    Start locationCountTotal execution time
    {{or .Name "(Inactive, no stack trace sampled)"}}{{.N}}{{.ExecTime}}
    + + +`)) + +// GoroutineHandler creates a handler that serves information about +// goroutines in a particular group. +func GoroutineHandler(summaries map[tracev2.GoID]*trace.GoroutineSummary) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + goroutineName := r.FormValue("name") + + type goroutine struct { + *trace.GoroutineSummary + NonOverlappingStats map[string]time.Duration + HasRangeTime bool + } + + // Collect all the goroutines in the group. + var ( + goroutines []goroutine + name string + totalExecTime, execTime time.Duration + maxTotalTime time.Duration + ) + validNonOverlappingStats := make(map[string]struct{}) + validRangeStats := make(map[string]struct{}) + for _, summary := range summaries { + totalExecTime += summary.ExecTime + + if summary.Name != goroutineName { + continue + } + nonOverlappingStats := summary.NonOverlappingStats() + for name := range nonOverlappingStats { + validNonOverlappingStats[name] = struct{}{} + } + var totalRangeTime time.Duration + for name, dt := range summary.RangeTime { + validRangeStats[name] = struct{}{} + totalRangeTime += dt + } + goroutines = append(goroutines, goroutine{ + GoroutineSummary: summary, + NonOverlappingStats: nonOverlappingStats, + HasRangeTime: totalRangeTime != 0, + }) + name = summary.Name + execTime += summary.ExecTime + if maxTotalTime < summary.TotalTime { + maxTotalTime = summary.TotalTime + } + } + + // Compute the percent of total execution time these goroutines represent. + execTimePercent := "" + if totalExecTime > 0 { + execTimePercent = fmt.Sprintf("%.2f%%", float64(execTime)/float64(totalExecTime)*100) + } + + // Sort. + sortBy := r.FormValue("sortby") + if _, ok := validNonOverlappingStats[sortBy]; ok { + slices.SortFunc(goroutines, func(a, b goroutine) int { + return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy]) + }) + } else { + // Sort by total time by default. 
+ slices.SortFunc(goroutines, func(a, b goroutine) int { + return cmp.Compare(b.TotalTime, a.TotalTime) + }) + } + + // Write down all the non-overlapping stats and sort them. + allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats)) + for name := range validNonOverlappingStats { + allNonOverlappingStats = append(allNonOverlappingStats, name) + } + slices.SortFunc(allNonOverlappingStats, func(a, b string) int { + if a == b { + return 0 + } + if a == "Execution time" { + return -1 + } + if b == "Execution time" { + return 1 + } + return cmp.Compare(a, b) + }) + + // Write down all the range stats and sort them. + allRangeStats := make([]string, 0, len(validRangeStats)) + for name := range validRangeStats { + allRangeStats = append(allRangeStats, name) + } + sort.Strings(allRangeStats) + + err := templGoroutine.Execute(w, struct { + Name string + N int + ExecTimePercent string + MaxTotal time.Duration + Goroutines []goroutine + NonOverlappingStats []string + RangeStats []string + }{ + Name: name, + N: len(goroutines), + ExecTimePercent: execTimePercent, + MaxTotal: maxTotalTime, + Goroutines: goroutines, + NonOverlappingStats: allNonOverlappingStats, + RangeStats: allRangeStats, + }) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +func stat2Color(statName string) string { + color := "#636363" + if strings.HasPrefix(statName, "Block time") { + color = "#d01c8b" + } + switch statName { + case "Sched wait time": + color = "#2c7bb6" + case "Syscall execution time": + color = "#7b3294" + case "Execution time": + color = "#d7191c" + } + return color +} + +var templGoroutine = template.Must(template.New("").Funcs(template.FuncMap{ + "percent": func(dividend, divisor time.Duration) template.HTML { + if divisor == 0 { + return "" + } + return template.HTML(fmt.Sprintf("(%.1f%%)", float64(dividend)/float64(divisor)*100)) + }, + "headerStyle": func(statName string) 
template.HTMLAttr { + return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName))) + }, + "barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr { + width := "0" + if divisor != 0 { + width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100) + } + return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName))) + }, +}).Parse(` + +Goroutines: {{.Name}} + + + + +

    Goroutines

    + +Table of contents + + +

    Summary

    + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
    Goroutine start location:{{.Name}}
    Count:{{.N}}
    Execution Time:{{.ExecTimePercent}} of total program execution time
    Network wait profile: graph (download)
    Sync block profile: graph (download)
    Syscall profile: graph (download)
    Scheduler wait profile: graph (download)
    + +

    Breakdown

    + +The table below breaks down where each goroutine is spent its time during the +traced period. +All of the columns except total time are non-overlapping. +
    +
    + + + + + + +{{range $.NonOverlappingStats}} + +{{end}} + +{{range .Goroutines}} + + + + + {{$Goroutine := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Goroutine.NonOverlappingStats .}} + + {{end}} + +{{end}} +
    Goroutine Total {{.}}
    {{.ID}} {{ .TotalTime.String }} +
    + {{$Goroutine := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Goroutine.NonOverlappingStats .}} + {{if $Time}} +   + {{end}} + {{end}} +
    +
    {{$Time.String}}
    + +

    Special ranges

    + +The table below describes how much of the traced period each goroutine spent in +certain special time ranges. +If a goroutine has spent no time in any special time ranges, it is excluded from +the table. +For example, how much time it spent helping the GC. Note that these times do +overlap with the times from the first table. +In general the goroutine may not be executing in these special time ranges. +For example, it may have blocked while trying to help the GC. +This must be taken into account when interpreting the data. +
    +
    + + + + + +{{range $.RangeStats}} + +{{end}} + +{{range .Goroutines}} + {{if .HasRangeTime}} + + + + {{$Goroutine := .}} + {{range $.RangeStats}} + {{$Time := index $Goroutine.RangeTime .}} + + {{end}} + + {{end}} +{{end}} +
    Goroutine Total {{.}}
    {{.ID}} {{ .TotalTime.String }} {{$Time.String}}
    +`)) diff --git a/src/cmd/trace/v2/gstate.go b/src/cmd/trace/v2/gstate.go new file mode 100644 index 0000000000..aeba7ecbc1 --- /dev/null +++ b/src/cmd/trace/v2/gstate.go @@ -0,0 +1,373 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" + "strings" +) + +// resource is a generic constraint interface for resource IDs. +type resource interface { + tracev2.GoID | tracev2.ProcID | tracev2.ThreadID +} + +// noResource indicates the lack of a resource. +const noResource = -1 + +// gState represents the trace viewer state of a goroutine in a trace. +// +// The type parameter on this type is the resource which is used to construct +// a timeline of events. e.g. R=ProcID for a proc-oriented view, R=GoID for +// a goroutine-oriented view, etc. +type gState[R resource] struct { + baseName string + named bool // Whether baseName has been set. + label string // EventLabel extension. + isSystemG bool + + executing R // The resource this goroutine is executing on. (Could be itself.) + + // lastStopStack is the stack trace at the point of the last + // call to the stop method. This tends to be a more reliable way + // of picking up stack traces, since the parser doesn't provide + // a stack for every state transition event. + lastStopStack tracev2.Stack + + // activeRanges is the set of all active ranges on the goroutine. + activeRanges map[string]activeRange + + // completedRanges is a list of ranges that completed since before the + // goroutine stopped executing. These are flushed on every stop or block. + completedRanges []completedRange + + // startRunning is the most recent event that caused a goroutine to + // transition to GoRunning. 
+ startRunningTime tracev2.Time + + // startSyscall is the most recent event that caused a goroutine to + // transition to GoSyscall. + syscall struct { + time tracev2.Time + stack tracev2.Stack + active bool + } + + // startBlockReason is the StateTransition.Reason of the most recent + // event that caused a gorotuine to transition to GoWaiting. + startBlockReason string + + // startCause is the event that allowed this goroutine to start running. + // It's used to generate flow events. This is typically something like + // an unblock event or a goroutine creation event. + // + // startCause.resource is the resource on which startCause happened, but is + // listed separately because the cause may have happened on a resource that + // isn't R (or perhaps on some abstract nebulous resource, like trace.NetpollP). + startCause struct { + time tracev2.Time + name string + resource uint64 + stack tracev2.Stack + } +} + +// newGState constructs a new goroutine state for the goroutine +// identified by the provided ID. +func newGState[R resource](goID tracev2.GoID) *gState[R] { + return &gState[R]{ + baseName: fmt.Sprintf("G%d", goID), + executing: R(noResource), + activeRanges: make(map[string]activeRange), + } +} + +// augmentName attempts to use stk to augment the name of the goroutine +// with stack information. This stack must be related to the goroutine +// in some way, but it doesn't really matter which stack. +func (gs *gState[R]) augmentName(stk tracev2.Stack) { + if gs.named { + return + } + if stk == tracev2.NoStack { + return + } + name := lastFunc(stk) + gs.baseName += fmt.Sprintf(" %s", name) + gs.named = true + gs.isSystemG = trace.IsSystemGoroutine(name) +} + +// setLabel adds an additional label to the goroutine's name. +func (gs *gState[R]) setLabel(label string) { + gs.label = label +} + +// name returns a name for the goroutine. 
+func (gs *gState[R]) name() string { + name := gs.baseName + if gs.label != "" { + name += " (" + gs.label + ")" + } + return name +} + +// setStartCause sets the reason a goroutine will be allowed to start soon. +// For example, via unblocking or exiting a blocked syscall. +func (gs *gState[R]) setStartCause(ts tracev2.Time, name string, resource uint64, stack tracev2.Stack) { + gs.startCause.time = ts + gs.startCause.name = name + gs.startCause.resource = resource + gs.startCause.stack = stack +} + +// created indicates that this goroutine was just created by the provided creator. +func (gs *gState[R]) created(ts tracev2.Time, creator R, stack tracev2.Stack) { + if creator == R(noResource) { + return + } + gs.setStartCause(ts, "go", uint64(creator), stack) +} + +// start indicates that a goroutine has started running on a proc. +func (gs *gState[R]) start(ts tracev2.Time, resource R, ctx *traceContext) { + // Set the time for all the active ranges. + for name := range gs.activeRanges { + gs.activeRanges[name] = activeRange{ts, tracev2.NoStack} + } + + if gs.startCause.name != "" { + // It has a start cause. Emit a flow event. + ctx.Arrow(traceviewer.ArrowEvent{ + Name: gs.startCause.name, + Start: ctx.elapsed(gs.startCause.time), + End: ctx.elapsed(ts), + FromResource: uint64(gs.startCause.resource), + ToResource: uint64(resource), + FromStack: ctx.Stack(viewerFrames(gs.startCause.stack)), + }) + gs.startCause.time = 0 + gs.startCause.name = "" + gs.startCause.resource = 0 + gs.startCause.stack = tracev2.NoStack + } + gs.executing = resource + gs.startRunningTime = ts +} + +// syscallBegin indicates that the goroutine entered a syscall on a proc. 
func (gs *gState[R]) syscallBegin(ts tracev2.Time, resource R, stack tracev2.Stack) {
	// Record when and where the syscall began so syscallEnd can emit a
	// "syscall" slice covering its full duration.
	gs.syscall.time = ts
	gs.syscall.stack = stack
	gs.syscall.active = true
	// If the goroutine wasn't already executing on a resource, it is now.
	if gs.executing == R(noResource) {
		gs.executing = resource
		gs.startRunningTime = ts
	}
}

// syscallEnd ends the syscall slice, wherever the syscall is at. This is orthogonal
// to blockedSyscallEnd -- both must be called when a syscall ends and that syscall
// blocked. They're kept separate because syscallEnd indicates the point at which the
// goroutine is no longer executing on the resource (e.g. a proc) whereas blockedSyscallEnd
// is the point at which the goroutine actually exited the syscall regardless of which
// resource that happened on.
func (gs *gState[R]) syscallEnd(ts tracev2.Time, blocked bool, ctx *traceContext) {
	if !gs.syscall.active {
		return
	}
	blockString := "no"
	if blocked {
		blockString = "yes"
	}
	// Queue the completed syscall slice; completedRanges is flushed by
	// the next call to stop.
	gs.completedRanges = append(gs.completedRanges, completedRange{
		name:       "syscall",
		startTime:  gs.syscall.time,
		endTime:    ts,
		startStack: gs.syscall.stack,
		arg:        format.BlockedArg{Blocked: blockString},
	})
	// Clear the in-progress syscall state.
	gs.syscall.active = false
	gs.syscall.time = 0
	gs.syscall.stack = tracev2.NoStack
}

// blockedSyscallEnd indicates the point at which the blocked syscall ended. This is distinct
// and orthogonal to syscallEnd; both must be called if the syscall blocked. This sets up an instant
// to emit a flow event from, indicating explicitly that this goroutine was unblocked by the system.
func (gs *gState[R]) blockedSyscallEnd(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
	name := "exit blocked syscall"
	gs.setStartCause(ts, name, trace.SyscallP, stack)

	// Emit a syscall exit instant event for the "Syscall" lane.
	ctx.Instant(traceviewer.InstantEvent{
		Name:     name,
		Ts:       ctx.elapsed(ts),
		Resource: trace.SyscallP,
		Stack:    ctx.Stack(viewerFrames(stack)),
	})
}

// unblock indicates that the goroutine gs represents has been unblocked.
func (gs *gState[R]) unblock(ts tracev2.Time, stack tracev2.Stack, resource R, ctx *traceContext) {
	name := "unblock"
	viewerResource := uint64(resource)
	if gs.startBlockReason != "" {
		name = fmt.Sprintf("%s (%s)", name, gs.startBlockReason)
	}
	if strings.Contains(gs.startBlockReason, "network") {
		// Attribute the network instant to the nebulous "NetpollP" if
		// resource isn't a thread, because there's a good chance that
		// resource isn't going to be valid in this case.
		//
		// TODO(mknyszek): Handle this invalidness in a more general way.
		if _, ok := any(resource).(tracev2.ThreadID); !ok {
			// Emit an unblock instant event for the "Network" lane.
			viewerResource = trace.NetpollP
		}
		ctx.Instant(traceviewer.InstantEvent{
			Name:     name,
			Ts:       ctx.elapsed(ts),
			Resource: viewerResource,
			Stack:    ctx.Stack(viewerFrames(stack)),
		})
	}
	gs.startBlockReason = ""
	// Only record a start cause when there's a valid resource to tie the
	// eventual flow arrow to.
	if viewerResource != 0 {
		gs.setStartCause(ts, name, viewerResource, stack)
	}
}

// block indicates that the goroutine has stopped executing on a proc -- specifically,
// it blocked for some reason.
func (gs *gState[R]) block(ts tracev2.Time, stack tracev2.Stack, reason string, ctx *traceContext) {
	// Remember why we blocked so that unblock can annotate its instant.
	gs.startBlockReason = reason
	gs.stop(ts, stack, ctx)
}

// stop indicates that the goroutine has stopped executing on a proc.
func (gs *gState[R]) stop(ts tracev2.Time, stack tracev2.Stack, ctx *traceContext) {
	// Emit the execution time slice. Use the stack recorded at the
	// previous stop (see lastStopStack) rather than the current one.
	var stk int
	if gs.lastStopStack != tracev2.NoStack {
		stk = ctx.Stack(viewerFrames(gs.lastStopStack))
	}
	// Check invariants.
	if gs.startRunningTime == 0 {
		panic("silently broken trace or generator invariant (startRunningTime != 0) not held")
	}
	if gs.executing == R(noResource) {
		panic("non-executing goroutine stopped")
	}
	ctx.Slice(traceviewer.SliceEvent{
		Name:     gs.name(),
		Ts:       ctx.elapsed(gs.startRunningTime),
		Dur:      ts.Sub(gs.startRunningTime),
		Resource: uint64(gs.executing),
		Stack:    stk,
	})

	// Flush completed ranges.
	for _, cr := range gs.completedRanges {
		ctx.Slice(traceviewer.SliceEvent{
			Name:     cr.name,
			Ts:       ctx.elapsed(cr.startTime),
			Dur:      cr.endTime.Sub(cr.startTime),
			Resource: uint64(gs.executing),
			Stack:    ctx.Stack(viewerFrames(cr.startStack)),
			EndStack: ctx.Stack(viewerFrames(cr.endStack)),
			Arg:      cr.arg,
		})
	}
	gs.completedRanges = gs.completedRanges[:0]

	// Continue in-progress ranges: emit a slice covering the executing
	// span; the range itself stays active (time reset below) and will be
	// resumed on the next start.
	for name, r := range gs.activeRanges {
		// Check invariant.
		if r.time == 0 {
			panic("silently broken trace or generator invariant (activeRanges time != 0) not held")
		}
		ctx.Slice(traceviewer.SliceEvent{
			Name:     name,
			Ts:       ctx.elapsed(r.time),
			Dur:      ts.Sub(r.time),
			Resource: uint64(gs.executing),
			Stack:    ctx.Stack(viewerFrames(r.stack)),
		})
	}

	// Clear the range info.
	for name := range gs.activeRanges {
		gs.activeRanges[name] = activeRange{0, tracev2.NoStack}
	}

	gs.startRunningTime = 0
	gs.lastStopStack = stack
	gs.executing = R(noResource)
}

// finish writes out any in-progress slices as if the goroutine stopped.
// This must only be used once the trace has been fully processed and no
// further events will be processed. This method may leave the gState in
// an inconsistent state.
func (gs *gState[R]) finish(ctx *traceContext) {
	if gs.executing != R(noResource) {
		gs.syscallEnd(ctx.endTime, false, ctx)
		gs.stop(ctx.endTime, tracev2.NoStack, ctx)
	}
}

// rangeBegin indicates the start of a special range of time.
+func (gs *gState[R]) rangeBegin(ts tracev2.Time, name string, stack tracev2.Stack) { + if gs.executing != R(noResource) { + // If we're executing, start the slice from here. + gs.activeRanges[name] = activeRange{ts, stack} + } else { + // If the goroutine isn't executing, there's no place for + // us to create a slice from. Wait until it starts executing. + gs.activeRanges[name] = activeRange{0, stack} + } +} + +// rangeActive indicates that a special range of time has been in progress. +func (gs *gState[R]) rangeActive(name string) { + if gs.executing != R(noResource) { + // If we're executing, and the range is active, then start + // from wherever the goroutine started running from. + gs.activeRanges[name] = activeRange{gs.startRunningTime, tracev2.NoStack} + } else { + // If the goroutine isn't executing, there's no place for + // us to create a slice from. Wait until it starts executing. + gs.activeRanges[name] = activeRange{0, tracev2.NoStack} + } +} + +// rangeEnd indicates the end of a special range of time. +func (gs *gState[R]) rangeEnd(ts tracev2.Time, name string, stack tracev2.Stack, ctx *traceContext) { + if gs.executing != R(noResource) { + r := gs.activeRanges[name] + gs.completedRanges = append(gs.completedRanges, completedRange{ + name: name, + startTime: r.time, + endTime: ts, + startStack: r.stack, + endStack: stack, + }) + } + delete(gs.activeRanges, name) +} + +func lastFunc(s tracev2.Stack) string { + var last tracev2.StackFrame + s.Frames(func(f tracev2.StackFrame) bool { + last = f + return true + }) + return last.Func +} diff --git a/src/cmd/trace/v2/jsontrace.go b/src/cmd/trace/v2/jsontrace.go new file mode 100644 index 0000000000..e4ca613678 --- /dev/null +++ b/src/cmd/trace/v2/jsontrace.go @@ -0,0 +1,229 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "cmp" + "log" + "math" + "net/http" + "slices" + "strconv" + "time" + + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" +) + +func JSONTraceHandler(parsed *parsedTrace) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + opts := defaultGenOpts() + + switch r.FormValue("view") { + case "thread": + opts.mode = traceviewer.ModeThreadOriented + } + if goids := r.FormValue("goid"); goids != "" { + // Render trace focused on a particular goroutine. + + id, err := strconv.ParseUint(goids, 10, 64) + if err != nil { + log.Printf("failed to parse goid parameter %q: %v", goids, err) + return + } + goid := tracev2.GoID(id) + g, ok := parsed.summary.Goroutines[goid] + if !ok { + log.Printf("failed to find goroutine %d", goid) + return + } + opts.mode = traceviewer.ModeGoroutineOriented + if g.StartTime != 0 { + opts.startTime = g.StartTime.Sub(parsed.startTime()) + } else { + opts.startTime = 0 + } + if g.EndTime != 0 { + opts.endTime = g.EndTime.Sub(parsed.startTime()) + } else { // The goroutine didn't end. 
+ opts.endTime = parsed.endTime().Sub(parsed.startTime()) + } + opts.focusGoroutine = goid + opts.goroutines = trace.RelatedGoroutinesV2(parsed.events, goid) + } else if taskids := r.FormValue("focustask"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse focustask parameter %q: %v", taskids, err) + return + } + task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)] + if !ok || (task.Start == nil && task.End == nil) { + log.Printf("failed to find task with id %d", taskid) + return + } + opts.setTask(parsed, task) + } else if taskids := r.FormValue("taskid"); taskids != "" { + taskid, err := strconv.ParseUint(taskids, 10, 64) + if err != nil { + log.Printf("failed to parse taskid parameter %q: %v", taskids, err) + return + } + task, ok := parsed.summary.Tasks[tracev2.TaskID(taskid)] + if !ok { + log.Printf("failed to find task with id %d", taskid) + return + } + // This mode is goroutine-oriented. + opts.mode = traceviewer.ModeGoroutineOriented + opts.setTask(parsed, task) + + // Pick the goroutine to orient ourselves around by just + // trying to pick the earliest event in the task that makes + // any sense. Though, we always want the start if that's there. + var firstEv *tracev2.Event + if task.Start != nil { + firstEv = task.Start + } else { + for _, logEv := range task.Logs { + if firstEv == nil || logEv.Time() < firstEv.Time() { + firstEv = logEv + } + } + if task.End != nil && (firstEv == nil || task.End.Time() < firstEv.Time()) { + firstEv = task.End + } + } + if firstEv == nil || firstEv.Goroutine() == tracev2.NoGoroutine { + log.Printf("failed to find task with id %d", taskid) + return + } + + // Set the goroutine filtering options. + goid := firstEv.Goroutine() + opts.focusGoroutine = goid + goroutines := make(map[tracev2.GoID]struct{}) + for _, task := range opts.tasks { + // Find only directly involved goroutines. 
+ for id := range task.Goroutines { + goroutines[id] = struct{}{} + } + } + opts.goroutines = goroutines + } + + // Parse start and end options. Both or none must be present. + start := int64(0) + end := int64(math.MaxInt64) + if startStr, endStr := r.FormValue("start"), r.FormValue("end"); startStr != "" && endStr != "" { + var err error + start, err = strconv.ParseInt(startStr, 10, 64) + if err != nil { + log.Printf("failed to parse start parameter %q: %v", startStr, err) + return + } + + end, err = strconv.ParseInt(endStr, 10, 64) + if err != nil { + log.Printf("failed to parse end parameter %q: %v", endStr, err) + return + } + } + + c := traceviewer.ViewerDataTraceConsumer(w, start, end) + if err := generateTrace(parsed, opts, c); err != nil { + log.Printf("failed to generate trace: %v", err) + } + }) +} + +// traceContext is a wrapper around a traceviewer.Emitter with some additional +// information that's useful to most parts of trace viewer JSON emission. +type traceContext struct { + *traceviewer.Emitter + startTime tracev2.Time + endTime tracev2.Time +} + +// elapsed returns the elapsed time between the trace time and the start time +// of the trace. +func (ctx *traceContext) elapsed(now tracev2.Time) time.Duration { + return now.Sub(ctx.startTime) +} + +type genOpts struct { + mode traceviewer.Mode + startTime time.Duration + endTime time.Duration + + // Used if mode != 0. + focusGoroutine tracev2.GoID + goroutines map[tracev2.GoID]struct{} // Goroutines to be displayed for goroutine-oriented or task-oriented view. goroutines[0] is the main goroutine. + tasks []*trace.UserTaskSummary +} + +// setTask sets a task to focus on. +func (opts *genOpts) setTask(parsed *parsedTrace, task *trace.UserTaskSummary) { + opts.mode |= traceviewer.ModeTaskOriented + if task.Start != nil { + opts.startTime = task.Start.Time().Sub(parsed.startTime()) + } else { // The task started before the trace did. 
+ opts.startTime = 0 + } + if task.End != nil { + opts.endTime = task.End.Time().Sub(parsed.startTime()) + } else { // The task didn't end. + opts.endTime = parsed.endTime().Sub(parsed.startTime()) + } + opts.tasks = task.Descendents() + slices.SortStableFunc(opts.tasks, func(a, b *trace.UserTaskSummary) int { + aStart, bStart := parsed.startTime(), parsed.startTime() + if a.Start != nil { + aStart = a.Start.Time() + } + if b.Start != nil { + bStart = b.Start.Time() + } + if a.Start != b.Start { + return cmp.Compare(aStart, bStart) + } + // Break ties with the end time. + aEnd, bEnd := parsed.endTime(), parsed.endTime() + if a.End != nil { + aEnd = a.End.Time() + } + if b.End != nil { + bEnd = b.End.Time() + } + return cmp.Compare(aEnd, bEnd) + }) +} + +func defaultGenOpts() *genOpts { + return &genOpts{ + startTime: time.Duration(0), + endTime: time.Duration(math.MaxInt64), + } +} + +func generateTrace(parsed *parsedTrace, opts *genOpts, c traceviewer.TraceConsumer) error { + ctx := &traceContext{ + Emitter: traceviewer.NewEmitter(c, opts.startTime, opts.endTime), + startTime: parsed.events[0].Time(), + endTime: parsed.events[len(parsed.events)-1].Time(), + } + defer ctx.Flush() + + var g generator + if opts.mode&traceviewer.ModeGoroutineOriented != 0 { + g = newGoroutineGenerator(ctx, opts.focusGoroutine, opts.goroutines) + } else if opts.mode&traceviewer.ModeThreadOriented != 0 { + g = newThreadGenerator() + } else { + g = newProcGenerator() + } + runGenerator(ctx, g, parsed, opts) + return nil +} diff --git a/src/cmd/trace/v2/jsontrace_test.go b/src/cmd/trace/v2/jsontrace_test.go new file mode 100644 index 0000000000..65ce041c4f --- /dev/null +++ b/src/cmd/trace/v2/jsontrace_test.go @@ -0,0 +1,291 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "bytes" + "encoding/json" + tracev1 "internal/trace" + "io" + "net/http/httptest" + "os" + "path/filepath" + "slices" + "strconv" + "strings" + "testing" + "time" + + "internal/trace/traceviewer/format" + "internal/trace/v2/raw" +) + +func TestJSONTraceHandler(t *testing.T) { + testPaths, err := filepath.Glob("./testdata/*.test") + if err != nil { + t.Fatalf("discovering tests: %v", err) + } + for _, testPath := range testPaths { + t.Run(filepath.Base(testPath), func(t *testing.T) { + parsed := getTestTrace(t, testPath) + data := recordJSONTraceHandlerResponse(t, parsed) + // TODO(mknyszek): Check that there's one at most goroutine per proc at any given time. + checkExecutionTimes(t, data) + checkPlausibleHeapMetrics(t, data) + // TODO(mknyszek): Check for plausible thread and goroutine metrics. + checkMetaNamesEmitted(t, data, "process_name", []string{"STATS", "PROCS"}) + checkMetaNamesEmitted(t, data, "thread_name", []string{"GC", "Network", "Timers", "Syscalls", "Proc 0"}) + checkProcStartStop(t, data) + checkSyscalls(t, data) + checkNetworkUnblock(t, data) + // TODO(mknyszek): Check for flow events. + }) + } +} + +func checkSyscalls(t *testing.T, data format.Data) { + data = filterViewerTrace(data, + filterEventName("syscall"), + filterStackRootFunc("main.blockingSyscall")) + if len(data.Events) <= 1 { + t.Errorf("got %d events, want > 1", len(data.Events)) + } + data = filterViewerTrace(data, filterBlocked("yes")) + if len(data.Events) != 1 { + t.Errorf("got %d events, want 1", len(data.Events)) + } +} + +type eventFilterFn func(*format.Event, *format.Data) bool + +func filterEventName(name string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + return e.Name == name + } +} + +// filterGoRoutineName returns an event filter that returns true if the event's +// goroutine name is equal to name. 
+func filterGoRoutineName(name string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + return parseGoroutineName(e) == name + } +} + +// parseGoroutineName returns the goroutine name from the event's name field. +// E.g. if e.Name is "G42 main.cpu10", this returns "main.cpu10". +func parseGoroutineName(e *format.Event) string { + parts := strings.SplitN(e.Name, " ", 2) + if len(parts) != 2 || !strings.HasPrefix(parts[0], "G") { + return "" + } + return parts[1] +} + +// filterBlocked returns an event filter that returns true if the event's +// "blocked" argument is equal to blocked. +func filterBlocked(blocked string) eventFilterFn { + return func(e *format.Event, _ *format.Data) bool { + m, ok := e.Arg.(map[string]any) + if !ok { + return false + } + return m["blocked"] == blocked + } +} + +// filterStackRootFunc returns an event filter that returns true if the function +// at the root of the stack trace is named name. +func filterStackRootFunc(name string) eventFilterFn { + return func(e *format.Event, data *format.Data) bool { + frames := stackFrames(data, e.Stack) + rootFrame := frames[len(frames)-1] + return strings.HasPrefix(rootFrame, name+":") + } +} + +// filterViewerTrace returns a copy of data with only the events that pass all +// of the given filters. 
+func filterViewerTrace(data format.Data, fns ...eventFilterFn) (filtered format.Data) { + filtered = data + filtered.Events = nil + for _, e := range data.Events { + keep := true + for _, fn := range fns { + keep = keep && fn(e, &filtered) + } + if keep { + filtered.Events = append(filtered.Events, e) + } + } + return +} + +func stackFrames(data *format.Data, stackID int) (frames []string) { + for { + frame, ok := data.Frames[strconv.Itoa(stackID)] + if !ok { + return + } + frames = append(frames, frame.Name) + stackID = frame.Parent + } +} + +func checkProcStartStop(t *testing.T, data format.Data) { + procStarted := map[uint64]bool{} + for _, e := range data.Events { + if e.Name == "proc start" { + if procStarted[e.TID] == true { + t.Errorf("proc started twice: %d", e.TID) + } + procStarted[e.TID] = true + } + if e.Name == "proc stop" { + if procStarted[e.TID] == false { + t.Errorf("proc stopped twice: %d", e.TID) + } + procStarted[e.TID] = false + } + } + if got, want := len(procStarted), 8; got != want { + t.Errorf("wrong number of procs started/stopped got=%d want=%d", got, want) + } +} + +func checkNetworkUnblock(t *testing.T, data format.Data) { + count := 0 + var netBlockEv *format.Event + for _, e := range data.Events { + if e.TID == tracev1.NetpollP && e.Name == "unblock (network)" && e.Phase == "I" && e.Scope == "t" { + count++ + netBlockEv = e + } + } + if netBlockEv == nil { + t.Error("failed to find a network unblock") + } + if count == 0 { + t.Errorf("found zero network block events, want at least one") + } + // TODO(mknyszek): Check for the flow of this event to some slice event of a goroutine running. 
+} + +func checkExecutionTimes(t *testing.T, data format.Data) { + cpu10 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu10"))) + cpu20 := sumExecutionTime(filterViewerTrace(data, filterGoRoutineName("main.cpu20"))) + if cpu10 <= 0 || cpu20 <= 0 || cpu10 >= cpu20 { + t.Errorf("bad execution times: cpu10=%v, cpu20=%v", cpu10, cpu20) + } +} + +func checkMetaNamesEmitted(t *testing.T, data format.Data, category string, want []string) { + t.Helper() + names := metaEventNameArgs(category, data) + for _, wantName := range want { + if !slices.Contains(names, wantName) { + t.Errorf("%s: names=%v, want %q", category, names, wantName) + } + } +} + +func metaEventNameArgs(category string, data format.Data) (names []string) { + for _, e := range data.Events { + if e.Name == category && e.Phase == "M" { + names = append(names, e.Arg.(map[string]any)["name"].(string)) + } + } + return +} + +func checkPlausibleHeapMetrics(t *testing.T, data format.Data) { + hms := heapMetrics(data) + var nonZeroAllocated, nonZeroNextGC bool + for _, hm := range hms { + if hm.Allocated > 0 { + nonZeroAllocated = true + } + if hm.NextGC > 0 { + nonZeroNextGC = true + } + } + + if !nonZeroAllocated { + t.Errorf("nonZeroAllocated=%v, want true", nonZeroAllocated) + } + if !nonZeroNextGC { + t.Errorf("nonZeroNextGC=%v, want true", nonZeroNextGC) + } +} + +func heapMetrics(data format.Data) (metrics []format.HeapCountersArg) { + for _, e := range data.Events { + if e.Phase == "C" && e.Name == "Heap" { + j, _ := json.Marshal(e.Arg) + var metric format.HeapCountersArg + json.Unmarshal(j, &metric) + metrics = append(metrics, metric) + } + } + return +} + +func recordJSONTraceHandlerResponse(t *testing.T, parsed *parsedTrace) format.Data { + h := JSONTraceHandler(parsed) + recorder := httptest.NewRecorder() + r := httptest.NewRequest("GET", "/jsontrace", nil) + h.ServeHTTP(recorder, r) + + var data format.Data + if err := json.Unmarshal(recorder.Body.Bytes(), &data); err != nil { + 
t.Fatal(err) + } + return data +} + +func sumExecutionTime(data format.Data) (sum time.Duration) { + for _, e := range data.Events { + sum += time.Duration(e.Dur) * time.Microsecond + } + return +} + +func getTestTrace(t *testing.T, testPath string) *parsedTrace { + t.Helper() + + // First read in the text trace and write it out as bytes. + f, err := os.Open(testPath) + if err != nil { + t.Fatalf("failed to open test %s: %v", testPath, err) + } + r, err := raw.NewTextReader(f) + if err != nil { + t.Fatalf("failed to read test %s: %v", testPath, err) + } + var trace bytes.Buffer + w, err := raw.NewWriter(&trace, r.Version()) + if err != nil { + t.Fatalf("failed to write out test %s: %v", testPath, err) + } + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("failed to read test %s: %v", testPath, err) + } + if err := w.WriteEvent(ev); err != nil { + t.Fatalf("failed to write out test %s: %v", testPath, err) + } + } + + // Parse the test trace. + parsed, err := parseTrace(&trace) + if err != nil { + t.Fatalf("failed to parse trace: %v", err) + } + return parsed +} diff --git a/src/cmd/trace/v2/main.go b/src/cmd/trace/v2/main.go new file mode 100644 index 0000000000..0a60ef04db --- /dev/null +++ b/src/cmd/trace/v2/main.go @@ -0,0 +1,190 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "io" + "log" + "net" + "net/http" + "os" + + "internal/trace/v2/raw" + + "cmd/internal/browser" +) + +// Main is the main function for cmd/trace v2. +func Main(traceFile, httpAddr, pprof string, debug int) error { + tracef, err := os.Open(traceFile) + if err != nil { + return fmt.Errorf("failed to read trace file: %w", err) + } + defer tracef.Close() + + // Debug flags. 
+ switch debug { + case 1: + return debugProcessedEvents(tracef) + case 2: + return debugRawEvents(tracef) + } + + ln, err := net.Listen("tcp", httpAddr) + if err != nil { + return fmt.Errorf("failed to create server socket: %w", err) + } + addr := "http://" + ln.Addr().String() + + log.Print("Preparing trace for viewer...") + parsed, err := parseTrace(tracef) + if err != nil { + return err + } + // N.B. tracef not needed after this point. + // We might double-close, but that's fine; we ignore the error. + tracef.Close() + + log.Print("Splitting trace for viewer...") + ranges, err := splitTrace(parsed) + if err != nil { + return err + } + + log.Printf("Opening browser. Trace viewer is listening on %s", addr) + browser.Open(addr) + + mutatorUtil := func(flags trace.UtilFlags) ([][]trace.MutatorUtil, error) { + return trace.MutatorUtilizationV2(parsed.events, flags), nil + } + + mux := http.NewServeMux() + + // Main endpoint. + mux.Handle("/", traceviewer.MainHandler([]traceviewer.View{ + {Type: traceviewer.ViewProc, Ranges: ranges}, + // N.B. Use the same ranges for threads. It takes a long time to compute + // the split a second time, but the makeup of the events are similar enough + // that this is still a good split. + {Type: traceviewer.ViewThread, Ranges: ranges}, + })) + + // Catapult handlers. + mux.Handle("/trace", traceviewer.TraceHandler()) + mux.Handle("/jsontrace", JSONTraceHandler(parsed)) + mux.Handle("/static/", traceviewer.StaticHandler()) + + // Goroutines handlers. + mux.HandleFunc("/goroutines", GoroutinesHandlerFunc(parsed.summary.Goroutines)) + mux.HandleFunc("/goroutine", GoroutineHandler(parsed.summary.Goroutines)) + + // MMU handler. + mux.HandleFunc("/mmu", traceviewer.MMUHandlerFunc(ranges, mutatorUtil)) + + // Basic pprof endpoints. 
+ mux.HandleFunc("/io", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofIO(), parsed))) + mux.HandleFunc("/block", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofBlock(), parsed))) + mux.HandleFunc("/syscall", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSyscall(), parsed))) + mux.HandleFunc("/sched", traceviewer.SVGProfileHandlerFunc(pprofByGoroutine(computePprofSched(), parsed))) + + // Region-based pprof endpoints. + mux.HandleFunc("/regionio", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofIO(), parsed))) + mux.HandleFunc("/regionblock", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofBlock(), parsed))) + mux.HandleFunc("/regionsyscall", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSyscall(), parsed))) + mux.HandleFunc("/regionsched", traceviewer.SVGProfileHandlerFunc(pprofByRegion(computePprofSched(), parsed))) + + // Region endpoints. + mux.HandleFunc("/userregions", UserRegionsHandlerFunc(parsed)) + mux.HandleFunc("/userregion", UserRegionHandlerFunc(parsed)) + + // Task endpoints. 
+ mux.HandleFunc("/usertasks", UserTasksHandlerFunc(parsed)) + mux.HandleFunc("/usertask", UserTaskHandlerFunc(parsed)) + + err = http.Serve(ln, mux) + return fmt.Errorf("failed to start http server: %w", err) +} + +type parsedTrace struct { + events []tracev2.Event + summary *trace.Summary +} + +func parseTrace(tr io.Reader) (*parsedTrace, error) { + r, err := tracev2.NewReader(tr) + if err != nil { + return nil, fmt.Errorf("failed to create trace reader: %w", err) + } + s := trace.NewSummarizer() + t := new(parsedTrace) + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } else if err != nil { + return nil, fmt.Errorf("failed to read event: %w", err) + } + t.events = append(t.events, ev) + s.Event(&t.events[len(t.events)-1]) + } + t.summary = s.Finalize() + return t, nil +} + +func (t *parsedTrace) startTime() tracev2.Time { + return t.events[0].Time() +} + +func (t *parsedTrace) endTime() tracev2.Time { + return t.events[len(t.events)-1].Time() +} + +// splitTrace splits the trace into a number of ranges, each resulting in approx 100 MiB of +// json output (the trace viewer can hardly handle more). +func splitTrace(parsed *parsedTrace) ([]traceviewer.Range, error) { + // TODO(mknyszek): Split traces by generation by doing a quick first pass over the + // trace to identify all the generation boundaries. 
+ s, c := traceviewer.SplittingTraceConsumer(100 << 20) // 100 MiB + if err := generateTrace(parsed, defaultGenOpts(), c); err != nil { + return nil, err + } + return s.Ranges, nil +} + +func debugProcessedEvents(trace io.Reader) error { + tr, err := tracev2.NewReader(trace) + if err != nil { + return err + } + for { + ev, err := tr.ReadEvent() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + fmt.Println(ev.String()) + } +} + +func debugRawEvents(trace io.Reader) error { + rr, err := raw.NewReader(trace) + if err != nil { + return err + } + for { + ev, err := rr.ReadEvent() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + fmt.Println(ev.String()) + } +} diff --git a/src/cmd/trace/v2/pprof.go b/src/cmd/trace/v2/pprof.go new file mode 100644 index 0000000000..05895eda3d --- /dev/null +++ b/src/cmd/trace/v2/pprof.go @@ -0,0 +1,336 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Serving of pprof-like profiles. 
package trace

import (
	"cmp"
	"fmt"
	"internal/trace"
	"internal/trace/traceviewer"
	tracev2 "internal/trace/v2"
	"net/http"
	"slices"
	"strings"
	"time"
)

// pprofByGoroutine returns a ProfileFunc that runs compute over the
// intervals of the goroutines whose name matches the request's "name"
// form value.
func pprofByGoroutine(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc {
	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
		name := r.FormValue("name")
		gToIntervals, err := pprofMatchingGoroutines(name, t)
		if err != nil {
			return nil, err
		}
		return compute(gToIntervals, t.events)
	}
}

// pprofByRegion returns a ProfileFunc that runs compute over the
// intervals of the regions matched by the filter built from the request.
func pprofByRegion(compute computePprofFunc, t *parsedTrace) traceviewer.ProfileFunc {
	return func(r *http.Request) ([]traceviewer.ProfileRecord, error) {
		filter, err := newRegionFilter(r)
		if err != nil {
			return nil, err
		}
		gToIntervals, err := pprofMatchingRegions(filter, t)
		if err != nil {
			return nil, err
		}
		return compute(gToIntervals, t.events)
	}
}

// pprofMatchingGoroutines returns the ids of goroutines of the matching name
// and its interval. Returns an error if no goroutine matches name.
func pprofMatchingGoroutines(name string, t *parsedTrace) (map[tracev2.GoID][]interval, error) {
	res := make(map[tracev2.GoID][]interval)
	for _, g := range t.summary.Goroutines {
		if g.Name != name {
			continue
		}
		endTime := g.EndTime
		if g.EndTime == 0 {
			endTime = t.endTime() // Use the trace end time, since the goroutine is still live then.
		}
		res[g.ID] = []interval{{start: g.StartTime, end: endTime}}
	}
	if len(res) == 0 {
		return nil, fmt.Errorf("failed to find matching goroutines for name: %s", name)
	}
	return res, nil
}

// pprofMatchingRegions returns the time intervals of matching regions
// grouped by the goroutine id. If the filter is nil, returns nil without an error.
+func pprofMatchingRegions(filter *regionFilter, t *parsedTrace) (map[tracev2.GoID][]interval, error) { + if filter == nil { + return nil, nil + } + + gToIntervals := make(map[tracev2.GoID][]interval) + for _, g := range t.summary.Goroutines { + for _, r := range g.Regions { + if !filter.match(t, r) { + continue + } + gToIntervals[g.ID] = append(gToIntervals[g.ID], regionInterval(t, r)) + } + } + + for g, intervals := range gToIntervals { + // In order to remove nested regions and + // consider only the outermost regions, + // first, we sort based on the start time + // and then scan through to select only the outermost regions. + slices.SortFunc(intervals, func(a, b interval) int { + if c := cmp.Compare(a.start, b.start); c != 0 { + return c + } + return cmp.Compare(a.end, b.end) + }) + var lastTimestamp tracev2.Time + var n int + // Select only the outermost regions. + for _, i := range intervals { + if lastTimestamp <= i.start { + intervals[n] = i // new non-overlapping region starts. + lastTimestamp = i.end + n++ + } + // Otherwise, skip because this region overlaps with a previous region. + } + gToIntervals[g] = intervals[:n] + } + return gToIntervals, nil +} + +type computePprofFunc func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error) + +// computePprofIO returns a computePprofFunc that generates IO pprof-like profile (time spent in +// IO wait, currently only network blocking event). +func computePprofIO() computePprofFunc { + return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool { + return reason == "network" + }) +} + +// computePprofBlock returns a computePprofFunc that generates blocking pprof-like profile +// (time spent blocked on synchronization primitives). 
+func computePprofBlock() computePprofFunc { + return makeComputePprofFunc(tracev2.GoWaiting, func(reason string) bool { + return strings.Contains(reason, "chan") || strings.Contains(reason, "sync") || strings.Contains(reason, "select") + }) +} + +// computePprofSyscall returns a computePprofFunc that generates a syscall pprof-like +// profile (time spent in syscalls). +func computePprofSyscall() computePprofFunc { + return makeComputePprofFunc(tracev2.GoSyscall, func(_ string) bool { + return true + }) +} + +// computePprofSched returns a computePprofFunc that generates a scheduler latency pprof-like profile +// (time between a goroutine become runnable and actually scheduled for execution). +func computePprofSched() computePprofFunc { + return makeComputePprofFunc(tracev2.GoRunnable, func(_ string) bool { + return true + }) +} + +// makeComputePprofFunc returns a computePprofFunc that generates a profile of time goroutines spend +// in a particular state for the specified reasons. +func makeComputePprofFunc(state tracev2.GoState, trackReason func(string) bool) computePprofFunc { + return func(gToIntervals map[tracev2.GoID][]interval, events []tracev2.Event) ([]traceviewer.ProfileRecord, error) { + stacks := newStackMap() + tracking := make(map[tracev2.GoID]*tracev2.Event) + for i := range events { + ev := &events[i] + + // Filter out any non-state-transitions and events without stacks. + if ev.Kind() != tracev2.EventStateTransition { + continue + } + stack := ev.Stack() + if stack == tracev2.NoStack { + continue + } + + // The state transition has to apply to a goroutine. + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + continue + } + id := st.Resource.Goroutine() + _, new := st.Goroutine() + + // Check if we're tracking this goroutine. + startEv := tracking[id] + if startEv == nil { + // We're not. Start tracking if the new state + // matches what we want and the transition is + // for one of the reasons we care about. 
+				if new == state && trackReason(st.Reason) {
+					tracking[id] = ev
+				}
+				continue
+			}
+			// We're tracking this goroutine.
+			if new == state {
+				// We're tracking this goroutine, but it's just transitioning
+				// to the same state (this is a no-op).
+				continue
+			}
+			// The goroutine has transitioned out of the state we care about,
+			// so remove it from tracking and record the stack.
+			delete(tracking, id)
+
+			overlapping := pprofOverlappingDuration(gToIntervals, id, interval{startEv.Time(), ev.Time()})
+			if overlapping > 0 {
+				rec := stacks.getOrAdd(startEv.Stack())
+				rec.Count++
+				rec.Time += overlapping
+			}
+		}
+		return stacks.profile(), nil
+	}
+}
+
+// pprofOverlappingDuration returns the overlapping duration between
+// the time intervals in gToIntervals and the specified event.
+// If gToIntervals is nil, this simply returns the event's duration.
+func pprofOverlappingDuration(gToIntervals map[tracev2.GoID][]interval, id tracev2.GoID, sample interval) time.Duration {
+	if gToIntervals == nil { // No filtering.
+		return sample.duration()
+	}
+	intervals := gToIntervals[id]
+	if len(intervals) == 0 {
+		return 0
+	}
+
+	var overlapping time.Duration
+	for _, i := range intervals {
+		if o := i.overlap(sample); o > 0 {
+			overlapping += o
+		}
+	}
+	return overlapping
+}
+
+// interval represents a time interval in the trace.
+type interval struct {
+	start, end tracev2.Time
+}
+
+func (i interval) duration() time.Duration {
+	return i.end.Sub(i.start)
+}
+
+func (i1 interval) overlap(i2 interval) time.Duration {
+	// Assume start1 <= end1 and start2 <= end2
+	if i1.end < i2.start || i2.end < i1.start {
+		return 0
+	}
+	if i1.start < i2.start { // choose the later one
+		i1.start = i2.start
+	}
+	if i1.end > i2.end { // choose the earlier one
+		i1.end = i2.end
+	}
+	return i1.duration()
+}
+
+// pprofMaxStack is the extent of the deduplication we're willing to do.
+
+// Because slices aren't comparable and we want to leverage maps for deduplication,
+// we have to choose a fixed constant upper bound on the amount of frames we want
+// to support. In practice this is fine because there's a maximum depth to these
+// stacks anyway.
+const pprofMaxStack = 128
+
+// stackMap is a map of tracev2.Stack to some value V.
+type stackMap struct {
+	// stacks contains the full list of stacks in the set, however
+	// it is insufficient for deduplication because tracev2.Stack
+	// equality is only optimistic. If two tracev2.Stacks are equal,
+	// then they are guaranteed to be equal in content. If they are
+	// not equal, then they might still be equal in content.
+	stacks map[tracev2.Stack]*traceviewer.ProfileRecord
+
+	// pcs is the source-of-truth for deduplication. It is a map of
+	// the actual PCs in the stack to a tracev2.Stack.
+	pcs map[[pprofMaxStack]uint64]tracev2.Stack
+}
+
+func newStackMap() *stackMap {
+	return &stackMap{
+		stacks: make(map[tracev2.Stack]*traceviewer.ProfileRecord),
+		pcs:    make(map[[pprofMaxStack]uint64]tracev2.Stack),
+	}
+}
+
+func (m *stackMap) getOrAdd(stack tracev2.Stack) *traceviewer.ProfileRecord {
+	// Fast path: check to see if this exact stack is already in the map.
+	if rec, ok := m.stacks[stack]; ok {
+		return rec
+	}
+	// Slow path: the stack may still be in the map.
+
+	// Grab the stack's PCs as the source-of-truth.
+	var pcs [pprofMaxStack]uint64
+	pcsForStack(stack, &pcs)
+
+	// Check the source-of-truth.
+	var rec *traceviewer.ProfileRecord
+	if existing, ok := m.pcs[pcs]; ok {
+		// In the map.
+		rec = m.stacks[existing]
+		delete(m.stacks, existing)
+	} else {
+		// Not in the map.
+		rec = new(traceviewer.ProfileRecord)
+	}
+	// Insert regardless of whether we have a match in m.pcs.
+	// Even if we have a match, we want to keep the newest version
+	// of that stack, since we're much more likely to see it again
+	// as we iterate through the trace linearly. 
Simultaneously, we + // are likely to never see the old stack again. + m.pcs[pcs] = stack + m.stacks[stack] = rec + return rec +} + +func (m *stackMap) profile() []traceviewer.ProfileRecord { + prof := make([]traceviewer.ProfileRecord, 0, len(m.stacks)) + for stack, record := range m.stacks { + rec := *record + i := 0 + stack.Frames(func(frame tracev2.StackFrame) bool { + rec.Stack = append(rec.Stack, &trace.Frame{ + PC: frame.PC, + Fn: frame.Func, + File: frame.File, + Line: int(frame.Line), + }) + i++ + // Cut this off at pprofMaxStack because that's as far + // as our deduplication goes. + return i < pprofMaxStack + }) + prof = append(prof, rec) + } + return prof +} + +// pcsForStack extracts the first pprofMaxStack PCs from stack into pcs. +func pcsForStack(stack tracev2.Stack, pcs *[pprofMaxStack]uint64) { + i := 0 + stack.Frames(func(frame tracev2.StackFrame) bool { + pcs[i] = frame.PC + i++ + return i < len(pcs) + }) +} diff --git a/src/cmd/trace/v2/procgen.go b/src/cmd/trace/v2/procgen.go new file mode 100644 index 0000000000..41e379527f --- /dev/null +++ b/src/cmd/trace/v2/procgen.go @@ -0,0 +1,212 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "fmt" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" +) + +var _ generator = &procGenerator{} + +type procGenerator struct { + globalRangeGenerator + globalMetricGenerator + procRangeGenerator + stackSampleGenerator[tracev2.ProcID] + logEventGenerator[tracev2.ProcID] + + gStates map[tracev2.GoID]*gState[tracev2.ProcID] + inSyscall map[tracev2.ProcID]*gState[tracev2.ProcID] + maxProc tracev2.ProcID +} + +func newProcGenerator() *procGenerator { + pg := new(procGenerator) + rg := func(ev *tracev2.Event) tracev2.ProcID { + return ev.Proc() + } + pg.stackSampleGenerator.getResource = rg + pg.logEventGenerator.getResource = rg + pg.gStates = make(map[tracev2.GoID]*gState[tracev2.ProcID]) + pg.inSyscall = make(map[tracev2.ProcID]*gState[tracev2.ProcID]) + return pg +} + +func (g *procGenerator) Sync() { + g.globalRangeGenerator.Sync() + g.procRangeGenerator.Sync() +} + +func (g *procGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *procGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *procGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. + gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.ProcID](goID) + g.gStates[goID] = gs + } + // If we haven't already named this goroutine, try to name it. 
+ gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from == tracev2.GoRunning && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to == tracev2.GoRunning { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, ev.Proc(), ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine was unblocked. + gs.unblock(ev.Time(), ev.Stack(), ev.Proc(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Proc(), ev.Stack()) + } + if from == tracev2.GoSyscall && to != tracev2.GoRunning { + // Goroutine exited a blocked syscall. + gs.blockedSyscallEnd(ev.Time(), ev.Stack(), ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall && ev.Proc() != tracev2.NoProc { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, ev.Proc(), ev.Stack()) + g.inSyscall[ev.Proc()] = gs + } + // Check if we're exiting a non-blocking syscall. + _, didNotBlock := g.inSyscall[ev.Proc()] + if from == tracev2.GoSyscall && didNotBlock { + gs.syscallEnd(ev.Time(), false, ctx) + delete(g.inSyscall, ev.Proc()) + } + + // Note down the goroutine transition. 
+ _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *procGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + st := ev.StateTransition() + proc := st.Resource.Proc() + + g.maxProc = max(g.maxProc, proc) + viewerEv := traceviewer.InstantEvent{ + Resource: uint64(proc), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + } + + from, to := st.Proc() + if from == to { + // Filter out no-op events. + return + } + if to.Executing() { + start := ev.Time() + if from == tracev2.ProcUndetermined { + start = ctx.startTime + } + viewerEv.Name = "proc start" + viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} + viewerEv.Ts = ctx.elapsed(start) + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1) + } + if from.Executing() { + start := ev.Time() + viewerEv.Name = "proc stop" + viewerEv.Ts = ctx.elapsed(start) + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1) + + // Check if this proc was in a syscall before it stopped. + // This means the syscall blocked. We need to emit it to the + // viewer at this point because we only display the time the + // syscall occupied a P when the viewer is in per-P mode. + // + // TODO(mknyszek): We could do better in a per-M mode because + // all events have to happen on *some* thread, and in v2 traces + // we know what that thread is. + gs, ok := g.inSyscall[proc] + if ok { + // Emit syscall slice for blocked syscall. + gs.syscallEnd(start, true, ctx) + gs.stop(start, ev.Stack(), ctx) + delete(g.inSyscall, proc) + } + } + // TODO(mknyszek): Consider modeling procs differently and have them be + // transition to and from NotExist when GOMAXPROCS changes. We can emit + // events for this to clearly delineate GOMAXPROCS changes. 
+
+	if viewerEv.Name != "" {
+		ctx.Instant(viewerEv)
+	}
+}
+
+func (g *procGenerator) Finish(ctx *traceContext) {
+	ctx.SetResourceType("PROCS")
+
+	// Finish off ranges first. It doesn't really matter for the global ranges,
+	// but the proc ranges need to either be a subset of a goroutine slice or
+	// their own slice entirely. If the former, it needs to end first.
+	g.procRangeGenerator.Finish(ctx)
+	g.globalRangeGenerator.Finish(ctx)
+
+	// Finish off all the goroutine slices.
+	for _, gs := range g.gStates {
+		gs.finish(ctx)
+	}
+
+	// Name all the procs to the emitter.
+	for i := uint64(0); i <= uint64(g.maxProc); i++ {
+		ctx.Resource(i, fmt.Sprintf("Proc %v", i))
+	}
+}
diff --git a/src/cmd/trace/v2/regions.go b/src/cmd/trace/v2/regions.go
new file mode 100644
index 0000000000..5d04fd2ae5
--- /dev/null
+++ b/src/cmd/trace/v2/regions.go
@@ -0,0 +1,529 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package trace
+
+import (
+	"cmp"
+	"fmt"
+	"html/template"
+	"internal/trace"
+	"internal/trace/traceviewer"
+	tracev2 "internal/trace/v2"
+	"net/http"
+	"net/url"
+	"slices"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// UserRegionsHandlerFunc returns a HandlerFunc that reports all regions found in the trace.
+func UserRegionsHandlerFunc(t *parsedTrace) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		// Summarize all the regions.
+		summary := make(map[regionFingerprint]regionStats)
+		for _, g := range t.summary.Goroutines {
+			for _, r := range g.Regions {
+				id := fingerprintRegion(r)
+				stats, ok := summary[id]
+				if !ok {
+					stats.regionFingerprint = id
+				}
+				stats.add(t, r)
+				summary[id] = stats
+			}
+		}
+		// Sort regions by PC and name.
+ userRegions := make([]regionStats, 0, len(summary)) + for _, stats := range summary { + userRegions = append(userRegions, stats) + } + slices.SortFunc(userRegions, func(a, b regionStats) int { + if c := cmp.Compare(a.Type, b.Type); c != 0 { + return c + } + return cmp.Compare(a.Frame.PC, b.Frame.PC) + }) + // Emit table. + err := templUserRegionTypes.Execute(w, userRegions) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +// regionFingerprint is a way to categorize regions that goes just one step beyond the region's Type +// by including the top stack frame. +type regionFingerprint struct { + Frame tracev2.StackFrame + Type string +} + +func fingerprintRegion(r *trace.UserRegionSummary) regionFingerprint { + return regionFingerprint{ + Frame: regionTopStackFrame(r), + Type: r.Name, + } +} + +func regionTopStackFrame(r *trace.UserRegionSummary) tracev2.StackFrame { + var frame tracev2.StackFrame + if r.Start != nil && r.Start.Stack() != tracev2.NoStack { + r.Start.Stack().Frames(func(f tracev2.StackFrame) bool { + frame = f + return false + }) + } + return frame +} + +type regionStats struct { + regionFingerprint + Histogram traceviewer.TimeHistogram +} + +func (s *regionStats) UserRegionURL() func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/userregion?type=%s&pc=%x&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), s.Frame.PC, template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *regionStats) add(t *parsedTrace, region *trace.UserRegionSummary) { + s.Histogram.Add(regionInterval(t, region).duration()) +} + +var templUserRegionTypes = template.Must(template.New("").Parse(` + +Regions + + +

    Regions

    + +Below is a table containing a summary of all the user-defined regions in the trace. +Regions are grouped by the region type and the point at which the region started. +The rightmost column of the table contains a latency histogram for each region group. +Note that this histogram only counts regions that began and ended within the traced +period. +However, the "Count" column includes all regions, including those that only started +or ended during the traced period. +Regions that were active through the trace period were not recorded, and so are not +accounted for at all. +Click on the links to explore a breakdown of time spent for each region by goroutine +and user-defined task. +
    +
    + + + + + + + +{{range $}} + + + + + +{{end}} +
    Region typeCountDuration distribution (complete tasks)
    {{printf "%q" .Type}}
    {{.Frame.Func}} @ {{printf "0x%x" .Frame.PC}}
    {{.Frame.File}}:{{.Frame.Line}}
    {{.Histogram.Count}}{{.Histogram.ToHTML (.UserRegionURL)}}
    + + +`)) + +// UserRegionHandlerFunc returns a HandlerFunc that presents the details of the selected regions. +func UserRegionHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + // Construct the filter from the request. + filter, err := newRegionFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + // Collect all the regions with their goroutines. + type region struct { + *trace.UserRegionSummary + Goroutine tracev2.GoID + NonOverlappingStats map[string]time.Duration + HasRangeTime bool + } + var regions []region + var maxTotal time.Duration + validNonOverlappingStats := make(map[string]struct{}) + validRangeStats := make(map[string]struct{}) + for _, g := range t.summary.Goroutines { + for _, r := range g.Regions { + if !filter.match(t, r) { + continue + } + nonOverlappingStats := r.NonOverlappingStats() + for name := range nonOverlappingStats { + validNonOverlappingStats[name] = struct{}{} + } + var totalRangeTime time.Duration + for name, dt := range r.RangeTime { + validRangeStats[name] = struct{}{} + totalRangeTime += dt + } + regions = append(regions, region{ + UserRegionSummary: r, + Goroutine: g.ID, + NonOverlappingStats: nonOverlappingStats, + HasRangeTime: totalRangeTime != 0, + }) + if maxTotal < r.TotalTime { + maxTotal = r.TotalTime + } + } + } + + // Sort. + sortBy := r.FormValue("sortby") + if _, ok := validNonOverlappingStats[sortBy]; ok { + slices.SortFunc(regions, func(a, b region) int { + return cmp.Compare(b.NonOverlappingStats[sortBy], a.NonOverlappingStats[sortBy]) + }) + } else { + // Sort by total time by default. + slices.SortFunc(regions, func(a, b region) int { + return cmp.Compare(b.TotalTime, a.TotalTime) + }) + } + + // Write down all the non-overlapping stats and sort them. 
+ allNonOverlappingStats := make([]string, 0, len(validNonOverlappingStats)) + for name := range validNonOverlappingStats { + allNonOverlappingStats = append(allNonOverlappingStats, name) + } + slices.SortFunc(allNonOverlappingStats, func(a, b string) int { + if a == b { + return 0 + } + if a == "Execution time" { + return -1 + } + if b == "Execution time" { + return 1 + } + return cmp.Compare(a, b) + }) + + // Write down all the range stats and sort them. + allRangeStats := make([]string, 0, len(validRangeStats)) + for name := range validRangeStats { + allRangeStats = append(allRangeStats, name) + } + sort.Strings(allRangeStats) + + err = templUserRegionType.Execute(w, struct { + MaxTotal time.Duration + Regions []region + Name string + Filter *regionFilter + NonOverlappingStats []string + RangeStats []string + }{ + MaxTotal: maxTotal, + Regions: regions, + Name: filter.name, + Filter: filter, + NonOverlappingStats: allNonOverlappingStats, + RangeStats: allRangeStats, + }) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +var templUserRegionType = template.Must(template.New("").Funcs(template.FuncMap{ + "headerStyle": func(statName string) template.HTMLAttr { + return template.HTMLAttr(fmt.Sprintf("style=\"background-color: %s;\"", stat2Color(statName))) + }, + "barStyle": func(statName string, dividend, divisor time.Duration) template.HTMLAttr { + width := "0" + if divisor != 0 { + width = fmt.Sprintf("%.2f%%", float64(dividend)/float64(divisor)*100) + } + return template.HTMLAttr(fmt.Sprintf("style=\"width: %s; background-color: %s;\"", width, stat2Color(statName))) + }, + "filterParams": func(f *regionFilter) template.URL { + return template.URL(f.params.Encode()) + }, +}).Parse(` + +Regions: {{.Name}} + + + + +

    Regions: {{.Name}}

    + +Table of contents + + +

    Summary

    + +{{ with $p := filterParams .Filter}} + + + + + + + + + + + + + + + + + +
    Network wait profile: graph (download)
    Sync block profile: graph (download)
    Syscall profile: graph (download)
    Scheduler wait profile: graph (download)
    +{{ end }} + +

    Breakdown

    + +The table below breaks down where each goroutine is spent its time during the +traced period. +All of the columns except total time are non-overlapping. +
    +
    + + + + + + + +{{range $.NonOverlappingStats}} + +{{end}} + +{{range .Regions}} + + + + + + {{$Region := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Region.NonOverlappingStats .}} + + {{end}} + +{{end}} +
    Goroutine Task Total {{.}}
    {{.Goroutine}} {{if .TaskID}}{{.TaskID}}{{end}} {{ .TotalTime.String }} +
    + {{$Region := .}} + {{range $.NonOverlappingStats}} + {{$Time := index $Region.NonOverlappingStats .}} + {{if $Time}} +   + {{end}} + {{end}} +
    +
    {{$Time.String}}
    + +

    Special ranges

    + +The table below describes how much of the traced period each goroutine spent in +certain special time ranges. +If a goroutine has spent no time in any special time ranges, it is excluded from +the table. +For example, how much time it spent helping the GC. Note that these times do +overlap with the times from the first table. +In general the goroutine may not be executing in these special time ranges. +For example, it may have blocked while trying to help the GC. +This must be taken into account when interpreting the data. +
    +
    + + + + + + +{{range $.RangeStats}} + +{{end}} + +{{range .Regions}} + {{if .HasRangeTime}} + + + + + {{$Region := .}} + {{range $.RangeStats}} + {{$Time := index $Region.RangeTime .}} + + {{end}} + + {{end}} +{{end}} +
    Goroutine Task Total {{.}}
    {{.Goroutine}} {{if .TaskID}}{{.TaskID}}{{end}} {{ .TotalTime.String }} {{$Time.String}}
    +`)) + +// regionFilter represents a region filter specified by a user of cmd/trace. +type regionFilter struct { + name string + params url.Values + cond []func(*parsedTrace, *trace.UserRegionSummary) bool +} + +// match returns true if a region, described by its ID and summary, matches +// the filter. +func (f *regionFilter) match(t *parsedTrace, s *trace.UserRegionSummary) bool { + for _, c := range f.cond { + if !c(t, s) { + return false + } + } + return true +} + +// newRegionFilter creates a new region filter from URL query variables. +func newRegionFilter(r *http.Request) (*regionFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(*parsedTrace, *trace.UserRegionSummary) bool + filterParams := make(url.Values) + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, fmt.Sprintf("%q", typ[0])) + conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool { + return r.Name == typ[0] + }) + filterParams.Add("type", typ[0]) + } + if pc, err := strconv.ParseUint(r.FormValue("pc"), 16, 64); err == nil { + encPC := fmt.Sprintf("0x%x", pc) + name = append(name, "@ "+encPC) + conditions = append(conditions, func(_ *parsedTrace, r *trace.UserRegionSummary) bool { + return regionTopStackFrame(r).PC == pc + }) + filterParams.Add("pc", encPC) + } + + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("(latency >= %s)", lat)) + conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool { + return regionInterval(t, r).duration() >= lat + }) + filterParams.Add("latmin", lat.String()) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("(latency <= %s)", lat)) + conditions = append(conditions, func(t *parsedTrace, r *trace.UserRegionSummary) bool { + return regionInterval(t, r).duration() <= lat + }) + 
filterParams.Add("latmax", lat.String()) + } + + return ®ionFilter{ + name: strings.Join(name, " "), + cond: conditions, + params: filterParams, + }, nil +} + +func regionInterval(t *parsedTrace, s *trace.UserRegionSummary) interval { + var i interval + if s.Start != nil { + i.start = s.Start.Time() + } else { + i.start = t.startTime() + } + if s.End != nil { + i.end = s.End.Time() + } else { + i.end = t.endTime() + } + return i +} diff --git a/src/cmd/trace/v2/tasks.go b/src/cmd/trace/v2/tasks.go new file mode 100644 index 0000000000..fb40811565 --- /dev/null +++ b/src/cmd/trace/v2/tasks.go @@ -0,0 +1,477 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "cmp" + "fmt" + "html/template" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "log" + "net/http" + "slices" + "strings" + "time" +) + +// UserTasksHandlerFunc returns a HandlerFunc that reports all tasks found in the trace. +func UserTasksHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + tasks := t.summary.Tasks + + // Summarize groups of tasks with the same name. + summary := make(map[string]taskStats) + for _, task := range tasks { + stats, ok := summary[task.Name] + if !ok { + stats.Type = task.Name + } + stats.add(task) + summary[task.Name] = stats + } + + // Sort tasks by type. + userTasks := make([]taskStats, 0, len(summary)) + for _, stats := range summary { + userTasks = append(userTasks, stats) + } + slices.SortFunc(userTasks, func(a, b taskStats) int { + return cmp.Compare(a.Type, b.Type) + }) + + // Emit table. 
+ err := templUserTaskTypes.Execute(w, userTasks) + if err != nil { + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +type taskStats struct { + Type string + Count int // Complete + incomplete tasks + Histogram traceviewer.TimeHistogram // Complete tasks only +} + +func (s *taskStats) UserTaskURL(complete bool) func(min, max time.Duration) string { + return func(min, max time.Duration) string { + return fmt.Sprintf("/usertask?type=%s&complete=%v&latmin=%v&latmax=%v", template.URLQueryEscaper(s.Type), template.URLQueryEscaper(complete), template.URLQueryEscaper(min), template.URLQueryEscaper(max)) + } +} + +func (s *taskStats) add(task *trace.UserTaskSummary) { + s.Count++ + if task.Complete() { + s.Histogram.Add(task.End.Time().Sub(task.Start.Time())) + } +} + +var templUserTaskTypes = template.Must(template.New("").Parse(` + +Tasks + + +Search log text:

    + + + + + + +{{range $}} + + + + + +{{end}} +
    Task typeCountDuration distribution (complete tasks)
    {{.Type}}{{.Count}}{{.Histogram.ToHTML (.UserTaskURL true)}}
    + + +`)) + +// UserTaskHandlerFunc returns a HandlerFunc that presents the details of the selected tasks. +func UserTaskHandlerFunc(t *parsedTrace) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + filter, err := newTaskFilter(r) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + type event struct { + WhenString string + Elapsed time.Duration + Goroutine tracev2.GoID + What string + // TODO: include stack trace of creation time + } + type task struct { + WhenString string + ID tracev2.TaskID + Duration time.Duration + Complete bool + Events []event + Start, End time.Duration // Time since the beginning of the trace + GCTime time.Duration + } + var tasks []task + for _, summary := range t.summary.Tasks { + if !filter.match(t, summary) { + continue + } + + // Collect all the events for the task. + var rawEvents []*tracev2.Event + if summary.Start != nil { + rawEvents = append(rawEvents, summary.Start) + } + if summary.End != nil { + rawEvents = append(rawEvents, summary.End) + } + rawEvents = append(rawEvents, summary.Logs...) + for _, r := range summary.Regions { + if r.Start != nil { + rawEvents = append(rawEvents, r.Start) + } + if r.End != nil { + rawEvents = append(rawEvents, r.End) + } + } + + // Sort them. + slices.SortStableFunc(rawEvents, func(a, b *tracev2.Event) int { + return cmp.Compare(a.Time(), b.Time()) + }) + + // Summarize them. + var events []event + last := t.startTime() + for _, ev := range rawEvents { + what := describeEvent(ev) + if what == "" { + continue + } + sinceStart := ev.Time().Sub(t.startTime()) + events = append(events, event{ + WhenString: fmt.Sprintf("%2.9f", sinceStart.Seconds()), + Elapsed: ev.Time().Sub(last), + What: what, + Goroutine: primaryGoroutine(ev), + }) + last = ev.Time() + } + taskSpan := taskInterval(t, summary) + taskStart := taskSpan.start.Sub(t.startTime()) + + // Produce the task summary. 
+ tasks = append(tasks, task{ + WhenString: fmt.Sprintf("%2.9fs", taskStart.Seconds()), + Duration: taskSpan.duration(), + ID: summary.ID, + Complete: summary.Complete(), + Events: events, + Start: taskStart, + End: taskStart + taskSpan.duration(), + }) + } + // Sort the tasks by duration. + slices.SortFunc(tasks, func(a, b task) int { + return cmp.Compare(a.Duration, b.Duration) + }) + + // Emit table. + err = templUserTaskType.Execute(w, struct { + Name string + Tasks []task + }{ + Name: filter.name, + Tasks: tasks, + }) + if err != nil { + log.Printf("failed to execute template: %v", err) + http.Error(w, fmt.Sprintf("failed to execute template: %v", err), http.StatusInternalServerError) + return + } + } +} + +var templUserTaskType = template.Must(template.New("userTask").Funcs(template.FuncMap{ + "elapsed": elapsed, + "asMillisecond": asMillisecond, + "trimSpace": strings.TrimSpace, +}).Parse(` + +Tasks: {{.Name}} + + + +

    User Task: {{.Name}}

    + +Search log text:
    + +

    + + + + + + + + + {{range $el := $.Tasks}} + + + + + + + {{range $el.Events}} + + + + + + + {{end}} + {{end}} + + +`)) + +// taskFilter represents a task filter specified by a user of cmd/trace. +type taskFilter struct { + name string + cond []func(*parsedTrace, *trace.UserTaskSummary) bool +} + +// match returns true if a task, described by its ID and summary, matches +// the filter. +func (f *taskFilter) match(t *parsedTrace, task *trace.UserTaskSummary) bool { + if t == nil { + return false + } + for _, c := range f.cond { + if !c(t, task) { + return false + } + } + return true +} + +// newTaskFilter creates a new task filter from URL query variables. +func newTaskFilter(r *http.Request) (*taskFilter, error) { + if err := r.ParseForm(); err != nil { + return nil, err + } + + var name []string + var conditions []func(*parsedTrace, *trace.UserTaskSummary) bool + + param := r.Form + if typ, ok := param["type"]; ok && len(typ) > 0 { + name = append(name, fmt.Sprintf("%q", typ[0])) + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Name == typ[0] + }) + } + if complete := r.FormValue("complete"); complete == "1" { + name = append(name, "complete") + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() + }) + } else if complete == "0" { + name = append(name, "incomplete") + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return !task.Complete() + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmin")); err == nil { + name = append(name, fmt.Sprintf("latency >= %s", lat)) + conditions = append(conditions, func(t *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() && taskInterval(t, task).duration() >= lat + }) + } + if lat, err := time.ParseDuration(r.FormValue("latmax")); err == nil { + name = append(name, fmt.Sprintf("latency <= %s", lat)) + conditions = append(conditions, 
func(t *parsedTrace, task *trace.UserTaskSummary) bool { + return task.Complete() && taskInterval(t, task).duration() <= lat + }) + } + if text := r.FormValue("logtext"); text != "" { + name = append(name, fmt.Sprintf("log contains %q", text)) + conditions = append(conditions, func(_ *parsedTrace, task *trace.UserTaskSummary) bool { + return taskMatches(task, text) + }) + } + + return &taskFilter{name: strings.Join(name, ","), cond: conditions}, nil +} + +func taskInterval(t *parsedTrace, s *trace.UserTaskSummary) interval { + var i interval + if s.Start != nil { + i.start = s.Start.Time() + } else { + i.start = t.startTime() + } + if s.End != nil { + i.end = s.End.Time() + } else { + i.end = t.endTime() + } + return i +} + +func taskMatches(t *trace.UserTaskSummary, text string) bool { + matches := func(s string) bool { + return strings.Contains(s, text) + } + if matches(t.Name) { + return true + } + for _, r := range t.Regions { + if matches(r.Name) { + return true + } + } + for _, ev := range t.Logs { + log := ev.Log() + if matches(log.Category) { + return true + } + if matches(log.Message) { + return true + } + } + return false +} + +func describeEvent(ev *tracev2.Event) string { + switch ev.Kind() { + case tracev2.EventStateTransition: + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + return "" + } + old, new := st.Goroutine() + return fmt.Sprintf("%s -> %s", old, new) + case tracev2.EventRegionBegin: + return fmt.Sprintf("region %q begin", ev.Region().Type) + case tracev2.EventRegionEnd: + return fmt.Sprintf("region %q end", ev.Region().Type) + case tracev2.EventTaskBegin: + t := ev.Task() + return fmt.Sprintf("task %q (D %d, parent %d) begin", t.Type, t.ID, t.Parent) + case tracev2.EventTaskEnd: + return "task end" + case tracev2.EventLog: + log := ev.Log() + if log.Category != "" { + return fmt.Sprintf("log %q", log.Message) + } + return fmt.Sprintf("log (category: %s): %q", log.Category, log.Message) + } + return "" +} + 
+func primaryGoroutine(ev *tracev2.Event) tracev2.GoID { + if ev.Kind() != tracev2.EventStateTransition { + return ev.Goroutine() + } + st := ev.StateTransition() + if st.Resource.Kind != tracev2.ResourceGoroutine { + return tracev2.NoGoroutine + } + return st.Resource.Goroutine() +} + +func elapsed(d time.Duration) string { + b := fmt.Appendf(nil, "%.9f", d.Seconds()) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + return string(b) +} + +func asMillisecond(d time.Duration) float64 { + return float64(d.Nanoseconds()) / float64(time.Millisecond) +} diff --git a/src/maps/maps.s b/src/cmd/trace/v2/testdata/generate.go similarity index 76% rename from src/maps/maps.s rename to src/cmd/trace/v2/testdata/generate.go index 4e5577892d..c0658b2329 100644 --- a/src/maps/maps.s +++ b/src/cmd/trace/v2/testdata/generate.go @@ -2,4 +2,5 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// need this empty asm file to enable linkname. 
\ No newline at end of file +//go:generate go run mktests.go +package testdata diff --git a/src/cmd/trace/v2/testdata/go122.test b/src/cmd/trace/v2/testdata/go122.test new file mode 100644 index 0000000000..2ec9e88f4f --- /dev/null +++ b/src/cmd/trace/v2/testdata/go122.test @@ -0,0 +1,4639 @@ +Trace Go1.22 +EventBatch gen=1 m=18446744073709551615 time=7689672466239 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=1709048 time=7689670869319 size=423 +ProcStart dt=409 p=7 p_seq=1 +GoStart dt=31 g=34 g_seq=1 +GoStop dt=291990 reason_string=16 stack=50 +GoStart dt=21 g=34 g_seq=2 +GoStop dt=315853 reason_string=16 stack=50 +GoStart dt=30 g=34 g_seq=3 +GoUnblock dt=173432 g=1 g_seq=73 stack=52 +GoDestroy dt=96 +GoStart dt=22 g=1 g_seq=74 +HeapAlloc dt=79 heapalloc_value=26397576 +HeapAlloc dt=51 heapalloc_value=26405640 +GoCreate dt=62 new_g=50 new_stack=53 stack=54 +GoBlock dt=23 reason_string=12 stack=55 +GoStart dt=7 g=50 g_seq=1 +HeapAlloc dt=301 heapalloc_value=26413776 +HeapAlloc dt=30 heapalloc_value=26421680 +GoSyscallBegin dt=35 p_seq=2 stack=56 +GoSyscallEnd dt=39 +GoSyscallBegin dt=13 p_seq=3 stack=57 +GoSyscallEnd dt=16 +GoSyscallBegin dt=396 p_seq=4 stack=58 +GoSyscallEnd dt=16 +GoSyscallBegin dt=15 p_seq=5 stack=59 +GoSyscallEnd dt=14 +HeapAlloc dt=305 heapalloc_value=26429872 +HeapAlloc dt=34 heapalloc_value=26437248 +HeapAlloc dt=42 heapalloc_value=26445120 +GoSyscallBegin dt=42 p_seq=6 stack=60 +GoSyscallEnd dt=18 +GoSyscallBegin dt=10 p_seq=7 stack=61 +GoSyscallEnd dt=14 +GoSyscallBegin dt=23 p_seq=8 stack=62 +ProcStart dt=787251 p=7 p_seq=15 +GoSyscallEndBlocked dt=7 +GoStart dt=1 g=50 g_seq=2 +GoUnblock dt=48 g=1 g_seq=75 stack=65 +GoDestroy dt=143 +GoStart dt=30 g=1 g_seq=76 +HeapAlloc dt=621 heapalloc_value=26468232 +GoStop dt=656 reason_string=16 stack=66 +GoStart dt=103 g=1 g_seq=77 +HeapAlloc dt=42 heapalloc_value=26476424 +HeapAlloc dt=87 heapalloc_value=26484360 +GoSyscallBegin dt=18 p_seq=16 stack=67 +GoSyscallEnd dt=456 +GoSyscallBegin 
dt=41 p_seq=17 stack=68 +GoSyscallEnd dt=25 +GoSyscallBegin dt=16 p_seq=18 stack=69 +GoSyscallEnd dt=18 +HeapAlloc dt=193 heapalloc_value=26549896 +GoSyscallBegin dt=69 p_seq=19 stack=70 +GoSyscallEnd dt=227 +GoSyscallBegin dt=12 p_seq=20 stack=70 +GoSyscallEnd dt=105 +GoSyscallBegin dt=87 p_seq=21 stack=71 +GoSyscallEnd dt=48 +GoSyscallBegin dt=37 p_seq=22 stack=72 +GoSyscallEnd dt=51 +GoSyscallBegin dt=49 p_seq=23 stack=73 +GoSyscallEnd dt=158 +GoSyscallBegin dt=12 p_seq=24 stack=74 +GoSyscallEnd dt=67 +HeapAlloc dt=126 heapalloc_value=26558088 +HeapAlloc dt=30 heapalloc_value=26566160 +GoCreate dt=34 new_g=52 new_stack=75 stack=76 +HeapAlloc dt=205 heapalloc_value=26573872 +GoSyscallBegin dt=890 p_seq=25 stack=77 +GoSyscallEnd dt=1128 +GoBlock dt=96 reason_string=7 stack=80 +ProcStop dt=29 +ProcStart dt=384 p=6 p_seq=7 +GoStart dt=14 g=52 g_seq=4 +GoSyscallBegin dt=16 p_seq=8 stack=78 +ProcStart dt=160 p=5 p_seq=13 +GoSyscallEndBlocked dt=3 +GoStart dt=1 g=52 g_seq=5 +HeapAlloc dt=297 heapalloc_value=26581840 +HeapAlloc dt=31 heapalloc_value=26590032 +HeapAlloc dt=164 heapalloc_value=26598224 +GoSyscallBegin dt=34 p_seq=14 stack=88 +GoSyscallEnd dt=33 +GoSyscallBegin dt=14 p_seq=15 stack=89 +GoSyscallEnd dt=36 +GoSyscallBegin dt=12 p_seq=16 stack=90 +GoSyscallEnd dt=22 +GoSyscallBegin dt=15 p_seq=17 stack=91 +GoSyscallEnd dt=28 +HeapAlloc dt=18 heapalloc_value=26606416 +HeapAlloc dt=20 heapalloc_value=26614608 +GoBlock dt=16 reason_string=19 stack=92 +ProcStop dt=136 +ProcStart dt=17788 p=6 p_seq=12 +GoUnblock dt=41 g=1 g_seq=80 stack=0 +GoStart dt=136 g=1 g_seq=81 +GoSyscallBegin dt=14 p_seq=13 stack=86 +GoSyscallEnd dt=65 +GoSyscallBegin dt=72 p_seq=14 stack=95 +GoSyscallEnd dt=534 +HeapAlloc dt=284 heapalloc_value=26630992 +HeapAlloc dt=38 heapalloc_value=26639120 +EventBatch gen=1 m=1709047 time=7689670866279 size=202 +ProcStart dt=437 p=6 p_seq=2 +HeapAlloc dt=131 heapalloc_value=26373928 +HeapAlloc dt=368 heapalloc_value=26382120 +HeapAlloc dt=55 
heapalloc_value=26390056 +GoStart dt=1030 g=36 g_seq=1 +GoStop dt=293329 reason_string=16 stack=50 +GoStart dt=25 g=36 g_seq=2 +GoStop dt=315834 reason_string=16 stack=50 +GoStart dt=24 g=36 g_seq=3 +GoDestroy dt=172079 +ProcStop dt=60 +ProcStart dt=1749 p=6 p_seq=3 +ProcStop dt=1621 +ProcStart dt=64901 p=5 p_seq=4 +ProcStop dt=24 +ProcStart dt=722061 p=5 p_seq=5 +ProcStop dt=31 +ProcStart dt=2847 p=5 p_seq=8 +ProcStop dt=20 +ProcStart dt=3166 p=7 p_seq=26 +GoUnblock dt=6 g=52 g_seq=3 stack=0 +GoUnblock dt=90 g=1 g_seq=78 stack=0 +GoStart dt=5 g=1 g_seq=79 +GoSyscallBegin dt=31 p_seq=27 stack=81 +GoSyscallEnd dt=35 +GoSyscallBegin dt=134 p_seq=28 stack=82 +GoSyscallEnd dt=29 +GoSyscallBegin dt=17 p_seq=29 stack=83 +GoSyscallEnd dt=30 +GoSyscallBegin dt=8 p_seq=30 stack=84 +GoSyscallEnd dt=19 +GoSyscallBegin dt=11 p_seq=31 stack=85 +GoSyscallEnd dt=24 +GoSyscallBegin dt=65 p_seq=32 stack=86 +GoSyscallEnd dt=57 +GoBlock dt=19 reason_string=7 stack=87 +ProcStop dt=38 +ProcStart dt=458 p=6 p_seq=11 +ProcStop dt=30 +ProcStart dt=377 p=5 p_seq=18 +ProcStop dt=23 +ProcStart dt=17141 p=5 p_seq=19 +GoUnblock dt=19 g=52 g_seq=6 stack=0 +GoStart dt=111 g=52 g_seq=7 +HeapAlloc dt=38 heapalloc_value=26622800 +GoSyscallBegin dt=36 p_seq=20 stack=93 +GoSyscallEnd dt=554 +GoSyscallBegin dt=83 p_seq=21 stack=94 +GoSyscallEnd dt=196 +GoDestroy dt=15 +ProcStop dt=37 +EventBatch gen=1 m=1709046 time=7689670697530 size=167 +ProcStart dt=236 p=5 p_seq=1 +ProcStop dt=281 +ProcStart dt=1683 p=2 p_seq=14 +ProcStop dt=33 +ProcStart dt=147800 p=2 p_seq=16 +ProcStop dt=29 +ProcStart dt=3880 p=1 p_seq=28 +ProcStop dt=30 +ProcStart dt=801175 p=5 p_seq=3 +ProcStop dt=19 +ProcStart dt=47961 p=6 p_seq=4 +ProcStop dt=15 +ProcStart dt=16716 p=6 p_seq=5 +GoUnblock dt=60 g=6 g_seq=2 stack=0 +GoStart dt=90 g=6 g_seq=3 +HeapAlloc dt=193 heapalloc_value=26453304 +GoBlock dt=29 reason_string=12 stack=15 +ProcStop dt=12 +ProcStart dt=704555 p=7 p_seq=10 +ProcStop dt=25 +ProcStart dt=16755 p=7 p_seq=11 
+HeapAlloc dt=61 heapalloc_value=26461496 +GoCreate dt=72 new_g=51 new_stack=63 stack=0 +GoStart dt=98 g=51 g_seq=1 +GoSyscallBegin dt=45 p_seq=12 stack=64 +ProcStart dt=206 p=7 p_seq=14 +GoSyscallEndBlocked dt=3 +GoStart dt=1 g=51 g_seq=2 +GoDestroy dt=12 +ProcStop dt=18 +ProcStart dt=849 p=5 p_seq=6 +ProcStop dt=16 +ProcStart dt=1359 p=5 p_seq=7 +ProcStop dt=12 +ProcStart dt=2079 p=5 p_seq=9 +GoStart dt=1134 g=52 g_seq=1 +GoSyscallBegin dt=39 p_seq=10 stack=78 +ProcStart dt=232 p=5 p_seq=12 +GoSyscallEndBlocked dt=2 +GoStart dt=1 g=52 g_seq=2 +GoBlock dt=27 reason_string=7 stack=79 +ProcStop dt=20 +EventBatch gen=1 m=1709045 time=7689670544102 size=3297 +ProcStart dt=84 p=4 p_seq=5 +GoUnblock dt=91 g=1 g_seq=34 stack=0 +GoStart dt=157 g=1 g_seq=35 +HeapAlloc dt=117 heapalloc_value=8105520 +HeapAlloc dt=67 heapalloc_value=8113712 +HeapAlloc dt=36 heapalloc_value=8121904 +HeapAlloc dt=25 heapalloc_value=8130096 +HeapAlloc dt=25 heapalloc_value=8138288 +HeapAlloc dt=25 heapalloc_value=8146480 +HeapAlloc dt=21 heapalloc_value=8154672 +HeapAlloc dt=26 heapalloc_value=8162864 +HeapAlloc dt=18 heapalloc_value=8171056 +HeapAlloc dt=24 heapalloc_value=8179248 +HeapAlloc dt=15 heapalloc_value=8187440 +HeapAlloc dt=133 heapalloc_value=8195632 +HeapAlloc dt=105 heapalloc_value=8203824 +HeapAlloc dt=20 heapalloc_value=8212016 +HeapAlloc dt=18 heapalloc_value=8220208 +HeapAlloc dt=8 heapalloc_value=8228400 +HeapAlloc dt=8 heapalloc_value=8236592 +HeapAlloc dt=9 heapalloc_value=8244784 +GCMarkAssistBegin dt=27 stack=31 +HeapAlloc dt=69 heapalloc_value=8252784 +GoBlock dt=31 reason_string=10 stack=36 +ProcStop dt=156 +ProcStart dt=993 p=0 p_seq=11 +GoStart dt=192 g=1 g_seq=37 +GCMarkAssistEnd dt=12 +HeapAlloc dt=35 heapalloc_value=8746312 +GCSweepBegin dt=26 stack=42 +GCSweepEnd dt=777 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=22 heapalloc_value=8754504 +GCSweepBegin dt=47 stack=42 +GCSweepEnd dt=662 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=11 
heapalloc_value=8762696 +GCSweepBegin dt=25 stack=42 +GCSweepEnd dt=712 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=39 heapalloc_value=8770888 +GCSweepBegin dt=27 stack=42 +GCSweepEnd dt=630 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=8779080 +GCSweepBegin dt=25 stack=42 +GCSweepEnd dt=1256 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=8 heapalloc_value=8787272 +GCSweepBegin dt=40 stack=42 +GCSweepEnd dt=529 swept_value=360448 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=8795464 +HeapAlloc dt=24 heapalloc_value=8803656 +HeapAlloc dt=24 heapalloc_value=8811848 +HeapAlloc dt=25 heapalloc_value=8820040 +HeapAlloc dt=23 heapalloc_value=8828232 +HeapAlloc dt=18 heapalloc_value=8836424 +HeapAlloc dt=95 heapalloc_value=8844616 +HeapAlloc dt=25 heapalloc_value=8852808 +HeapAlloc dt=23 heapalloc_value=8861000 +HeapAlloc dt=19 heapalloc_value=8869192 +HeapAlloc dt=93 heapalloc_value=8877384 +HeapAlloc dt=23 heapalloc_value=8885576 +HeapAlloc dt=23 heapalloc_value=8893768 +HeapAlloc dt=23 heapalloc_value=8901960 +HeapAlloc dt=22 heapalloc_value=8910152 +HeapAlloc dt=18 heapalloc_value=8918344 +HeapAlloc dt=174 heapalloc_value=8926536 +HeapAlloc dt=31 heapalloc_value=8934728 +HeapAlloc dt=38 heapalloc_value=8942920 +HeapAlloc dt=31 heapalloc_value=8951112 +HeapAlloc dt=57 heapalloc_value=8959304 +HeapAlloc dt=58 heapalloc_value=8967496 +HeapAlloc dt=60 heapalloc_value=8975688 +HeapAlloc dt=44 heapalloc_value=8983880 +HeapAlloc dt=53 heapalloc_value=8992072 +HeapAlloc dt=57 heapalloc_value=9000264 +HeapAlloc dt=63 heapalloc_value=9008456 +HeapAlloc dt=55 heapalloc_value=9016648 +HeapAlloc dt=28 heapalloc_value=9024840 +HeapAlloc dt=12 heapalloc_value=9033032 +HeapAlloc dt=9 heapalloc_value=9041224 +HeapAlloc dt=8 heapalloc_value=9049416 +HeapAlloc dt=7 heapalloc_value=9057608 +HeapAlloc dt=8 heapalloc_value=9065800 +HeapAlloc dt=14 heapalloc_value=9073992 +HeapAlloc dt=8 heapalloc_value=9082184 +HeapAlloc dt=45 heapalloc_value=9090376 
+HeapAlloc dt=10 heapalloc_value=9098568 +HeapAlloc dt=14 heapalloc_value=9106760 +HeapAlloc dt=8 heapalloc_value=9114952 +HeapAlloc dt=10 heapalloc_value=9123144 +HeapAlloc dt=15 heapalloc_value=9131336 +HeapAlloc dt=53 heapalloc_value=9139528 +HeapAlloc dt=27 heapalloc_value=9147720 +HeapAlloc dt=38 heapalloc_value=9155912 +HeapAlloc dt=33 heapalloc_value=9164104 +HeapAlloc dt=33 heapalloc_value=9172296 +HeapAlloc dt=34 heapalloc_value=9180488 +HeapAlloc dt=36 heapalloc_value=9188680 +HeapAlloc dt=39 heapalloc_value=9196872 +HeapAlloc dt=40 heapalloc_value=9205064 +HeapAlloc dt=59 heapalloc_value=9213256 +HeapAlloc dt=28 heapalloc_value=9221448 +HeapAlloc dt=22 heapalloc_value=9229640 +HeapAlloc dt=20 heapalloc_value=9237832 +HeapAlloc dt=25 heapalloc_value=9246024 +HeapAlloc dt=20 heapalloc_value=9254216 +HeapAlloc dt=16 heapalloc_value=9262408 +HeapAlloc dt=14 heapalloc_value=9270600 +HeapAlloc dt=18 heapalloc_value=9278792 +HeapAlloc dt=32 heapalloc_value=9286984 +HeapAlloc dt=21 heapalloc_value=9295176 +HeapAlloc dt=49 heapalloc_value=9303368 +HeapAlloc dt=23 heapalloc_value=9311560 +HeapAlloc dt=16 heapalloc_value=9319752 +HeapAlloc dt=15 heapalloc_value=9327944 +HeapAlloc dt=13 heapalloc_value=9336136 +HeapAlloc dt=15 heapalloc_value=9344328 +HeapAlloc dt=14 heapalloc_value=9352520 +HeapAlloc dt=16 heapalloc_value=9360712 +HeapAlloc dt=14 heapalloc_value=9368904 +HeapAlloc dt=19 heapalloc_value=9377096 +HeapAlloc dt=16 heapalloc_value=9385288 +HeapAlloc dt=15 heapalloc_value=9393480 +HeapAlloc dt=14 heapalloc_value=9401672 +HeapAlloc dt=16 heapalloc_value=9409864 +HeapAlloc dt=15 heapalloc_value=9418056 +HeapAlloc dt=15 heapalloc_value=9426248 +HeapAlloc dt=15 heapalloc_value=9434440 +HeapAlloc dt=18 heapalloc_value=9442632 +HeapAlloc dt=94 heapalloc_value=9450824 +HeapAlloc dt=17 heapalloc_value=9459016 +HeapAlloc dt=14 heapalloc_value=9467208 +HeapAlloc dt=16 heapalloc_value=9475400 +HeapAlloc dt=15 heapalloc_value=9483592 +HeapAlloc dt=15 
heapalloc_value=9491784 +HeapAlloc dt=15 heapalloc_value=9499976 +HeapAlloc dt=49 heapalloc_value=9508168 +HeapAlloc dt=16 heapalloc_value=9516360 +HeapAlloc dt=14 heapalloc_value=9524552 +HeapAlloc dt=15 heapalloc_value=9532744 +HeapAlloc dt=15 heapalloc_value=9540936 +HeapAlloc dt=15 heapalloc_value=9549128 +HeapAlloc dt=17 heapalloc_value=9557320 +HeapAlloc dt=15 heapalloc_value=9565512 +HeapAlloc dt=21 heapalloc_value=9573704 +HeapAlloc dt=15 heapalloc_value=9581896 +HeapAlloc dt=16 heapalloc_value=9590088 +HeapAlloc dt=14 heapalloc_value=9598280 +HeapAlloc dt=16 heapalloc_value=9606472 +HeapAlloc dt=14 heapalloc_value=9614664 +HeapAlloc dt=16 heapalloc_value=9622856 +GoBlock dt=21 reason_string=19 stack=21 +ProcStop dt=157 +ProcStart dt=17320 p=2 p_seq=6 +ProcStop dt=15 +ProcStart dt=2411 p=0 p_seq=14 +ProcStop dt=8 +ProcStart dt=16766 p=0 p_seq=15 +GoUnblock dt=9 g=1 g_seq=40 stack=0 +GoStart dt=91 g=1 g_seq=41 +HeapAlloc dt=19 heapalloc_value=10859848 +HeapAlloc dt=9 heapalloc_value=10868040 +HeapAlloc dt=7 heapalloc_value=10876232 +HeapAlloc dt=6 heapalloc_value=10884424 +HeapAlloc dt=6 heapalloc_value=10892616 +HeapAlloc dt=6 heapalloc_value=10900808 +HeapAlloc dt=6 heapalloc_value=10909000 +HeapAlloc dt=6 heapalloc_value=10917192 +HeapAlloc dt=6 heapalloc_value=10925384 +HeapAlloc dt=6 heapalloc_value=10933576 +HeapAlloc dt=6 heapalloc_value=10941768 +HeapAlloc dt=6 heapalloc_value=10949960 +HeapAlloc dt=6 heapalloc_value=10958152 +HeapAlloc dt=5 heapalloc_value=10966344 +HeapAlloc dt=6 heapalloc_value=10974536 +HeapAlloc dt=6 heapalloc_value=10982728 +HeapAlloc dt=6 heapalloc_value=10990920 +HeapAlloc dt=6 heapalloc_value=10999112 +HeapAlloc dt=6 heapalloc_value=11007304 +HeapAlloc dt=5 heapalloc_value=11015496 +HeapAlloc dt=7 heapalloc_value=11023688 +HeapAlloc dt=6 heapalloc_value=11031880 +HeapAlloc dt=14 heapalloc_value=11040072 +HeapAlloc dt=7 heapalloc_value=11048264 +HeapAlloc dt=6 heapalloc_value=11056456 +HeapAlloc dt=6 heapalloc_value=11064648 
+HeapAlloc dt=5 heapalloc_value=11072840 +HeapAlloc dt=6 heapalloc_value=11081032 +HeapAlloc dt=6 heapalloc_value=11089224 +HeapAlloc dt=6 heapalloc_value=11097416 +HeapAlloc dt=6 heapalloc_value=11105608 +HeapAlloc dt=6 heapalloc_value=11113800 +HeapAlloc dt=59 heapalloc_value=11121992 +HeapAlloc dt=9 heapalloc_value=11130184 +HeapAlloc dt=7 heapalloc_value=11138376 +HeapAlloc dt=6 heapalloc_value=11146568 +HeapAlloc dt=6 heapalloc_value=11154760 +HeapAlloc dt=5 heapalloc_value=11162952 +HeapAlloc dt=6 heapalloc_value=11171144 +HeapAlloc dt=6 heapalloc_value=11179336 +HeapAlloc dt=6 heapalloc_value=11187528 +HeapAlloc dt=5 heapalloc_value=11195720 +HeapAlloc dt=6 heapalloc_value=11203912 +HeapAlloc dt=6 heapalloc_value=11212104 +HeapAlloc dt=84 heapalloc_value=11220296 +HeapAlloc dt=7 heapalloc_value=11228488 +HeapAlloc dt=6 heapalloc_value=11236680 +HeapAlloc dt=6 heapalloc_value=11244872 +HeapAlloc dt=5 heapalloc_value=11253064 +HeapAlloc dt=6 heapalloc_value=11261256 +HeapAlloc dt=6 heapalloc_value=11269448 +HeapAlloc dt=6 heapalloc_value=11277640 +HeapAlloc dt=5 heapalloc_value=11285832 +HeapAlloc dt=6 heapalloc_value=11294024 +HeapAlloc dt=6 heapalloc_value=11302216 +HeapAlloc dt=5 heapalloc_value=11310408 +HeapAlloc dt=6 heapalloc_value=11318600 +HeapAlloc dt=38 heapalloc_value=11326792 +HeapAlloc dt=7 heapalloc_value=11334984 +HeapAlloc dt=6 heapalloc_value=11343176 +HeapAlloc dt=6 heapalloc_value=11351368 +HeapAlloc dt=5 heapalloc_value=11359560 +HeapAlloc dt=6 heapalloc_value=11367752 +HeapAlloc dt=6 heapalloc_value=11375944 +HeapAlloc dt=6 heapalloc_value=11384136 +HeapAlloc dt=6 heapalloc_value=11392328 +HeapAlloc dt=5 heapalloc_value=11400520 +HeapAlloc dt=6 heapalloc_value=11408712 +HeapAlloc dt=6 heapalloc_value=11416904 +HeapAlloc dt=5 heapalloc_value=11425096 +HeapAlloc dt=6 heapalloc_value=11433288 +HeapAlloc dt=6 heapalloc_value=11441480 +HeapAlloc dt=6 heapalloc_value=11449672 +HeapAlloc dt=5 heapalloc_value=11457864 +HeapAlloc dt=6 
heapalloc_value=11466056 +HeapAlloc dt=79 heapalloc_value=11474248 +HeapAlloc dt=6 heapalloc_value=11482440 +HeapAlloc dt=5 heapalloc_value=11490632 +HeapAlloc dt=6 heapalloc_value=11498824 +HeapAlloc dt=6 heapalloc_value=11507016 +HeapAlloc dt=6 heapalloc_value=11515208 +HeapAlloc dt=5 heapalloc_value=11523400 +HeapAlloc dt=6 heapalloc_value=11531592 +HeapAlloc dt=5 heapalloc_value=11539784 +HeapAlloc dt=6 heapalloc_value=11547976 +HeapAlloc dt=6 heapalloc_value=11556168 +HeapAlloc dt=10 heapalloc_value=11564360 +HeapAlloc dt=6 heapalloc_value=11572552 +HeapAlloc dt=24 heapalloc_value=11580744 +HeapAlloc dt=7 heapalloc_value=11588936 +HeapAlloc dt=5 heapalloc_value=11597128 +HeapAlloc dt=6 heapalloc_value=11605320 +HeapAlloc dt=6 heapalloc_value=11613512 +HeapAlloc dt=6 heapalloc_value=11621704 +HeapAlloc dt=5 heapalloc_value=11629896 +HeapAlloc dt=6 heapalloc_value=11638088 +HeapAlloc dt=6 heapalloc_value=11646280 +HeapAlloc dt=5 heapalloc_value=11654472 +HeapAlloc dt=6 heapalloc_value=11662664 +HeapAlloc dt=6 heapalloc_value=11670856 +HeapAlloc dt=6 heapalloc_value=11679048 +HeapAlloc dt=5 heapalloc_value=11687240 +HeapAlloc dt=6 heapalloc_value=11695432 +HeapAlloc dt=6 heapalloc_value=11703624 +HeapAlloc dt=6 heapalloc_value=11711816 +HeapAlloc dt=5 heapalloc_value=11720008 +HeapAlloc dt=6 heapalloc_value=11728200 +HeapAlloc dt=6 heapalloc_value=11736392 +HeapAlloc dt=70 heapalloc_value=11744584 +HeapAlloc dt=8 heapalloc_value=11752776 +HeapAlloc dt=5 heapalloc_value=11760968 +HeapAlloc dt=6 heapalloc_value=11769160 +HeapAlloc dt=5 heapalloc_value=11777352 +HeapAlloc dt=6 heapalloc_value=11785544 +HeapAlloc dt=6 heapalloc_value=11793736 +HeapAlloc dt=6 heapalloc_value=11801928 +HeapAlloc dt=5 heapalloc_value=11810120 +HeapAlloc dt=6 heapalloc_value=11818312 +HeapAlloc dt=6 heapalloc_value=11826504 +HeapAlloc dt=6 heapalloc_value=11834696 +HeapAlloc dt=6 heapalloc_value=11842888 +HeapAlloc dt=5 heapalloc_value=11851080 +HeapAlloc dt=6 heapalloc_value=11859272 
+HeapAlloc dt=5 heapalloc_value=11867464 +HeapAlloc dt=6 heapalloc_value=11875656 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=105 +ProcStart dt=17283 p=2 p_seq=8 +ProcStop dt=12 +ProcStart dt=4008 p=0 p_seq=18 +ProcStop dt=9 +ProcStart dt=16692 p=0 p_seq=19 +GoUnblock dt=9 g=1 g_seq=44 stack=0 +GoStart dt=76 g=1 g_seq=45 +HeapAlloc dt=16 heapalloc_value=13169992 +HeapAlloc dt=9 heapalloc_value=13178184 +HeapAlloc dt=7 heapalloc_value=13186376 +HeapAlloc dt=5 heapalloc_value=13194568 +HeapAlloc dt=6 heapalloc_value=13202760 +HeapAlloc dt=6 heapalloc_value=13210952 +HeapAlloc dt=5 heapalloc_value=13219144 +HeapAlloc dt=6 heapalloc_value=13227336 +HeapAlloc dt=6 heapalloc_value=13235528 +HeapAlloc dt=6 heapalloc_value=13243720 +HeapAlloc dt=6 heapalloc_value=13251912 +HeapAlloc dt=59 heapalloc_value=13260104 +HeapAlloc dt=8 heapalloc_value=13268296 +HeapAlloc dt=6 heapalloc_value=13276488 +HeapAlloc dt=5 heapalloc_value=13284680 +HeapAlloc dt=6 heapalloc_value=13292872 +HeapAlloc dt=5 heapalloc_value=13301064 +HeapAlloc dt=6 heapalloc_value=13309256 +HeapAlloc dt=5 heapalloc_value=13317448 +HeapAlloc dt=6 heapalloc_value=13325640 +HeapAlloc dt=6 heapalloc_value=13333832 +HeapAlloc dt=6 heapalloc_value=13342024 +HeapAlloc dt=5 heapalloc_value=13350216 +HeapAlloc dt=6 heapalloc_value=13358408 +HeapAlloc dt=6 heapalloc_value=13366600 +HeapAlloc dt=5 heapalloc_value=13374792 +HeapAlloc dt=6 heapalloc_value=13382984 +HeapAlloc dt=6 heapalloc_value=13391176 +HeapAlloc dt=6 heapalloc_value=13399368 +HeapAlloc dt=5 heapalloc_value=13407560 +HeapAlloc dt=8 heapalloc_value=13415752 +HeapAlloc dt=6 heapalloc_value=13423944 +HeapAlloc dt=7 heapalloc_value=13432136 +HeapAlloc dt=5 heapalloc_value=13440328 +HeapAlloc dt=6 heapalloc_value=13448520 +HeapAlloc dt=5 heapalloc_value=13456712 +HeapAlloc dt=6 heapalloc_value=13464904 +HeapAlloc dt=6 heapalloc_value=13473096 +HeapAlloc dt=6 heapalloc_value=13481288 +HeapAlloc dt=5 heapalloc_value=13489480 +HeapAlloc dt=5 
heapalloc_value=13497672 +HeapAlloc dt=6 heapalloc_value=13505864 +HeapAlloc dt=5 heapalloc_value=13514056 +HeapAlloc dt=6 heapalloc_value=13522248 +HeapAlloc dt=5 heapalloc_value=13530440 +HeapAlloc dt=6 heapalloc_value=13538632 +HeapAlloc dt=5 heapalloc_value=13546824 +HeapAlloc dt=6 heapalloc_value=13555016 +HeapAlloc dt=6 heapalloc_value=13563208 +HeapAlloc dt=48 heapalloc_value=13571400 +HeapAlloc dt=7 heapalloc_value=13579592 +HeapAlloc dt=6 heapalloc_value=13587784 +HeapAlloc dt=5 heapalloc_value=13595976 +HeapAlloc dt=6 heapalloc_value=13604168 +HeapAlloc dt=5 heapalloc_value=13612360 +HeapAlloc dt=6 heapalloc_value=13620552 +HeapAlloc dt=5 heapalloc_value=13628744 +HeapAlloc dt=6 heapalloc_value=13636936 +HeapAlloc dt=5 heapalloc_value=13645128 +HeapAlloc dt=6 heapalloc_value=13653320 +HeapAlloc dt=14 heapalloc_value=13661512 +HeapAlloc dt=6 heapalloc_value=13669704 +HeapAlloc dt=6 heapalloc_value=13677896 +HeapAlloc dt=35 heapalloc_value=13686088 +HeapAlloc dt=7 heapalloc_value=13694280 +HeapAlloc dt=6 heapalloc_value=13702472 +HeapAlloc dt=6 heapalloc_value=13710664 +HeapAlloc dt=5 heapalloc_value=13718856 +HeapAlloc dt=6 heapalloc_value=13727048 +HeapAlloc dt=6 heapalloc_value=13735240 +HeapAlloc dt=5 heapalloc_value=13743432 +HeapAlloc dt=6 heapalloc_value=13751624 +HeapAlloc dt=5 heapalloc_value=13759816 +HeapAlloc dt=6 heapalloc_value=13768008 +HeapAlloc dt=5 heapalloc_value=13776200 +HeapAlloc dt=5 heapalloc_value=13784392 +HeapAlloc dt=6 heapalloc_value=13792584 +HeapAlloc dt=6 heapalloc_value=13800776 +HeapAlloc dt=5 heapalloc_value=13808968 +HeapAlloc dt=6 heapalloc_value=13817160 +HeapAlloc dt=5 heapalloc_value=13825352 +HeapAlloc dt=6 heapalloc_value=13833544 +HeapAlloc dt=5 heapalloc_value=13841736 +HeapAlloc dt=6 heapalloc_value=13849928 +HeapAlloc dt=5 heapalloc_value=13858120 +HeapAlloc dt=6 heapalloc_value=13866312 +HeapAlloc dt=5 heapalloc_value=13874504 +HeapAlloc dt=5 heapalloc_value=13882696 +HeapAlloc dt=6 heapalloc_value=13890888 
+HeapAlloc dt=5 heapalloc_value=13899080 +HeapAlloc dt=6 heapalloc_value=13907272 +HeapAlloc dt=5 heapalloc_value=13915464 +HeapAlloc dt=6 heapalloc_value=13923656 +HeapAlloc dt=21 heapalloc_value=13931848 +HeapAlloc dt=6 heapalloc_value=13940040 +HeapAlloc dt=6 heapalloc_value=13948232 +HeapAlloc dt=6 heapalloc_value=13956424 +HeapAlloc dt=6 heapalloc_value=13964616 +HeapAlloc dt=5 heapalloc_value=13972808 +HeapAlloc dt=5 heapalloc_value=13981000 +HeapAlloc dt=6 heapalloc_value=13989192 +HeapAlloc dt=6 heapalloc_value=13997384 +HeapAlloc dt=5 heapalloc_value=14005576 +HeapAlloc dt=6 heapalloc_value=14013768 +HeapAlloc dt=5 heapalloc_value=14021960 +HeapAlloc dt=6 heapalloc_value=14030152 +HeapAlloc dt=6 heapalloc_value=14038344 +HeapAlloc dt=5 heapalloc_value=14046536 +HeapAlloc dt=6 heapalloc_value=14054728 +HeapAlloc dt=5 heapalloc_value=14062920 +HeapAlloc dt=6 heapalloc_value=14071112 +HeapAlloc dt=5 heapalloc_value=14079304 +HeapAlloc dt=5 heapalloc_value=14087496 +HeapAlloc dt=76 heapalloc_value=14095688 +HeapAlloc dt=35 heapalloc_value=14103880 +HeapAlloc dt=7 heapalloc_value=14112072 +HeapAlloc dt=5 heapalloc_value=14120264 +HeapAlloc dt=6 heapalloc_value=14128456 +HeapAlloc dt=7 heapalloc_value=14136648 +HeapAlloc dt=5 heapalloc_value=14144840 +HeapAlloc dt=5 heapalloc_value=14153032 +HeapAlloc dt=6 heapalloc_value=14161224 +HeapAlloc dt=5 heapalloc_value=14169416 +HeapAlloc dt=6 heapalloc_value=14177608 +HeapAlloc dt=10 heapalloc_value=14185800 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=108 +ProcStart dt=17296 p=2 p_seq=10 +ProcStop dt=12 +ProcStart dt=3626 p=0 p_seq=22 +ProcStop dt=8 +ProcStart dt=16715 p=0 p_seq=23 +GoUnblock dt=6 g=1 g_seq=48 stack=0 +GoStart dt=79 g=1 g_seq=49 +HeapAlloc dt=15 heapalloc_value=15553864 +HeapAlloc dt=13 heapalloc_value=15562056 +HeapAlloc dt=15 heapalloc_value=15570248 +HeapAlloc dt=7 heapalloc_value=15578440 +HeapAlloc dt=6 heapalloc_value=15586632 +HeapAlloc dt=6 heapalloc_value=15594824 +HeapAlloc dt=6 
heapalloc_value=15603016 +HeapAlloc dt=6 heapalloc_value=15611208 +HeapAlloc dt=5 heapalloc_value=15619400 +HeapAlloc dt=6 heapalloc_value=15627592 +HeapAlloc dt=6 heapalloc_value=15635784 +HeapAlloc dt=5 heapalloc_value=15643976 +HeapAlloc dt=6 heapalloc_value=15652168 +HeapAlloc dt=5 heapalloc_value=15660360 +HeapAlloc dt=6 heapalloc_value=15668552 +HeapAlloc dt=6 heapalloc_value=15676744 +HeapAlloc dt=57 heapalloc_value=15684936 +HeapAlloc dt=7 heapalloc_value=15693128 +HeapAlloc dt=6 heapalloc_value=15701320 +HeapAlloc dt=6 heapalloc_value=15709512 +HeapAlloc dt=5 heapalloc_value=15717704 +HeapAlloc dt=6 heapalloc_value=15725896 +HeapAlloc dt=5 heapalloc_value=15734088 +HeapAlloc dt=6 heapalloc_value=15742280 +HeapAlloc dt=6 heapalloc_value=15750472 +HeapAlloc dt=10 heapalloc_value=15758664 +HeapAlloc dt=6 heapalloc_value=15766856 +HeapAlloc dt=6 heapalloc_value=15775048 +HeapAlloc dt=5 heapalloc_value=15783240 +HeapAlloc dt=6 heapalloc_value=15791432 +HeapAlloc dt=6 heapalloc_value=15799624 +HeapAlloc dt=6 heapalloc_value=15807816 +HeapAlloc dt=6 heapalloc_value=15816008 +HeapAlloc dt=7 heapalloc_value=15824200 +HeapAlloc dt=6 heapalloc_value=15832392 +HeapAlloc dt=6 heapalloc_value=15840584 +HeapAlloc dt=5 heapalloc_value=15848776 +HeapAlloc dt=6 heapalloc_value=15856968 +HeapAlloc dt=6 heapalloc_value=15865160 +HeapAlloc dt=6 heapalloc_value=15873352 +HeapAlloc dt=5 heapalloc_value=15881544 +HeapAlloc dt=6 heapalloc_value=15889736 +HeapAlloc dt=6 heapalloc_value=15897928 +HeapAlloc dt=5 heapalloc_value=15906120 +HeapAlloc dt=6 heapalloc_value=15914312 +HeapAlloc dt=5 heapalloc_value=15922504 +HeapAlloc dt=6 heapalloc_value=15930696 +HeapAlloc dt=5 heapalloc_value=15938888 +HeapAlloc dt=6 heapalloc_value=15947080 +HeapAlloc dt=5 heapalloc_value=15955272 +HeapAlloc dt=6 heapalloc_value=15963464 +HeapAlloc dt=6 heapalloc_value=15971656 +HeapAlloc dt=5 heapalloc_value=15979848 +HeapAlloc dt=6 heapalloc_value=15988040 +HeapAlloc dt=44 heapalloc_value=15996232 
+HeapAlloc dt=8 heapalloc_value=16004424 +HeapAlloc dt=5 heapalloc_value=16012616 +HeapAlloc dt=6 heapalloc_value=16020808 +HeapAlloc dt=5 heapalloc_value=16029000 +HeapAlloc dt=6 heapalloc_value=16037192 +HeapAlloc dt=5 heapalloc_value=16045384 +HeapAlloc dt=6 heapalloc_value=16053576 +HeapAlloc dt=5 heapalloc_value=16061768 +HeapAlloc dt=6 heapalloc_value=16069960 +HeapAlloc dt=5 heapalloc_value=16078152 +HeapAlloc dt=6 heapalloc_value=16086344 +HeapAlloc dt=5 heapalloc_value=16094536 +HeapAlloc dt=6 heapalloc_value=16102728 +HeapAlloc dt=36 heapalloc_value=16110920 +HeapAlloc dt=8 heapalloc_value=16119112 +HeapAlloc dt=6 heapalloc_value=16127304 +HeapAlloc dt=5 heapalloc_value=16135496 +HeapAlloc dt=6 heapalloc_value=16143688 +HeapAlloc dt=5 heapalloc_value=16151880 +HeapAlloc dt=5 heapalloc_value=16160072 +HeapAlloc dt=5 heapalloc_value=16168264 +HeapAlloc dt=5 heapalloc_value=16176456 +HeapAlloc dt=5 heapalloc_value=16184648 +HeapAlloc dt=6 heapalloc_value=16192840 +HeapAlloc dt=5 heapalloc_value=16201032 +HeapAlloc dt=5 heapalloc_value=16209224 +HeapAlloc dt=5 heapalloc_value=16217416 +HeapAlloc dt=5 heapalloc_value=16225608 +HeapAlloc dt=6 heapalloc_value=16233800 +HeapAlloc dt=5 heapalloc_value=16241992 +HeapAlloc dt=73 heapalloc_value=16250184 +HeapAlloc dt=6 heapalloc_value=16258376 +HeapAlloc dt=5 heapalloc_value=16266568 +HeapAlloc dt=6 heapalloc_value=16274760 +HeapAlloc dt=371 heapalloc_value=16282952 +HeapAlloc dt=13 heapalloc_value=16291144 +HeapAlloc dt=7 heapalloc_value=16299336 +HeapAlloc dt=6 heapalloc_value=16307528 +HeapAlloc dt=6 heapalloc_value=16315720 +HeapAlloc dt=5 heapalloc_value=16323912 +HeapAlloc dt=6 heapalloc_value=16332104 +HeapAlloc dt=5 heapalloc_value=16340296 +HeapAlloc dt=5 heapalloc_value=16348488 +HeapAlloc dt=22 heapalloc_value=16356680 +HeapAlloc dt=6 heapalloc_value=16364872 +HeapAlloc dt=5 heapalloc_value=16373064 +HeapAlloc dt=6 heapalloc_value=16381256 +HeapAlloc dt=5 heapalloc_value=16389448 +HeapAlloc dt=5 
heapalloc_value=16397640 +HeapAlloc dt=5 heapalloc_value=16405832 +HeapAlloc dt=5 heapalloc_value=16414024 +HeapAlloc dt=5 heapalloc_value=16422216 +HeapAlloc dt=6 heapalloc_value=16430408 +HeapAlloc dt=5 heapalloc_value=16438600 +HeapAlloc dt=6 heapalloc_value=16446792 +HeapAlloc dt=5 heapalloc_value=16454984 +HeapAlloc dt=5 heapalloc_value=16463176 +HeapAlloc dt=6 heapalloc_value=16471368 +HeapAlloc dt=5 heapalloc_value=16479560 +HeapAlloc dt=5 heapalloc_value=16487752 +HeapAlloc dt=5 heapalloc_value=16495944 +HeapAlloc dt=6 heapalloc_value=16504136 +HeapAlloc dt=5 heapalloc_value=16512328 +HeapAlloc dt=45 heapalloc_value=16520520 +HeapAlloc dt=38 heapalloc_value=16528712 +HeapAlloc dt=7 heapalloc_value=16536904 +HeapAlloc dt=5 heapalloc_value=16545096 +HeapAlloc dt=5 heapalloc_value=16553288 +HeapAlloc dt=6 heapalloc_value=16561480 +HeapAlloc dt=5 heapalloc_value=16569672 +GoBlock dt=11 reason_string=19 stack=21 +ProcStop dt=109 +ProcStart dt=18122 p=2 p_seq=12 +ProcStop dt=23 +ProcStart dt=803 p=1 p_seq=12 +GoUnblock dt=12 g=24 g_seq=10 stack=0 +GoStart dt=143 g=24 g_seq=11 +GoLabel dt=2 label_string=2 +GoBlock dt=3389 reason_string=15 stack=27 +ProcStop dt=2403 +ProcStart dt=161103 p=4 p_seq=8 +GoStart dt=172 g=38 g_seq=1 +GoStop dt=304901 reason_string=16 stack=50 +GoStart dt=21 g=38 g_seq=2 +GoStop dt=315468 reason_string=16 stack=50 +GoStart dt=20 g=38 g_seq=3 +GoDestroy dt=160861 +ProcStop dt=34 +EventBatch gen=1 m=1709044 time=7689670489757 size=2312 +ProcStart dt=310 p=3 p_seq=2 +ProcStop dt=39 +ProcStart dt=1386 p=3 p_seq=3 +ProcStop dt=138 +ProcStart dt=3920 p=0 p_seq=5 +GoStart dt=266 g=24 g_seq=7 +GoUnblock dt=50 g=1 g_seq=25 stack=41 +GoBlock dt=13 reason_string=15 stack=27 +GoStart dt=7 g=1 g_seq=26 +GCMarkAssistEnd dt=6 +HeapAlloc dt=29 heapalloc_value=3843824 +GCSweepBegin dt=57 stack=42 +GCSweepEnd dt=816 swept_value=827392 reclaimed_value=0 +GCSweepBegin dt=310 stack=43 +GCSweepEnd dt=63 swept_value=67108864 reclaimed_value=0 +HeapAlloc dt=23 
heapalloc_value=3852016 +HeapAlloc dt=46 heapalloc_value=3860208 +HeapAlloc dt=27 heapalloc_value=3868400 +HeapAlloc dt=16 heapalloc_value=3876592 +HeapAlloc dt=109 heapalloc_value=3884784 +HeapAlloc dt=32 heapalloc_value=3892976 +HeapAlloc dt=33 heapalloc_value=3901168 +HeapAlloc dt=26 heapalloc_value=3909360 +HeapAlloc dt=35 heapalloc_value=3917552 +HeapAlloc dt=16 heapalloc_value=3925744 +HeapAlloc dt=16 heapalloc_value=3933936 +HeapAlloc dt=16 heapalloc_value=3942128 +HeapAlloc dt=68 heapalloc_value=3950320 +HeapAlloc dt=21 heapalloc_value=3958512 +HeapAlloc dt=20 heapalloc_value=3966704 +HeapAlloc dt=15 heapalloc_value=3974896 +HeapAlloc dt=24 heapalloc_value=3983088 +HeapAlloc dt=15 heapalloc_value=3991280 +HeapAlloc dt=16 heapalloc_value=3999472 +HeapAlloc dt=15 heapalloc_value=4007664 +HeapAlloc dt=18 heapalloc_value=4015856 +HeapAlloc dt=15 heapalloc_value=4024048 +HeapAlloc dt=21 heapalloc_value=4032240 +HeapAlloc dt=26 heapalloc_value=4040432 +HeapAlloc dt=28 heapalloc_value=4048624 +HeapAlloc dt=16 heapalloc_value=4056816 +HeapAlloc dt=16 heapalloc_value=4065008 +HeapAlloc dt=16 heapalloc_value=4073200 +HeapAlloc dt=17 heapalloc_value=4081392 +HeapAlloc dt=15 heapalloc_value=4089584 +HeapAlloc dt=19 heapalloc_value=4097776 +HeapAlloc dt=15 heapalloc_value=4105968 +HeapAlloc dt=20 heapalloc_value=4114160 +HeapAlloc dt=15 heapalloc_value=4122352 +HeapAlloc dt=16 heapalloc_value=4130544 +HeapAlloc dt=16 heapalloc_value=4138736 +HeapAlloc dt=17 heapalloc_value=4146928 +HeapAlloc dt=15 heapalloc_value=4155120 +HeapAlloc dt=20 heapalloc_value=4163312 +HeapAlloc dt=18 heapalloc_value=4171504 +HeapAlloc dt=23 heapalloc_value=4179696 +HeapAlloc dt=18 heapalloc_value=4187888 +HeapAlloc dt=20 heapalloc_value=4196080 +HeapAlloc dt=19 heapalloc_value=4204272 +HeapAlloc dt=19 heapalloc_value=4212464 +HeapAlloc dt=105 heapalloc_value=4220656 +HeapAlloc dt=45 heapalloc_value=4228848 +HeapAlloc dt=22 heapalloc_value=4237040 +HeapAlloc dt=23 heapalloc_value=4245232 
+HeapAlloc dt=29 heapalloc_value=4253424 +HeapAlloc dt=21 heapalloc_value=4261616 +HeapAlloc dt=56 heapalloc_value=4269808 +HeapAlloc dt=21 heapalloc_value=4278000 +HeapAlloc dt=25 heapalloc_value=4286192 +HeapAlloc dt=15 heapalloc_value=4294384 +HeapAlloc dt=60 heapalloc_value=4302576 +HeapAlloc dt=40 heapalloc_value=4359920 +HeapAlloc dt=152 heapalloc_value=4368112 +HeapAlloc dt=30 heapalloc_value=4376304 +HeapAlloc dt=27 heapalloc_value=4384496 +HeapAlloc dt=20 heapalloc_value=4392688 +HeapAlloc dt=32 heapalloc_value=4400880 +HeapAlloc dt=25 heapalloc_value=4409072 +HeapAlloc dt=48 heapalloc_value=4417264 +HeapAlloc dt=58 heapalloc_value=4425456 +HeapAlloc dt=30 heapalloc_value=4433648 +HeapAlloc dt=23 heapalloc_value=4441840 +HeapAlloc dt=16 heapalloc_value=4450032 +HeapAlloc dt=17 heapalloc_value=4458224 +HeapAlloc dt=16 heapalloc_value=4466416 +HeapAlloc dt=19 heapalloc_value=4474608 +HeapAlloc dt=16 heapalloc_value=4482800 +HeapAlloc dt=15 heapalloc_value=4490992 +HeapAlloc dt=16 heapalloc_value=4499184 +HeapAlloc dt=16 heapalloc_value=4507376 +HeapAlloc dt=15 heapalloc_value=4515568 +HeapAlloc dt=16 heapalloc_value=4523760 +HeapAlloc dt=16 heapalloc_value=4531952 +HeapAlloc dt=21 heapalloc_value=4540144 +HeapAlloc dt=25 heapalloc_value=4548336 +HeapAlloc dt=22 heapalloc_value=4556528 +HeapAlloc dt=59 heapalloc_value=4564720 +HeapAlloc dt=21 heapalloc_value=4572912 +HeapAlloc dt=16 heapalloc_value=4581104 +HeapAlloc dt=16 heapalloc_value=4589296 +HeapAlloc dt=15 heapalloc_value=4597488 +HeapAlloc dt=24 heapalloc_value=4605680 +HeapAlloc dt=12 heapalloc_value=4613872 +HeapAlloc dt=8 heapalloc_value=4622064 +HeapAlloc dt=11 heapalloc_value=4630256 +HeapAlloc dt=7 heapalloc_value=4638448 +HeapAlloc dt=7 heapalloc_value=4646640 +HeapAlloc dt=7 heapalloc_value=4654832 +GoBlock dt=31 reason_string=19 stack=21 +ProcStop dt=34 +ProcStart dt=6196 p=4 p_seq=2 +ProcStop dt=26 +ProcStart dt=1578 p=0 p_seq=7 +ProcStop dt=12 +ProcStart dt=16743 p=0 p_seq=8 +GoUnblock 
dt=21 g=1 g_seq=29 stack=0 +GoStart dt=147 g=1 g_seq=30 +HeapAlloc dt=51 heapalloc_value=5768944 +HeapAlloc dt=22 heapalloc_value=5777136 +HeapAlloc dt=16 heapalloc_value=5785328 +HeapAlloc dt=15 heapalloc_value=5793520 +HeapAlloc dt=16 heapalloc_value=5801712 +HeapAlloc dt=18 heapalloc_value=5809904 +HeapAlloc dt=15 heapalloc_value=5818096 +HeapAlloc dt=15 heapalloc_value=5826288 +HeapAlloc dt=12 heapalloc_value=5834480 +HeapAlloc dt=12 heapalloc_value=5842672 +HeapAlloc dt=15 heapalloc_value=5850864 +HeapAlloc dt=16 heapalloc_value=5859056 +HeapAlloc dt=12 heapalloc_value=5867248 +HeapAlloc dt=12 heapalloc_value=5875440 +HeapAlloc dt=6 heapalloc_value=5883632 +HeapAlloc dt=8 heapalloc_value=5891824 +HeapAlloc dt=6 heapalloc_value=5900016 +HeapAlloc dt=6 heapalloc_value=5908208 +HeapAlloc dt=98 heapalloc_value=5916400 +HeapAlloc dt=21 heapalloc_value=5924592 +HeapAlloc dt=5 heapalloc_value=5932784 +HeapAlloc dt=7 heapalloc_value=5940976 +HeapAlloc dt=6 heapalloc_value=5949168 +HeapAlloc dt=9 heapalloc_value=5957360 +HeapAlloc dt=6 heapalloc_value=5965552 +HeapAlloc dt=5 heapalloc_value=5973744 +HeapAlloc dt=7 heapalloc_value=5981936 +HeapAlloc dt=5 heapalloc_value=5990128 +HeapAlloc dt=6 heapalloc_value=5998320 +HeapAlloc dt=5 heapalloc_value=6006512 +HeapAlloc dt=6 heapalloc_value=6014704 +HeapAlloc dt=9 heapalloc_value=6022896 +HeapAlloc dt=5 heapalloc_value=6031088 +HeapAlloc dt=6 heapalloc_value=6039280 +HeapAlloc dt=6 heapalloc_value=6047472 +HeapAlloc dt=40 heapalloc_value=6055664 +HeapAlloc dt=6 heapalloc_value=6063856 +HeapAlloc dt=35 heapalloc_value=6072048 +HeapAlloc dt=8 heapalloc_value=6080240 +HeapAlloc dt=9 heapalloc_value=6088432 +HeapAlloc dt=5 heapalloc_value=6096624 +HeapAlloc dt=6 heapalloc_value=6104816 +HeapAlloc dt=5 heapalloc_value=6113008 +HeapAlloc dt=6 heapalloc_value=6121200 +HeapAlloc dt=6 heapalloc_value=6129392 +HeapAlloc dt=6 heapalloc_value=6137584 +HeapAlloc dt=5 heapalloc_value=6145776 +HeapAlloc dt=9 heapalloc_value=6153968 
+HeapAlloc dt=5 heapalloc_value=6162160 +HeapAlloc dt=6 heapalloc_value=6170352 +HeapAlloc dt=6 heapalloc_value=6178544 +HeapAlloc dt=8 heapalloc_value=6186736 +HeapAlloc dt=11 heapalloc_value=6301424 +HeapAlloc dt=2483 heapalloc_value=6309616 +HeapAlloc dt=9 heapalloc_value=6317808 +HeapAlloc dt=7 heapalloc_value=6326000 +HeapAlloc dt=11 heapalloc_value=6334192 +HeapAlloc dt=6 heapalloc_value=6342384 +HeapAlloc dt=6 heapalloc_value=6350576 +HeapAlloc dt=6 heapalloc_value=6358768 +HeapAlloc dt=7 heapalloc_value=6366960 +HeapAlloc dt=9 heapalloc_value=6375152 +HeapAlloc dt=5 heapalloc_value=6383344 +HeapAlloc dt=6 heapalloc_value=6391536 +HeapAlloc dt=6 heapalloc_value=6399728 +HeapAlloc dt=5 heapalloc_value=6407920 +HeapAlloc dt=5 heapalloc_value=6416112 +HeapAlloc dt=6 heapalloc_value=6424304 +HeapAlloc dt=9 heapalloc_value=6432496 +HeapAlloc dt=8 heapalloc_value=6440688 +HeapAlloc dt=9 heapalloc_value=6448880 +HeapAlloc dt=6 heapalloc_value=6457072 +HeapAlloc dt=13 heapalloc_value=6465264 +HeapAlloc dt=6 heapalloc_value=6473456 +HeapAlloc dt=5 heapalloc_value=6481648 +HeapAlloc dt=6 heapalloc_value=6489840 +HeapAlloc dt=5 heapalloc_value=6498032 +HeapAlloc dt=6 heapalloc_value=6506224 +HeapAlloc dt=8 heapalloc_value=6514416 +HeapAlloc dt=6 heapalloc_value=6522608 +HeapAlloc dt=6 heapalloc_value=6530800 +HeapAlloc dt=5 heapalloc_value=6538992 +HeapAlloc dt=81 heapalloc_value=6547184 +HeapAlloc dt=7 heapalloc_value=6555376 +HeapAlloc dt=6 heapalloc_value=6563568 +HeapAlloc dt=5 heapalloc_value=6571760 +HeapAlloc dt=20 heapalloc_value=6579952 +HeapAlloc dt=6 heapalloc_value=6588144 +HeapAlloc dt=56 heapalloc_value=6596336 +HeapAlloc dt=7 heapalloc_value=6604528 +HeapAlloc dt=7 heapalloc_value=6612720 +HeapAlloc dt=6 heapalloc_value=6620912 +HeapAlloc dt=5 heapalloc_value=6629104 +HeapAlloc dt=5 heapalloc_value=6637296 +HeapAlloc dt=6 heapalloc_value=6645488 +HeapAlloc dt=5 heapalloc_value=6653680 +HeapAlloc dt=5 heapalloc_value=6661872 +HeapAlloc dt=6 
heapalloc_value=6670064 +HeapAlloc dt=5 heapalloc_value=6678256 +HeapAlloc dt=5 heapalloc_value=6686448 +HeapAlloc dt=6 heapalloc_value=6694640 +HeapAlloc dt=5 heapalloc_value=6702832 +HeapAlloc dt=5 heapalloc_value=6711024 +HeapAlloc dt=6 heapalloc_value=6719216 +HeapAlloc dt=9 heapalloc_value=6727408 +HeapAlloc dt=7 heapalloc_value=6735600 +HeapAlloc dt=5 heapalloc_value=6743792 +HeapAlloc dt=5 heapalloc_value=6751984 +HeapAlloc dt=6 heapalloc_value=6760176 +HeapAlloc dt=5 heapalloc_value=6768368 +HeapAlloc dt=5 heapalloc_value=6776560 +HeapAlloc dt=6 heapalloc_value=6784752 +HeapAlloc dt=5 heapalloc_value=6792944 +HeapAlloc dt=6 heapalloc_value=6801136 +HeapAlloc dt=36 heapalloc_value=6809328 +HeapAlloc dt=7 heapalloc_value=6817520 +HeapAlloc dt=5 heapalloc_value=6825712 +HeapAlloc dt=6 heapalloc_value=6833904 +HeapAlloc dt=6 heapalloc_value=6842096 +HeapAlloc dt=5 heapalloc_value=6850288 +HeapAlloc dt=6 heapalloc_value=6858480 +HeapAlloc dt=5 heapalloc_value=6866672 +HeapAlloc dt=5 heapalloc_value=6874864 +HeapAlloc dt=5 heapalloc_value=6883056 +HeapAlloc dt=5 heapalloc_value=6891248 +HeapAlloc dt=6 heapalloc_value=6899440 +GoBlock dt=14 reason_string=19 stack=21 +ProcStop dt=198 +ProcStart dt=2996 p=0 p_seq=10 +GoUnblock dt=12 g=1 g_seq=31 stack=0 +GoStart dt=135 g=1 g_seq=32 +HeapAlloc dt=25 heapalloc_value=6907632 +HeapAlloc dt=9 heapalloc_value=6915824 +HeapAlloc dt=6 heapalloc_value=6924016 +HeapAlloc dt=5 heapalloc_value=6932208 +HeapAlloc dt=6 heapalloc_value=6940400 +HeapAlloc dt=5 heapalloc_value=6948592 +HeapAlloc dt=5 heapalloc_value=6956784 +HeapAlloc dt=6 heapalloc_value=6964976 +HeapAlloc dt=5 heapalloc_value=6973168 +HeapAlloc dt=6 heapalloc_value=6981360 +HeapAlloc dt=5 heapalloc_value=6989552 +HeapAlloc dt=5 heapalloc_value=6997744 +HeapAlloc dt=5 heapalloc_value=7005936 +HeapAlloc dt=97 heapalloc_value=7014128 +HeapAlloc dt=7 heapalloc_value=7022320 +HeapAlloc dt=5 heapalloc_value=7030512 +HeapAlloc dt=6 heapalloc_value=7038704 +HeapAlloc dt=5 
heapalloc_value=7046896 +HeapAlloc dt=5 heapalloc_value=7055088 +HeapAlloc dt=5 heapalloc_value=7063280 +HeapAlloc dt=50 heapalloc_value=7071472 +HeapAlloc dt=7 heapalloc_value=7079664 +HeapAlloc dt=6 heapalloc_value=7087856 +HeapAlloc dt=5 heapalloc_value=7096048 +HeapAlloc dt=20 heapalloc_value=7104240 +HeapAlloc dt=6 heapalloc_value=7112432 +HeapAlloc dt=8 heapalloc_value=7120624 +HeapAlloc dt=6 heapalloc_value=7128816 +HeapAlloc dt=5 heapalloc_value=7137008 +HeapAlloc dt=6 heapalloc_value=7145200 +HeapAlloc dt=8 heapalloc_value=7153392 +HeapAlloc dt=6 heapalloc_value=7161584 +HeapAlloc dt=5 heapalloc_value=7169776 +HeapAlloc dt=5 heapalloc_value=7177968 +HeapAlloc dt=6 heapalloc_value=7186160 +HeapAlloc dt=5 heapalloc_value=7194352 +HeapAlloc dt=5 heapalloc_value=7202544 +HeapAlloc dt=6 heapalloc_value=7210736 +HeapAlloc dt=5 heapalloc_value=7218928 +HeapAlloc dt=35 heapalloc_value=7227120 +HeapAlloc dt=10 heapalloc_value=7235312 +HeapAlloc dt=5 heapalloc_value=7243504 +HeapAlloc dt=5 heapalloc_value=7251696 +HeapAlloc dt=6 heapalloc_value=7259888 +HeapAlloc dt=5 heapalloc_value=7268080 +HeapAlloc dt=5 heapalloc_value=7276272 +HeapAlloc dt=5 heapalloc_value=7284464 +HeapAlloc dt=6 heapalloc_value=7292656 +HeapAlloc dt=6 heapalloc_value=7300848 +HeapAlloc dt=5 heapalloc_value=7309040 +HeapAlloc dt=13 heapalloc_value=7317232 +HeapAlloc dt=5 heapalloc_value=7325424 +HeapAlloc dt=6 heapalloc_value=7333616 +HeapAlloc dt=8 heapalloc_value=7341808 +HeapAlloc dt=5 heapalloc_value=7350000 +HeapAlloc dt=9 heapalloc_value=7358192 +HeapAlloc dt=5 heapalloc_value=7366384 +HeapAlloc dt=6 heapalloc_value=7374576 +HeapAlloc dt=5 heapalloc_value=7382768 +HeapAlloc dt=5 heapalloc_value=7390960 +HeapAlloc dt=5 heapalloc_value=7399152 +HeapAlloc dt=6 heapalloc_value=7407344 +HeapAlloc dt=5 heapalloc_value=7415536 +HeapAlloc dt=5 heapalloc_value=7423728 +HeapAlloc dt=6 heapalloc_value=7431920 +HeapAlloc dt=5 heapalloc_value=7440112 +HeapAlloc dt=5 heapalloc_value=7448304 +HeapAlloc 
dt=5 heapalloc_value=7456496 +HeapAlloc dt=6 heapalloc_value=7464688 +HeapAlloc dt=5 heapalloc_value=7472880 +HeapAlloc dt=5 heapalloc_value=7481072 +HeapAlloc dt=5 heapalloc_value=7489264 +HeapAlloc dt=6 heapalloc_value=7497456 +HeapAlloc dt=5 heapalloc_value=7505648 +HeapAlloc dt=5 heapalloc_value=7513840 +HeapAlloc dt=5 heapalloc_value=7522032 +HeapAlloc dt=5 heapalloc_value=7530224 +HeapAlloc dt=6 heapalloc_value=7538416 +HeapAlloc dt=5 heapalloc_value=7546608 +HeapAlloc dt=6 heapalloc_value=7554800 +HeapAlloc dt=5 heapalloc_value=7562992 +HeapAlloc dt=5 heapalloc_value=7571184 +HeapAlloc dt=6 heapalloc_value=7579376 +HeapAlloc dt=5 heapalloc_value=7587568 +HeapAlloc dt=45 heapalloc_value=7595760 +HeapAlloc dt=7 heapalloc_value=7603952 +HeapAlloc dt=5 heapalloc_value=7612144 +HeapAlloc dt=6 heapalloc_value=7620336 +HeapAlloc dt=376 heapalloc_value=7628528 +HeapAlloc dt=13 heapalloc_value=7636720 +HeapAlloc dt=7 heapalloc_value=7644912 +HeapAlloc dt=35 heapalloc_value=7653104 +GCBegin dt=23 gc_seq=3 stack=22 +STWBegin dt=73 kind_string=22 stack=28 +GoUnblock dt=258 g=4 g_seq=5 stack=29 +ProcsChange dt=80 procs_value=8 stack=30 +STWEnd dt=37 +GCMarkAssistBegin dt=96 stack=31 +GCMarkAssistEnd dt=4606 +HeapAlloc dt=187 heapalloc_value=7671600 +HeapAlloc dt=26 heapalloc_value=7679792 +HeapAlloc dt=17 heapalloc_value=7687984 +HeapAlloc dt=29 heapalloc_value=7696176 +HeapAlloc dt=16 heapalloc_value=7704368 +HeapAlloc dt=12 heapalloc_value=7712560 +HeapAlloc dt=48 heapalloc_value=7868208 +GoStop dt=4635 reason_string=16 stack=45 +GoStart dt=48 g=1 g_seq=33 +HeapAlloc dt=27 heapalloc_value=7884336 +HeapAlloc dt=11 heapalloc_value=7892528 +HeapAlloc dt=8 heapalloc_value=7900720 +HeapAlloc dt=12 heapalloc_value=7908912 +HeapAlloc dt=9 heapalloc_value=7917104 +HeapAlloc dt=9 heapalloc_value=7925296 +HeapAlloc dt=9 heapalloc_value=7933488 +HeapAlloc dt=8 heapalloc_value=7941680 +HeapAlloc dt=10 heapalloc_value=7949872 +HeapAlloc dt=8 heapalloc_value=7958064 +HeapAlloc dt=10 
heapalloc_value=7966256 +HeapAlloc dt=12 heapalloc_value=7974448 +HeapAlloc dt=8 heapalloc_value=7982640 +HeapAlloc dt=8 heapalloc_value=7990832 +HeapAlloc dt=9 heapalloc_value=7999024 +HeapAlloc dt=8 heapalloc_value=8007216 +HeapAlloc dt=54 heapalloc_value=8015408 +HeapAlloc dt=10 heapalloc_value=8023600 +HeapAlloc dt=8 heapalloc_value=8031792 +HeapAlloc dt=9 heapalloc_value=8039984 +HeapAlloc dt=8 heapalloc_value=8048176 +HeapAlloc dt=9 heapalloc_value=8056368 +HeapAlloc dt=8 heapalloc_value=8064560 +HeapAlloc dt=9 heapalloc_value=8072752 +HeapAlloc dt=8 heapalloc_value=8080944 +HeapAlloc dt=9 heapalloc_value=8089136 +HeapAlloc dt=8 heapalloc_value=8097328 +GoBlock dt=20 reason_string=19 stack=21 +ProcStop dt=35 +ProcStart dt=147580 p=3 p_seq=6 +GoStart dt=144 g=4 g_seq=10 +GoBlock dt=38 reason_string=15 stack=32 +GoUnblock dt=41 g=25 g_seq=4 stack=0 +GoStart dt=6 g=25 g_seq=5 +GoLabel dt=1 label_string=4 +GoBlock dt=5825 reason_string=15 stack=27 +ProcStop dt=299 +ProcStart dt=158874 p=3 p_seq=7 +GoStart dt=231 g=35 g_seq=1 +GoStop dt=305629 reason_string=16 stack=51 +GoStart dt=79 g=35 g_seq=2 +GoStop dt=315206 reason_string=16 stack=50 +GoStart dt=36 g=35 g_seq=3 +GoDestroy dt=160337 +ProcStop dt=68 +EventBatch gen=1 m=1709042 time=7689670149213 size=4550 +ProcStart dt=287 p=2 p_seq=1 +GoStart dt=328 g=7 g_seq=1 +HeapAlloc dt=7006 heapalloc_value=2793472 +HeapAlloc dt=74 heapalloc_value=2801664 +GoBlock dt=275 reason_string=12 stack=18 +ProcStop dt=34 +ProcStart dt=327698 p=0 p_seq=3 +ProcStop dt=7 +ProcStart dt=2124 p=2 p_seq=3 +GoUnblock dt=32 g=24 g_seq=2 stack=0 +HeapAlloc dt=302 heapalloc_value=4038656 +HeapAlloc dt=104 heapalloc_value=4046848 +HeapAlloc dt=52 heapalloc_value=4055040 +GoStart dt=1147 g=24 g_seq=3 +GoLabel dt=5 label_string=2 +GoBlock dt=128 reason_string=15 stack=27 +GoUnblock dt=72 g=1 g_seq=21 stack=0 +GoStart dt=11 g=1 g_seq=22 +HeapAlloc dt=44 heapalloc_value=4063232 +HeapAlloc dt=43 heapalloc_value=4071424 +HeapAlloc dt=28 
heapalloc_value=4079616 +HeapAlloc dt=24 heapalloc_value=4087808 +HeapAlloc dt=84 heapalloc_value=4096000 +HeapAlloc dt=25 heapalloc_value=4104192 +HeapAlloc dt=20 heapalloc_value=4112384 +HeapAlloc dt=24 heapalloc_value=4120576 +HeapAlloc dt=20 heapalloc_value=4128768 +HeapAlloc dt=19 heapalloc_value=4136960 +HeapAlloc dt=24 heapalloc_value=4145152 +HeapAlloc dt=20 heapalloc_value=4153344 +HeapAlloc dt=19 heapalloc_value=4161536 +HeapAlloc dt=20 heapalloc_value=4169728 +HeapAlloc dt=24 heapalloc_value=4177920 +HeapAlloc dt=33 heapalloc_value=4186112 +HeapAlloc dt=26 heapalloc_value=4194304 +HeapAlloc dt=31 heapalloc_value=4235264 +HeapAlloc dt=363 heapalloc_value=4243456 +HeapAlloc dt=61 heapalloc_value=4251648 +HeapAlloc dt=14 heapalloc_value=4259840 +HeapAlloc dt=12 heapalloc_value=4268032 +HeapAlloc dt=9 heapalloc_value=4276224 +HeapAlloc dt=9 heapalloc_value=4284416 +HeapAlloc dt=9 heapalloc_value=4292608 +HeapAlloc dt=8 heapalloc_value=4300800 +HeapAlloc dt=162 heapalloc_value=4308992 +HeapAlloc dt=14 heapalloc_value=4317184 +HeapAlloc dt=8 heapalloc_value=4325376 +HeapAlloc dt=53 heapalloc_value=4333568 +HeapAlloc dt=10 heapalloc_value=4341760 +HeapAlloc dt=16 heapalloc_value=4349952 +HeapAlloc dt=14 heapalloc_value=4358144 +GCMarkAssistBegin dt=27 stack=31 +GCMarkAssistEnd dt=18 +GCMarkAssistBegin dt=4 stack=31 +GoBlock dt=198 reason_string=13 stack=33 +ProcStop dt=19 +ProcStart dt=387 p=2 p_seq=4 +GoUnblock dt=265 g=24 g_seq=4 stack=0 +GoStart dt=69 g=24 g_seq=5 +GoLabel dt=1 label_string=2 +GoBlock dt=132 reason_string=10 stack=35 +GoStart dt=20 g=1 g_seq=24 +GCMarkAssistEnd dt=2 +HeapAlloc dt=13 heapalloc_value=4366336 +GCMarkAssistBegin dt=7 stack=31 +GoBlock dt=25 reason_string=10 stack=36 +ProcStop dt=24 +ProcStart dt=4689 p=1 p_seq=7 +ProcStop dt=23 +ProcStart dt=36183 p=1 p_seq=8 +ProcStop dt=24 +ProcStart dt=1076 p=1 p_seq=9 +GoUnblock dt=12 g=22 g_seq=4 stack=0 +GoStart dt=118 g=22 g_seq=5 +GoLabel dt=1 label_string=2 +GoBlock dt=7117 
reason_string=15 stack=27 +ProcStop dt=41 +ProcStart dt=150567 p=4 p_seq=7 +GoUnblock dt=41 g=23 g_seq=4 stack=0 +HeapAlloc dt=108 heapalloc_value=17163592 +HeapAlloc dt=61 heapalloc_value=17166856 +HeapAlloc dt=2994 heapalloc_value=17608712 +GoStart dt=1008 g=23 g_seq=5 +GoLabel dt=4 label_string=4 +GoBlock dt=40 reason_string=15 stack=27 +GoUnblock dt=49 g=1 g_seq=52 stack=0 +GoStart dt=7 g=1 g_seq=53 +HeapAlloc dt=30 heapalloc_value=17616904 +HeapAlloc dt=52 heapalloc_value=17625096 +HeapAlloc dt=35 heapalloc_value=17633288 +HeapAlloc dt=27 heapalloc_value=17641480 +HeapAlloc dt=28 heapalloc_value=17649672 +HeapAlloc dt=87 heapalloc_value=17657864 +HeapAlloc dt=32 heapalloc_value=17666056 +HeapAlloc dt=24 heapalloc_value=17674248 +HeapAlloc dt=22 heapalloc_value=17682440 +HeapAlloc dt=16 heapalloc_value=17690632 +HeapAlloc dt=15 heapalloc_value=17698824 +HeapAlloc dt=20 heapalloc_value=17707016 +HeapAlloc dt=19 heapalloc_value=17715208 +HeapAlloc dt=15 heapalloc_value=17723400 +HeapAlloc dt=18 heapalloc_value=17731592 +HeapAlloc dt=20 heapalloc_value=17739784 +HeapAlloc dt=15 heapalloc_value=17747976 +HeapAlloc dt=17 heapalloc_value=17756168 +HeapAlloc dt=67 heapalloc_value=17764360 +HeapAlloc dt=28 heapalloc_value=17772552 +HeapAlloc dt=22 heapalloc_value=17780744 +HeapAlloc dt=19 heapalloc_value=17788936 +HeapAlloc dt=22 heapalloc_value=17797128 +HeapAlloc dt=19 heapalloc_value=17805320 +HeapAlloc dt=19 heapalloc_value=17813512 +HeapAlloc dt=19 heapalloc_value=17821704 +HeapAlloc dt=15 heapalloc_value=17829896 +HeapAlloc dt=21 heapalloc_value=17838088 +HeapAlloc dt=19 heapalloc_value=17846280 +HeapAlloc dt=16 heapalloc_value=17854472 +HeapAlloc dt=14 heapalloc_value=17862664 +HeapAlloc dt=18 heapalloc_value=17870856 +HeapAlloc dt=58 heapalloc_value=17879048 +HeapAlloc dt=19 heapalloc_value=17887240 +HeapAlloc dt=15 heapalloc_value=17895432 +HeapAlloc dt=19 heapalloc_value=17903624 +HeapAlloc dt=21 heapalloc_value=17911816 +HeapAlloc dt=17 
heapalloc_value=17920008 +HeapAlloc dt=19 heapalloc_value=17928200 +HeapAlloc dt=19 heapalloc_value=17936392 +HeapAlloc dt=16 heapalloc_value=17944584 +HeapAlloc dt=15 heapalloc_value=17952776 +HeapAlloc dt=15 heapalloc_value=17960968 +HeapAlloc dt=19 heapalloc_value=17969160 +HeapAlloc dt=16 heapalloc_value=17977352 +HeapAlloc dt=16 heapalloc_value=17985544 +HeapAlloc dt=16 heapalloc_value=17993736 +HeapAlloc dt=19 heapalloc_value=18001928 +HeapAlloc dt=15 heapalloc_value=18010120 +HeapAlloc dt=16 heapalloc_value=18018312 +HeapAlloc dt=15 heapalloc_value=18026504 +HeapAlloc dt=19 heapalloc_value=18034696 +HeapAlloc dt=14 heapalloc_value=18042888 +HeapAlloc dt=17 heapalloc_value=18051080 +HeapAlloc dt=18 heapalloc_value=18059272 +HeapAlloc dt=20 heapalloc_value=18067464 +HeapAlloc dt=17 heapalloc_value=18075656 +HeapAlloc dt=125 heapalloc_value=18083848 +GoStop dt=20 reason_string=16 stack=46 +GoUnblock dt=288 g=25 g_seq=6 stack=0 +GoStart dt=7 g=25 g_seq=7 +GoLabel dt=1 label_string=2 +HeapAlloc dt=255 heapalloc_value=18091752 +GoBlock dt=30 reason_string=10 stack=35 +GoStart dt=5 g=1 g_seq=54 +HeapAlloc dt=25 heapalloc_value=18099944 +HeapAlloc dt=19 heapalloc_value=18108136 +HeapAlloc dt=45 heapalloc_value=18116328 +HeapAlloc dt=9 heapalloc_value=18124520 +HeapAlloc dt=80 heapalloc_value=18132712 +HeapAlloc dt=11 heapalloc_value=18140904 +HeapAlloc dt=6 heapalloc_value=18149096 +HeapAlloc dt=7 heapalloc_value=18157288 +HeapAlloc dt=7 heapalloc_value=18165480 +HeapAlloc dt=12 heapalloc_value=18173672 +HeapAlloc dt=11 heapalloc_value=18181864 +HeapAlloc dt=11 heapalloc_value=18190056 +HeapAlloc dt=7 heapalloc_value=18198248 +HeapAlloc dt=62 heapalloc_value=18206440 +HeapAlloc dt=8 heapalloc_value=18214632 +HeapAlloc dt=7 heapalloc_value=18222824 +HeapAlloc dt=6 heapalloc_value=18231016 +HeapAlloc dt=7 heapalloc_value=18239208 +HeapAlloc dt=11 heapalloc_value=18247400 +HeapAlloc dt=6 heapalloc_value=18255592 +HeapAlloc dt=7 heapalloc_value=18263784 +HeapAlloc dt=11 
heapalloc_value=18271976 +HeapAlloc dt=6 heapalloc_value=18280168 +HeapAlloc dt=7 heapalloc_value=18288360 +HeapAlloc dt=7 heapalloc_value=18296552 +HeapAlloc dt=6 heapalloc_value=18304744 +HeapAlloc dt=10 heapalloc_value=18312936 +HeapAlloc dt=7 heapalloc_value=18321128 +HeapAlloc dt=7 heapalloc_value=18329320 +HeapAlloc dt=7 heapalloc_value=18337512 +HeapAlloc dt=31 heapalloc_value=18345704 +HeapAlloc dt=17 heapalloc_value=18353896 +HeapAlloc dt=7 heapalloc_value=18362088 +HeapAlloc dt=13 heapalloc_value=18370280 +HeapAlloc dt=6 heapalloc_value=18378472 +HeapAlloc dt=7 heapalloc_value=18386664 +HeapAlloc dt=7 heapalloc_value=18394856 +HeapAlloc dt=11 heapalloc_value=18403048 +HeapAlloc dt=6 heapalloc_value=18411240 +HeapAlloc dt=7 heapalloc_value=18419432 +HeapAlloc dt=7 heapalloc_value=18427624 +HeapAlloc dt=6 heapalloc_value=18435816 +HeapAlloc dt=7 heapalloc_value=18444008 +HeapAlloc dt=7 heapalloc_value=18452200 +GCMarkAssistBegin dt=13 stack=31 +GoBlock dt=35 reason_string=10 stack=36 +ProcStop dt=22 +ProcStart dt=936 p=1 p_seq=13 +GoStart dt=212 g=25 g_seq=9 +GoUnblock dt=31 g=1 g_seq=55 stack=41 +GoBlock dt=7 reason_string=15 stack=27 +GoStart dt=13 g=1 g_seq=56 +GCMarkAssistEnd dt=4 +HeapAlloc dt=30 heapalloc_value=16971400 +GCSweepBegin dt=41 stack=42 +GCSweepEnd dt=310 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=23 heapalloc_value=16979592 +GCSweepBegin dt=30 stack=42 +GCSweepEnd dt=934 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=80 heapalloc_value=16987784 +GCSweepBegin dt=43 stack=42 +GCSweepEnd dt=1671 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=6 heapalloc_value=16995976 +GCSweepBegin dt=41 stack=42 +GCSweepEnd dt=1680 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=13 heapalloc_value=17004168 +GCSweepBegin dt=44 stack=42 +GCSweepEnd dt=1555 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=12 heapalloc_value=17012360 +GCSweepBegin dt=46 stack=42 +GCSweepEnd dt=1914 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=16 
heapalloc_value=17020552 +GCSweepBegin dt=47 stack=42 +GCSweepEnd dt=1545 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=10 heapalloc_value=17028744 +GCSweepBegin dt=37 stack=42 +GCSweepEnd dt=1763 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=17036936 +GCSweepBegin dt=37 stack=42 +GCSweepEnd dt=1712 swept_value=827392 reclaimed_value=0 +HeapAlloc dt=18 heapalloc_value=17045128 +GCSweepBegin dt=34 stack=42 +GCSweepEnd dt=1009 swept_value=466944 reclaimed_value=0 +HeapAlloc dt=9 heapalloc_value=17053320 +HeapAlloc dt=28 heapalloc_value=17061512 +HeapAlloc dt=25 heapalloc_value=17069704 +HeapAlloc dt=34 heapalloc_value=17077896 +HeapAlloc dt=39 heapalloc_value=17086088 +HeapAlloc dt=72 heapalloc_value=17094280 +HeapAlloc dt=32 heapalloc_value=17102472 +HeapAlloc dt=16 heapalloc_value=17110664 +HeapAlloc dt=15 heapalloc_value=17118856 +HeapAlloc dt=14 heapalloc_value=17127048 +HeapAlloc dt=16 heapalloc_value=17135240 +HeapAlloc dt=15 heapalloc_value=17143432 +HeapAlloc dt=19 heapalloc_value=17151624 +HeapAlloc dt=15 heapalloc_value=17159816 +HeapAlloc dt=54 heapalloc_value=17585800 +GoBlock dt=482 reason_string=19 stack=21 +ProcStop dt=210 +ProcStart dt=17621 p=0 p_seq=26 +ProcStop dt=24 +ProcStart dt=5194 p=1 p_seq=16 +ProcStop dt=17 +ProcStart dt=16724 p=1 p_seq=17 +GoUnblock dt=27 g=1 g_seq=59 stack=0 +GoStart dt=127 g=1 g_seq=60 +HeapAlloc dt=55 heapalloc_value=18617992 +HeapAlloc dt=64 heapalloc_value=18626184 +HeapAlloc dt=65 heapalloc_value=18634376 +HeapAlloc dt=61 heapalloc_value=18642568 +HeapAlloc dt=54 heapalloc_value=18650760 +HeapAlloc dt=66 heapalloc_value=18658952 +HeapAlloc dt=67 heapalloc_value=18667144 +HeapAlloc dt=54 heapalloc_value=18675336 +HeapAlloc dt=57 heapalloc_value=18683528 +HeapAlloc dt=45 heapalloc_value=18691720 +HeapAlloc dt=84 heapalloc_value=18699912 +HeapAlloc dt=26 heapalloc_value=18708104 +HeapAlloc dt=18 heapalloc_value=18716296 +HeapAlloc dt=15 heapalloc_value=18724488 +HeapAlloc dt=24 
heapalloc_value=18732680 +HeapAlloc dt=26 heapalloc_value=18740872 +HeapAlloc dt=21 heapalloc_value=18749064 +HeapAlloc dt=15 heapalloc_value=18757256 +HeapAlloc dt=31 heapalloc_value=18765448 +HeapAlloc dt=7 heapalloc_value=18773640 +HeapAlloc dt=7 heapalloc_value=18781832 +HeapAlloc dt=113 heapalloc_value=18790024 +HeapAlloc dt=8 heapalloc_value=18798216 +HeapAlloc dt=6 heapalloc_value=18806408 +HeapAlloc dt=7 heapalloc_value=18814600 +HeapAlloc dt=6 heapalloc_value=18822792 +HeapAlloc dt=6 heapalloc_value=18830984 +HeapAlloc dt=7 heapalloc_value=18839176 +HeapAlloc dt=6 heapalloc_value=18847368 +HeapAlloc dt=6 heapalloc_value=18855560 +HeapAlloc dt=6 heapalloc_value=18863752 +HeapAlloc dt=6 heapalloc_value=18871944 +HeapAlloc dt=6 heapalloc_value=18880136 +HeapAlloc dt=6 heapalloc_value=18888328 +HeapAlloc dt=6 heapalloc_value=18896520 +HeapAlloc dt=7 heapalloc_value=18904712 +HeapAlloc dt=6 heapalloc_value=18912904 +HeapAlloc dt=38 heapalloc_value=18921096 +HeapAlloc dt=7 heapalloc_value=18929288 +HeapAlloc dt=6 heapalloc_value=18937480 +HeapAlloc dt=14 heapalloc_value=18945672 +HeapAlloc dt=6 heapalloc_value=18953864 +HeapAlloc dt=6 heapalloc_value=18962056 +HeapAlloc dt=6 heapalloc_value=18970248 +HeapAlloc dt=7 heapalloc_value=18978440 +HeapAlloc dt=6 heapalloc_value=18986632 +HeapAlloc dt=6 heapalloc_value=18994824 +HeapAlloc dt=13 heapalloc_value=19003016 +HeapAlloc dt=7 heapalloc_value=19011208 +HeapAlloc dt=6 heapalloc_value=19019400 +HeapAlloc dt=6 heapalloc_value=19027592 +HeapAlloc dt=6 heapalloc_value=19035784 +HeapAlloc dt=7 heapalloc_value=19043976 +HeapAlloc dt=6 heapalloc_value=19052168 +HeapAlloc dt=6 heapalloc_value=19060360 +HeapAlloc dt=6 heapalloc_value=19068552 +HeapAlloc dt=6 heapalloc_value=19076744 +HeapAlloc dt=7 heapalloc_value=19084936 +HeapAlloc dt=6 heapalloc_value=19093128 +HeapAlloc dt=6 heapalloc_value=19101320 +HeapAlloc dt=6 heapalloc_value=19109512 +HeapAlloc dt=7 heapalloc_value=19117704 +HeapAlloc dt=5 
heapalloc_value=19125896 +HeapAlloc dt=7 heapalloc_value=19134088 +HeapAlloc dt=6 heapalloc_value=19142280 +HeapAlloc dt=6 heapalloc_value=19150472 +HeapAlloc dt=6 heapalloc_value=19158664 +HeapAlloc dt=6 heapalloc_value=19166856 +HeapAlloc dt=7 heapalloc_value=19175048 +HeapAlloc dt=6 heapalloc_value=19183240 +HeapAlloc dt=6 heapalloc_value=19191432 +HeapAlloc dt=6 heapalloc_value=19199624 +HeapAlloc dt=7 heapalloc_value=19207816 +HeapAlloc dt=6 heapalloc_value=19216008 +HeapAlloc dt=6 heapalloc_value=19224200 +HeapAlloc dt=6 heapalloc_value=19232392 +HeapAlloc dt=7 heapalloc_value=19240584 +HeapAlloc dt=6 heapalloc_value=19248776 +HeapAlloc dt=6 heapalloc_value=19256968 +HeapAlloc dt=6 heapalloc_value=19265160 +HeapAlloc dt=6 heapalloc_value=19273352 +HeapAlloc dt=6 heapalloc_value=19281544 +HeapAlloc dt=6 heapalloc_value=19289736 +HeapAlloc dt=7 heapalloc_value=19297928 +HeapAlloc dt=6 heapalloc_value=19306120 +HeapAlloc dt=62 heapalloc_value=19314312 +HeapAlloc dt=7 heapalloc_value=19322504 +HeapAlloc dt=6 heapalloc_value=19330696 +HeapAlloc dt=6 heapalloc_value=19338888 +HeapAlloc dt=35 heapalloc_value=19347080 +HeapAlloc dt=7 heapalloc_value=19355272 +HeapAlloc dt=6 heapalloc_value=19363464 +HeapAlloc dt=6 heapalloc_value=19371656 +HeapAlloc dt=6 heapalloc_value=19379848 +HeapAlloc dt=6 heapalloc_value=19388040 +HeapAlloc dt=6 heapalloc_value=19396232 +HeapAlloc dt=7 heapalloc_value=19404424 +HeapAlloc dt=6 heapalloc_value=19412616 +HeapAlloc dt=7 heapalloc_value=19420808 +HeapAlloc dt=6 heapalloc_value=19429000 +HeapAlloc dt=6 heapalloc_value=19437192 +HeapAlloc dt=6 heapalloc_value=19445384 +HeapAlloc dt=7 heapalloc_value=19453576 +HeapAlloc dt=6 heapalloc_value=19461768 +HeapAlloc dt=10 heapalloc_value=19469960 +HeapAlloc dt=6 heapalloc_value=19478152 +HeapAlloc dt=6 heapalloc_value=19486344 +HeapAlloc dt=6 heapalloc_value=19494536 +HeapAlloc dt=6 heapalloc_value=19502728 +HeapAlloc dt=7 heapalloc_value=19510920 +HeapAlloc dt=6 heapalloc_value=19519112 
+HeapAlloc dt=6 heapalloc_value=19527304 +HeapAlloc dt=6 heapalloc_value=19535496 +HeapAlloc dt=6 heapalloc_value=19543688 +HeapAlloc dt=35 heapalloc_value=19551880 +HeapAlloc dt=7 heapalloc_value=19560072 +HeapAlloc dt=6 heapalloc_value=19568264 +HeapAlloc dt=6 heapalloc_value=19576456 +HeapAlloc dt=6 heapalloc_value=19584648 +HeapAlloc dt=7 heapalloc_value=19592840 +HeapAlloc dt=7 heapalloc_value=19601032 +HeapAlloc dt=6 heapalloc_value=19609224 +HeapAlloc dt=6 heapalloc_value=19617416 +HeapAlloc dt=6 heapalloc_value=19625608 +HeapAlloc dt=6 heapalloc_value=19633800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=171 +ProcStart dt=17527 p=0 p_seq=28 +ProcStop dt=24 +ProcStart dt=1830 p=1 p_seq=20 +ProcStop dt=13 +ProcStart dt=16742 p=1 p_seq=21 +GoUnblock dt=20 g=1 g_seq=63 stack=0 +GoStart dt=121 g=1 g_seq=64 +HeapAlloc dt=62 heapalloc_value=20665992 +HeapAlloc dt=21 heapalloc_value=20674184 +HeapAlloc dt=25 heapalloc_value=20682376 +HeapAlloc dt=20 heapalloc_value=20690568 +HeapAlloc dt=12 heapalloc_value=20698760 +HeapAlloc dt=16 heapalloc_value=20706952 +HeapAlloc dt=15 heapalloc_value=20715144 +HeapAlloc dt=18 heapalloc_value=20723336 +HeapAlloc dt=12 heapalloc_value=20731528 +HeapAlloc dt=16 heapalloc_value=20739720 +HeapAlloc dt=12 heapalloc_value=20747912 +HeapAlloc dt=12 heapalloc_value=20756104 +HeapAlloc dt=12 heapalloc_value=20764296 +HeapAlloc dt=12 heapalloc_value=20772488 +HeapAlloc dt=9 heapalloc_value=20780680 +HeapAlloc dt=5 heapalloc_value=20788872 +HeapAlloc dt=6 heapalloc_value=20797064 +HeapAlloc dt=9 heapalloc_value=20805256 +HeapAlloc dt=5 heapalloc_value=20813448 +HeapAlloc dt=6 heapalloc_value=20821640 +HeapAlloc dt=5 heapalloc_value=20829832 +HeapAlloc dt=6 heapalloc_value=20838024 +HeapAlloc dt=15 heapalloc_value=20846216 +HeapAlloc dt=12 heapalloc_value=20854408 +HeapAlloc dt=11 heapalloc_value=20862600 +HeapAlloc dt=13 heapalloc_value=20870792 +HeapAlloc dt=5 heapalloc_value=20878984 +HeapAlloc dt=106 heapalloc_value=20887176 
+HeapAlloc dt=8 heapalloc_value=20895368 +HeapAlloc dt=5 heapalloc_value=20903560 +HeapAlloc dt=6 heapalloc_value=20911752 +HeapAlloc dt=6 heapalloc_value=20919944 +HeapAlloc dt=5 heapalloc_value=20928136 +HeapAlloc dt=9 heapalloc_value=20936328 +HeapAlloc dt=6 heapalloc_value=20944520 +HeapAlloc dt=5 heapalloc_value=20952712 +HeapAlloc dt=6 heapalloc_value=20960904 +HeapAlloc dt=5 heapalloc_value=20969096 +HeapAlloc dt=6 heapalloc_value=20977288 +HeapAlloc dt=5 heapalloc_value=20985480 +HeapAlloc dt=5 heapalloc_value=20993672 +HeapAlloc dt=10 heapalloc_value=21001864 +HeapAlloc dt=6 heapalloc_value=21010056 +HeapAlloc dt=37 heapalloc_value=21018248 +HeapAlloc dt=7 heapalloc_value=21026440 +HeapAlloc dt=6 heapalloc_value=21034632 +HeapAlloc dt=34 heapalloc_value=21042824 +HeapAlloc dt=6 heapalloc_value=21051016 +HeapAlloc dt=6 heapalloc_value=21059208 +HeapAlloc dt=11 heapalloc_value=21067400 +HeapAlloc dt=6 heapalloc_value=21075592 +HeapAlloc dt=5 heapalloc_value=21083784 +HeapAlloc dt=6 heapalloc_value=21091976 +HeapAlloc dt=5 heapalloc_value=21100168 +HeapAlloc dt=9 heapalloc_value=21108360 +HeapAlloc dt=6 heapalloc_value=21116552 +HeapAlloc dt=6 heapalloc_value=21124744 +HeapAlloc dt=10 heapalloc_value=21132936 +HeapAlloc dt=5 heapalloc_value=21141128 +HeapAlloc dt=6 heapalloc_value=21149320 +HeapAlloc dt=5 heapalloc_value=21157512 +HeapAlloc dt=6 heapalloc_value=21165704 +HeapAlloc dt=5 heapalloc_value=21173896 +HeapAlloc dt=6 heapalloc_value=21182088 +HeapAlloc dt=5 heapalloc_value=21190280 +HeapAlloc dt=9 heapalloc_value=21198472 +HeapAlloc dt=6 heapalloc_value=21206664 +HeapAlloc dt=6 heapalloc_value=21214856 +HeapAlloc dt=6 heapalloc_value=21223048 +HeapAlloc dt=5 heapalloc_value=21231240 +HeapAlloc dt=6 heapalloc_value=21239432 +HeapAlloc dt=5 heapalloc_value=21247624 +HeapAlloc dt=6 heapalloc_value=21255816 +HeapAlloc dt=5 heapalloc_value=21264008 +HeapAlloc dt=6 heapalloc_value=21272200 +HeapAlloc dt=5 heapalloc_value=21280392 +HeapAlloc dt=6 
heapalloc_value=21288584 +HeapAlloc dt=5 heapalloc_value=21296776 +HeapAlloc dt=6 heapalloc_value=21304968 +HeapAlloc dt=5 heapalloc_value=21313160 +HeapAlloc dt=6 heapalloc_value=21321352 +HeapAlloc dt=6 heapalloc_value=21329544 +HeapAlloc dt=6 heapalloc_value=21337736 +HeapAlloc dt=6 heapalloc_value=21345928 +HeapAlloc dt=6 heapalloc_value=21354120 +HeapAlloc dt=5 heapalloc_value=21362312 +HeapAlloc dt=6 heapalloc_value=21370504 +HeapAlloc dt=6 heapalloc_value=21378696 +HeapAlloc dt=6 heapalloc_value=21386888 +HeapAlloc dt=5 heapalloc_value=21395080 +HeapAlloc dt=6 heapalloc_value=21403272 +HeapAlloc dt=96 heapalloc_value=21411464 +HeapAlloc dt=7 heapalloc_value=21419656 +HeapAlloc dt=6 heapalloc_value=21427848 +HeapAlloc dt=21 heapalloc_value=21968520 +HeapAlloc dt=1835 heapalloc_value=21976712 +HeapAlloc dt=11 heapalloc_value=21984904 +HeapAlloc dt=8 heapalloc_value=21993096 +HeapAlloc dt=7 heapalloc_value=22001288 +HeapAlloc dt=8 heapalloc_value=22009480 +HeapAlloc dt=7 heapalloc_value=22017672 +HeapAlloc dt=8 heapalloc_value=22025864 +HeapAlloc dt=7 heapalloc_value=22034056 +HeapAlloc dt=8 heapalloc_value=22042248 +HeapAlloc dt=7 heapalloc_value=22050440 +HeapAlloc dt=7 heapalloc_value=22058632 +HeapAlloc dt=8 heapalloc_value=22066824 +HeapAlloc dt=7 heapalloc_value=22075016 +HeapAlloc dt=8 heapalloc_value=22083208 +HeapAlloc dt=7 heapalloc_value=22091400 +HeapAlloc dt=7 heapalloc_value=22099592 +HeapAlloc dt=14 heapalloc_value=22107784 +HeapAlloc dt=5 heapalloc_value=22115976 +HeapAlloc dt=6 heapalloc_value=22124168 +HeapAlloc dt=6 heapalloc_value=22132360 +HeapAlloc dt=5 heapalloc_value=22140552 +HeapAlloc dt=6 heapalloc_value=22148744 +HeapAlloc dt=5 heapalloc_value=22156936 +HeapAlloc dt=6 heapalloc_value=22165128 +HeapAlloc dt=6 heapalloc_value=22173320 +HeapAlloc dt=38 heapalloc_value=22181512 +HeapAlloc dt=7 heapalloc_value=22189704 +HeapAlloc dt=5 heapalloc_value=22197896 +HeapAlloc dt=6 heapalloc_value=22206088 +HeapAlloc dt=6 
heapalloc_value=22214280 +HeapAlloc dt=5 heapalloc_value=22222472 +GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=163 +ProcStart dt=16841 p=0 p_seq=30 +ProcStop dt=23 +ProcStart dt=1498 p=1 p_seq=24 +ProcStop dt=11 +ProcStart dt=16726 p=1 p_seq=25 +GoUnblock dt=19 g=1 g_seq=67 stack=0 +GoStart dt=117 g=1 g_seq=68 +HeapAlloc dt=46 heapalloc_value=23254664 +HeapAlloc dt=19 heapalloc_value=23262856 +HeapAlloc dt=20 heapalloc_value=23271048 +HeapAlloc dt=16 heapalloc_value=23279240 +HeapAlloc dt=12 heapalloc_value=23287432 +HeapAlloc dt=12 heapalloc_value=23295624 +HeapAlloc dt=13 heapalloc_value=23303816 +HeapAlloc dt=15 heapalloc_value=23312008 +HeapAlloc dt=13 heapalloc_value=23320200 +HeapAlloc dt=13 heapalloc_value=23328392 +HeapAlloc dt=12 heapalloc_value=23336584 +HeapAlloc dt=12 heapalloc_value=23344776 +HeapAlloc dt=5 heapalloc_value=23352968 +HeapAlloc dt=100 heapalloc_value=23361160 +HeapAlloc dt=14 heapalloc_value=23369352 +HeapAlloc dt=16 heapalloc_value=23377544 +HeapAlloc dt=13 heapalloc_value=23385736 +HeapAlloc dt=5 heapalloc_value=23393928 +HeapAlloc dt=6 heapalloc_value=23402120 +HeapAlloc dt=9 heapalloc_value=23410312 +HeapAlloc dt=6 heapalloc_value=23418504 +HeapAlloc dt=6 heapalloc_value=23426696 +HeapAlloc dt=5 heapalloc_value=23434888 +HeapAlloc dt=6 heapalloc_value=23443080 +HeapAlloc dt=5 heapalloc_value=23451272 +HeapAlloc dt=6 heapalloc_value=23459464 +HeapAlloc dt=6 heapalloc_value=23467656 +HeapAlloc dt=6 heapalloc_value=23475848 +HeapAlloc dt=6 heapalloc_value=23484040 +HeapAlloc dt=5 heapalloc_value=23492232 +HeapAlloc dt=6 heapalloc_value=23500424 +HeapAlloc dt=5 heapalloc_value=23508616 +HeapAlloc dt=83 heapalloc_value=23516808 +HeapAlloc dt=8 heapalloc_value=23525000 +HeapAlloc dt=5 heapalloc_value=23533192 +HeapAlloc dt=6 heapalloc_value=23541384 +HeapAlloc dt=6 heapalloc_value=23549576 +HeapAlloc dt=5 heapalloc_value=23557768 +HeapAlloc dt=7 heapalloc_value=23565960 +HeapAlloc dt=7 heapalloc_value=23574152 +HeapAlloc dt=6 
heapalloc_value=23582344 +HeapAlloc dt=5 heapalloc_value=23590536 +HeapAlloc dt=6 heapalloc_value=23598728 +HeapAlloc dt=6 heapalloc_value=23606920 +HeapAlloc dt=5 heapalloc_value=23615112 +HeapAlloc dt=6 heapalloc_value=23623304 +HeapAlloc dt=6 heapalloc_value=23631496 +HeapAlloc dt=5 heapalloc_value=23639688 +HeapAlloc dt=38 heapalloc_value=23647880 +HeapAlloc dt=8 heapalloc_value=23656072 +HeapAlloc dt=37 heapalloc_value=23664264 +HeapAlloc dt=6 heapalloc_value=23672456 +HeapAlloc dt=6 heapalloc_value=23680648 +HeapAlloc dt=6 heapalloc_value=23688840 +HeapAlloc dt=6 heapalloc_value=23697032 +HeapAlloc dt=6 heapalloc_value=23705224 +HeapAlloc dt=5 heapalloc_value=23713416 +HeapAlloc dt=6 heapalloc_value=23721608 +HeapAlloc dt=10 heapalloc_value=23729800 +HeapAlloc dt=5 heapalloc_value=23737992 +HeapAlloc dt=6 heapalloc_value=23746184 +HeapAlloc dt=6 heapalloc_value=23754376 +HeapAlloc dt=5 heapalloc_value=23762568 +HeapAlloc dt=6 heapalloc_value=23770760 +HeapAlloc dt=6 heapalloc_value=23778952 +HeapAlloc dt=5 heapalloc_value=23787144 +HeapAlloc dt=9 heapalloc_value=23795336 +HeapAlloc dt=6 heapalloc_value=23803528 +HeapAlloc dt=6 heapalloc_value=23811720 +HeapAlloc dt=5 heapalloc_value=23819912 +HeapAlloc dt=6 heapalloc_value=23828104 +HeapAlloc dt=6 heapalloc_value=23836296 +HeapAlloc dt=6 heapalloc_value=23844488 +HeapAlloc dt=5 heapalloc_value=23852680 +HeapAlloc dt=6 heapalloc_value=23860872 +HeapAlloc dt=6 heapalloc_value=23869064 +HeapAlloc dt=6 heapalloc_value=23877256 +HeapAlloc dt=6 heapalloc_value=23885448 +HeapAlloc dt=5 heapalloc_value=23893640 +HeapAlloc dt=6 heapalloc_value=23901832 +HeapAlloc dt=5 heapalloc_value=23910024 +HeapAlloc dt=6 heapalloc_value=23918216 +HeapAlloc dt=6 heapalloc_value=23926408 +HeapAlloc dt=6 heapalloc_value=23934600 +HeapAlloc dt=6 heapalloc_value=23942792 +HeapAlloc dt=5 heapalloc_value=23950984 +HeapAlloc dt=6 heapalloc_value=23959176 +HeapAlloc dt=5 heapalloc_value=23967368 +HeapAlloc dt=6 heapalloc_value=23975560 
+HeapAlloc dt=7 heapalloc_value=23983752 +HeapAlloc dt=5 heapalloc_value=23991944 +HeapAlloc dt=6 heapalloc_value=24000136 +HeapAlloc dt=5 heapalloc_value=24008328 +HeapAlloc dt=6 heapalloc_value=24016520 +HeapAlloc dt=6 heapalloc_value=24024712 +HeapAlloc dt=5 heapalloc_value=24032904 +HeapAlloc dt=50 heapalloc_value=24041096 +HeapAlloc dt=7 heapalloc_value=24049288 +HeapAlloc dt=6 heapalloc_value=24057480 +HeapAlloc dt=5 heapalloc_value=24065672 +HeapAlloc dt=34 heapalloc_value=24073864 +HeapAlloc dt=7 heapalloc_value=24082056 +HeapAlloc dt=6 heapalloc_value=24090248 +HeapAlloc dt=6 heapalloc_value=24098440 +HeapAlloc dt=6 heapalloc_value=24106632 +HeapAlloc dt=5 heapalloc_value=24114824 +HeapAlloc dt=6 heapalloc_value=24123016 +HeapAlloc dt=6 heapalloc_value=24131208 +HeapAlloc dt=6 heapalloc_value=24139400 +HeapAlloc dt=6 heapalloc_value=24147592 +HeapAlloc dt=5 heapalloc_value=24155784 +HeapAlloc dt=6 heapalloc_value=24163976 +HeapAlloc dt=5 heapalloc_value=24172168 +HeapAlloc dt=6 heapalloc_value=24180360 +HeapAlloc dt=365 heapalloc_value=24188552 +HeapAlloc dt=13 heapalloc_value=24196744 +HeapAlloc dt=6 heapalloc_value=24204936 +HeapAlloc dt=6 heapalloc_value=24213128 +HeapAlloc dt=5 heapalloc_value=24221320 +HeapAlloc dt=6 heapalloc_value=24229512 +HeapAlloc dt=6 heapalloc_value=24237704 +HeapAlloc dt=6 heapalloc_value=24245896 +HeapAlloc dt=6 heapalloc_value=24254088 +HeapAlloc dt=6 heapalloc_value=24262280 +HeapAlloc dt=6 heapalloc_value=24270472 +GoBlock dt=10 reason_string=19 stack=21 +ProcStop dt=157 +ProcStart dt=12778 p=1 p_seq=27 +GoUnblock dt=12 g=1 g_seq=69 stack=0 +GoStart dt=143 g=1 g_seq=70 +HeapAlloc dt=61 heapalloc_value=24278664 +HeapAlloc dt=11 heapalloc_value=24286856 +HeapAlloc dt=5 heapalloc_value=24295048 +HeapAlloc dt=6 heapalloc_value=24303240 +HeapAlloc dt=5 heapalloc_value=24311432 +HeapAlloc dt=6 heapalloc_value=24319624 +HeapAlloc dt=6 heapalloc_value=24327816 +HeapAlloc dt=6 heapalloc_value=24336008 +HeapAlloc dt=7 
heapalloc_value=24344200 +HeapAlloc dt=5 heapalloc_value=24352392 +HeapAlloc dt=7 heapalloc_value=24360584 +HeapAlloc dt=5 heapalloc_value=24368776 +HeapAlloc dt=6 heapalloc_value=24376968 +HeapAlloc dt=6 heapalloc_value=24385160 +HeapAlloc dt=5 heapalloc_value=24393352 +HeapAlloc dt=6 heapalloc_value=24401544 +HeapAlloc dt=6 heapalloc_value=24409736 +HeapAlloc dt=6 heapalloc_value=24417928 +HeapAlloc dt=6 heapalloc_value=24426120 +HeapAlloc dt=5 heapalloc_value=24434312 +HeapAlloc dt=6 heapalloc_value=24442504 +HeapAlloc dt=6 heapalloc_value=24450696 +HeapAlloc dt=6 heapalloc_value=24458888 +HeapAlloc dt=6 heapalloc_value=24467080 +HeapAlloc dt=5 heapalloc_value=24475272 +HeapAlloc dt=6 heapalloc_value=24483464 +HeapAlloc dt=5 heapalloc_value=24491656 +HeapAlloc dt=6 heapalloc_value=24499848 +HeapAlloc dt=6 heapalloc_value=24508040 +HeapAlloc dt=5 heapalloc_value=24516232 +HeapAlloc dt=6 heapalloc_value=24524424 +HeapAlloc dt=6 heapalloc_value=24532616 +HeapAlloc dt=5 heapalloc_value=24540808 +HeapAlloc dt=6 heapalloc_value=24549000 +HeapAlloc dt=5 heapalloc_value=24557192 +HeapAlloc dt=49 heapalloc_value=24565384 +HeapAlloc dt=7 heapalloc_value=24573576 +HeapAlloc dt=5 heapalloc_value=24581768 +HeapAlloc dt=6 heapalloc_value=24589960 +HeapAlloc dt=17 heapalloc_value=24598152 +HeapAlloc dt=12 heapalloc_value=24606344 +HeapAlloc dt=5 heapalloc_value=24614536 +HeapAlloc dt=6 heapalloc_value=24622728 +HeapAlloc dt=5 heapalloc_value=24630920 +HeapAlloc dt=6 heapalloc_value=24639112 +HeapAlloc dt=6 heapalloc_value=24647304 +HeapAlloc dt=5 heapalloc_value=24655496 +HeapAlloc dt=6 heapalloc_value=24663688 +HeapAlloc dt=37 heapalloc_value=24671880 +HeapAlloc dt=6 heapalloc_value=24680072 +HeapAlloc dt=6 heapalloc_value=24688264 +HeapAlloc dt=36 heapalloc_value=24696456 +HeapAlloc dt=7 heapalloc_value=24704648 +HeapAlloc dt=12 heapalloc_value=24712840 +HeapAlloc dt=6 heapalloc_value=24721032 +HeapAlloc dt=17 heapalloc_value=24729224 +HeapAlloc dt=5 heapalloc_value=24737416 
+HeapAlloc dt=6 heapalloc_value=24745608 +HeapAlloc dt=19 heapalloc_value=24753800 +HeapAlloc dt=5 heapalloc_value=24761992 +HeapAlloc dt=6 heapalloc_value=24770184 +HeapAlloc dt=79 heapalloc_value=24778376 +HeapAlloc dt=7 heapalloc_value=24786568 +HeapAlloc dt=6 heapalloc_value=24794760 +HeapAlloc dt=5 heapalloc_value=24802952 +HeapAlloc dt=6 heapalloc_value=24811144 +HeapAlloc dt=6 heapalloc_value=24819336 +HeapAlloc dt=6 heapalloc_value=24827528 +HeapAlloc dt=5 heapalloc_value=24835720 +HeapAlloc dt=6 heapalloc_value=24843912 +HeapAlloc dt=6 heapalloc_value=24852104 +HeapAlloc dt=6 heapalloc_value=24860296 +HeapAlloc dt=6 heapalloc_value=24868488 +HeapAlloc dt=5 heapalloc_value=24876680 +HeapAlloc dt=6 heapalloc_value=24884872 +HeapAlloc dt=6 heapalloc_value=24893064 +HeapAlloc dt=5 heapalloc_value=24901256 +HeapAlloc dt=6 heapalloc_value=24909448 +HeapAlloc dt=6 heapalloc_value=24917640 +HeapAlloc dt=5 heapalloc_value=24925832 +HeapAlloc dt=6 heapalloc_value=24934024 +HeapAlloc dt=5 heapalloc_value=24942216 +HeapAlloc dt=6 heapalloc_value=24950408 +HeapAlloc dt=6 heapalloc_value=24958600 +HeapAlloc dt=6 heapalloc_value=24966792 +HeapAlloc dt=5 heapalloc_value=24974984 +HeapAlloc dt=6 heapalloc_value=24983176 +HeapAlloc dt=6 heapalloc_value=24991368 +HeapAlloc dt=6 heapalloc_value=24999560 +HeapAlloc dt=5 heapalloc_value=25007752 +HeapAlloc dt=6 heapalloc_value=25015944 +HeapAlloc dt=5 heapalloc_value=25024136 +HeapAlloc dt=6 heapalloc_value=25032328 +HeapAlloc dt=6 heapalloc_value=25040520 +HeapAlloc dt=6 heapalloc_value=25048712 +HeapAlloc dt=6 heapalloc_value=25056904 +HeapAlloc dt=5 heapalloc_value=25065096 +HeapAlloc dt=6 heapalloc_value=25073288 +HeapAlloc dt=6 heapalloc_value=25081480 +HeapAlloc dt=46 heapalloc_value=25089672 +HeapAlloc dt=7 heapalloc_value=25097864 +HeapAlloc dt=6 heapalloc_value=25106056 +HeapAlloc dt=5 heapalloc_value=25114248 +HeapAlloc dt=36 heapalloc_value=25122440 +HeapAlloc dt=7 heapalloc_value=25130632 +HeapAlloc dt=6 
heapalloc_value=25138824 +HeapAlloc dt=6 heapalloc_value=25147016 +HeapAlloc dt=5 heapalloc_value=25155208 +HeapAlloc dt=6 heapalloc_value=25163400 +HeapAlloc dt=5 heapalloc_value=25171592 +HeapAlloc dt=6 heapalloc_value=25179784 +HeapAlloc dt=5 heapalloc_value=25187976 +HeapAlloc dt=6 heapalloc_value=25196168 +HeapAlloc dt=5 heapalloc_value=25204360 +HeapAlloc dt=6 heapalloc_value=25212552 +HeapAlloc dt=5 heapalloc_value=25220744 +HeapAlloc dt=6 heapalloc_value=25228936 +HeapAlloc dt=10 heapalloc_value=25237128 +HeapAlloc dt=5 heapalloc_value=25245320 +HeapAlloc dt=6 heapalloc_value=25253512 +HeapAlloc dt=5 heapalloc_value=25261704 +HeapAlloc dt=6 heapalloc_value=25269896 +HeapAlloc dt=6 heapalloc_value=25278088 +HeapAlloc dt=5 heapalloc_value=25286280 +HeapAlloc dt=6 heapalloc_value=25294472 +GoBlock dt=10 reason_string=19 stack=21 +ProcStop dt=14 +ProcStart dt=7152 p=1 p_seq=29 +GoStart dt=199 g=37 g_seq=1 +GoStop dt=306782 reason_string=16 stack=50 +GoStart dt=57 g=37 g_seq=2 +GoStop dt=315218 reason_string=16 stack=50 +GoStart dt=17 g=37 g_seq=3 +GoDestroy dt=159214 +ProcStop dt=60 +EventBatch gen=1 m=1709041 time=7689670150297 size=5255 +ProcStart dt=316 p=3 p_seq=1 +ProcStop dt=37 +ProcStart dt=311299 p=1 p_seq=5 +ProcStop dt=17 +ProcStart dt=16759 p=1 p_seq=6 +GoUnblock dt=47 g=1 g_seq=3 stack=0 +GoStart dt=137 g=1 g_seq=4 +HeapAlloc dt=56 heapalloc_value=2809856 +HeapAlloc dt=29 heapalloc_value=2818048 +HeapAlloc dt=19 heapalloc_value=2826240 +HeapAlloc dt=22 heapalloc_value=2834432 +HeapAlloc dt=91 heapalloc_value=2842624 +HeapAlloc dt=21 heapalloc_value=2850816 +HeapAlloc dt=24 heapalloc_value=2859008 +HeapAlloc dt=7 heapalloc_value=2867200 +HeapAlloc dt=15 heapalloc_value=2875392 +HeapAlloc dt=16 heapalloc_value=2883584 +HeapAlloc dt=12 heapalloc_value=2899968 +HeapAlloc dt=9 heapalloc_value=2908160 +HeapAlloc dt=16 heapalloc_value=2916352 +HeapAlloc dt=15 heapalloc_value=2924544 +HeapAlloc dt=12 heapalloc_value=2932736 +HeapAlloc dt=12 
heapalloc_value=2940928 +HeapAlloc dt=7 heapalloc_value=2949120 +HeapAlloc dt=18 heapalloc_value=2957312 +HeapAlloc dt=14 heapalloc_value=2965504 +HeapAlloc dt=12 heapalloc_value=2973696 +HeapAlloc dt=13 heapalloc_value=2981888 +HeapAlloc dt=12 heapalloc_value=2990080 +HeapAlloc dt=11 heapalloc_value=2998272 +HeapAlloc dt=12 heapalloc_value=3006464 +HeapAlloc dt=13 heapalloc_value=3014656 +HeapAlloc dt=12 heapalloc_value=3022848 +HeapAlloc dt=11 heapalloc_value=3031040 +HeapAlloc dt=11 heapalloc_value=3039232 +HeapAlloc dt=13 heapalloc_value=3047424 +HeapAlloc dt=11 heapalloc_value=3055616 +HeapAlloc dt=20 heapalloc_value=3063808 +HeapAlloc dt=12 heapalloc_value=3072000 +HeapAlloc dt=12 heapalloc_value=3080192 +HeapAlloc dt=11 heapalloc_value=3088384 +HeapAlloc dt=12 heapalloc_value=3096576 +HeapAlloc dt=11 heapalloc_value=3104768 +HeapAlloc dt=11 heapalloc_value=3112960 +HeapAlloc dt=12 heapalloc_value=3121152 +HeapAlloc dt=11 heapalloc_value=3129344 +HeapAlloc dt=15 heapalloc_value=3137536 +HeapAlloc dt=15 heapalloc_value=3145728 +HeapAlloc dt=18 heapalloc_value=3153920 +HeapAlloc dt=13 heapalloc_value=3162112 +HeapAlloc dt=12 heapalloc_value=3170304 +HeapAlloc dt=16 heapalloc_value=3178496 +HeapAlloc dt=11 heapalloc_value=3186688 +HeapAlloc dt=12 heapalloc_value=3194880 +HeapAlloc dt=11 heapalloc_value=3203072 +HeapAlloc dt=13 heapalloc_value=3211264 +HeapAlloc dt=12 heapalloc_value=3219456 +HeapAlloc dt=11 heapalloc_value=3227648 +HeapAlloc dt=13 heapalloc_value=3244032 +HeapAlloc dt=734 heapalloc_value=3252224 +HeapAlloc dt=16 heapalloc_value=3260416 +HeapAlloc dt=8 heapalloc_value=3268608 +HeapAlloc dt=5 heapalloc_value=3276800 +HeapAlloc dt=8 heapalloc_value=3284992 +HeapAlloc dt=88 heapalloc_value=3293184 +HeapAlloc dt=7 heapalloc_value=3301376 +HeapAlloc dt=5 heapalloc_value=3309568 +HeapAlloc dt=6 heapalloc_value=3317760 +HeapAlloc dt=5 heapalloc_value=3325952 +HeapAlloc dt=5 heapalloc_value=3334144 +HeapAlloc dt=5 heapalloc_value=3342336 +HeapAlloc dt=5 
heapalloc_value=3350528 +HeapAlloc dt=6 heapalloc_value=3358720 +HeapAlloc dt=5 heapalloc_value=3366912 +HeapAlloc dt=5 heapalloc_value=3375104 +HeapAlloc dt=7 heapalloc_value=3383296 +HeapAlloc dt=6 heapalloc_value=3391488 +HeapAlloc dt=5 heapalloc_value=3399680 +HeapAlloc dt=5 heapalloc_value=3407872 +HeapAlloc dt=5 heapalloc_value=3416064 +HeapAlloc dt=6 heapalloc_value=3424256 +HeapAlloc dt=5 heapalloc_value=3432448 +HeapAlloc dt=5 heapalloc_value=3440640 +HeapAlloc dt=5 heapalloc_value=3448832 +HeapAlloc dt=6 heapalloc_value=3457024 +HeapAlloc dt=5 heapalloc_value=3465216 +HeapAlloc dt=38 heapalloc_value=3473408 +HeapAlloc dt=6 heapalloc_value=3481600 +HeapAlloc dt=5 heapalloc_value=3489792 +HeapAlloc dt=6 heapalloc_value=3497984 +HeapAlloc dt=5 heapalloc_value=3506176 +HeapAlloc dt=6 heapalloc_value=3514368 +HeapAlloc dt=5 heapalloc_value=3522560 +HeapAlloc dt=5 heapalloc_value=3530752 +HeapAlloc dt=5 heapalloc_value=3538944 +HeapAlloc dt=5 heapalloc_value=3547136 +HeapAlloc dt=6 heapalloc_value=3555328 +HeapAlloc dt=5 heapalloc_value=3563520 +HeapAlloc dt=5 heapalloc_value=3571712 +HeapAlloc dt=5 heapalloc_value=3579904 +HeapAlloc dt=5 heapalloc_value=3588096 +HeapAlloc dt=6 heapalloc_value=3596288 +HeapAlloc dt=10 heapalloc_value=3678208 +HeapAlloc dt=2433 heapalloc_value=3686400 +HeapAlloc dt=6 heapalloc_value=3694592 +HeapAlloc dt=6 heapalloc_value=3702784 +HeapAlloc dt=6 heapalloc_value=3710976 +HeapAlloc dt=5 heapalloc_value=3719168 +HeapAlloc dt=6 heapalloc_value=3727360 +HeapAlloc dt=5 heapalloc_value=3735552 +HeapAlloc dt=5 heapalloc_value=3743744 +HeapAlloc dt=5 heapalloc_value=3751936 +HeapAlloc dt=6 heapalloc_value=3760128 +HeapAlloc dt=5 heapalloc_value=3768320 +HeapAlloc dt=11 heapalloc_value=3776512 +HeapAlloc dt=31 heapalloc_value=3784704 +HeapAlloc dt=7 heapalloc_value=3792896 +HeapAlloc dt=6 heapalloc_value=3801088 +HeapAlloc dt=5 heapalloc_value=3809280 +HeapAlloc dt=6 heapalloc_value=3817472 +HeapAlloc dt=5 heapalloc_value=3825664 
+HeapAlloc dt=5 heapalloc_value=3833856 +HeapAlloc dt=6 heapalloc_value=3842048 +HeapAlloc dt=5 heapalloc_value=3850240 +HeapAlloc dt=5 heapalloc_value=3858432 +HeapAlloc dt=6 heapalloc_value=3866624 +HeapAlloc dt=5 heapalloc_value=3874816 +HeapAlloc dt=5 heapalloc_value=3883008 +HeapAlloc dt=78 heapalloc_value=3891200 +HeapAlloc dt=7 heapalloc_value=3899392 +HeapAlloc dt=6 heapalloc_value=3907584 +HeapAlloc dt=5 heapalloc_value=3915776 +HeapAlloc dt=5 heapalloc_value=3923968 +HeapAlloc dt=5 heapalloc_value=3932160 +HeapAlloc dt=6 heapalloc_value=3940352 +HeapAlloc dt=5 heapalloc_value=3948544 +HeapAlloc dt=5 heapalloc_value=3956736 +HeapAlloc dt=5 heapalloc_value=3964928 +HeapAlloc dt=5 heapalloc_value=3973120 +HeapAlloc dt=6 heapalloc_value=3981312 +HeapAlloc dt=5 heapalloc_value=3989504 +HeapAlloc dt=6 heapalloc_value=3997696 +GCBegin dt=38 gc_seq=1 stack=22 +HeapAlloc dt=42 heapalloc_value=4005888 +HeapAlloc dt=14 heapalloc_value=4014080 +GoCreate dt=73 new_g=18 new_stack=23 stack=24 +GoBlock dt=235 reason_string=12 stack=25 +GoStart dt=11 g=18 g_seq=1 +HeapAlloc dt=16 heapalloc_value=4022272 +GoUnblock dt=15 g=1 g_seq=5 stack=26 +GoBlock dt=9 reason_string=15 stack=27 +GoStart dt=12 g=1 g_seq=6 +GoCreate dt=44 new_g=19 new_stack=23 stack=24 +GoBlock dt=4 reason_string=12 stack=25 +GoStart dt=3 g=19 g_seq=1 +GoUnblock dt=5 g=1 g_seq=7 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=2 g=1 g_seq=8 +GoCreate dt=8 new_g=20 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=2 g=20 g_seq=1 +GoUnblock dt=3 g=1 g_seq=9 stack=26 +GoBlock dt=1 reason_string=15 stack=27 +GoStart dt=2 g=1 g_seq=10 +GoCreate dt=6 new_g=21 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=21 g_seq=1 +GoUnblock dt=6 g=1 g_seq=11 stack=26 +GoBlock dt=1 reason_string=15 stack=27 +GoStart dt=8 g=1 g_seq=12 +GoCreate dt=7 new_g=22 new_stack=23 stack=24 +GoBlock dt=2 reason_string=12 stack=25 +GoStart dt=2 g=22 g_seq=1 +GoUnblock dt=2 
g=1 g_seq=13 stack=26 +GoBlock dt=6 reason_string=15 stack=27 +GoStart dt=4 g=1 g_seq=14 +GoCreate dt=15 new_g=23 new_stack=23 stack=24 +GoBlock dt=166 reason_string=12 stack=25 +GoStart dt=4 g=23 g_seq=1 +GoUnblock dt=3 g=1 g_seq=15 stack=26 +GoBlock dt=3 reason_string=15 stack=27 +GoStart dt=3 g=1 g_seq=16 +HeapAlloc dt=18 heapalloc_value=4030464 +GoCreate dt=11 new_g=24 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=24 g_seq=1 +GoUnblock dt=3 g=1 g_seq=17 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=1 g=1 g_seq=18 +GoCreate dt=6 new_g=25 new_stack=23 stack=24 +GoBlock dt=3 reason_string=12 stack=25 +GoStart dt=1 g=25 g_seq=1 +GoUnblock dt=2 g=1 g_seq=19 stack=26 +GoBlock dt=2 reason_string=15 stack=27 +GoStart dt=1 g=1 g_seq=20 +STWBegin dt=118 kind_string=22 stack=28 +GoStatus dt=1398 g=4 m=18446744073709551615 gstatus=4 +GoUnblock dt=83 g=4 g_seq=1 stack=29 +ProcsChange dt=91 procs_value=8 stack=30 +STWEnd dt=31 +GCMarkAssistBegin dt=149 stack=31 +GCMarkAssistEnd dt=1458 +GoBlock dt=23 reason_string=19 stack=21 +GoStart dt=166 g=4 g_seq=2 +GoBlock dt=22 reason_string=15 stack=32 +GoUnblock dt=35 g=23 g_seq=2 stack=0 +GoStart dt=4 g=23 g_seq=3 +GoLabel dt=1 label_string=4 +GoBlock dt=441 reason_string=15 stack=27 +ProcStop dt=23 +ProcStart dt=16781 p=0 p_seq=6 +GoUnblock dt=28 g=1 g_seq=27 stack=0 +GoStart dt=162 g=1 g_seq=28 +HeapAlloc dt=69 heapalloc_value=4663024 +HeapAlloc dt=23 heapalloc_value=4671216 +HeapAlloc dt=15 heapalloc_value=4679408 +HeapAlloc dt=10 heapalloc_value=4687600 +HeapAlloc dt=12 heapalloc_value=4695792 +HeapAlloc dt=8 heapalloc_value=4703984 +HeapAlloc dt=6 heapalloc_value=4712176 +HeapAlloc dt=12 heapalloc_value=4720368 +HeapAlloc dt=12 heapalloc_value=4728560 +HeapAlloc dt=12 heapalloc_value=4736752 +HeapAlloc dt=15 heapalloc_value=4744944 +HeapAlloc dt=9 heapalloc_value=4753136 +HeapAlloc dt=9 heapalloc_value=4761328 +HeapAlloc dt=7 heapalloc_value=4769520 +HeapAlloc dt=8 
heapalloc_value=4777712 +HeapAlloc dt=9 heapalloc_value=4785904 +HeapAlloc dt=112 heapalloc_value=4794096 +HeapAlloc dt=7 heapalloc_value=4802288 +HeapAlloc dt=9 heapalloc_value=4810480 +HeapAlloc dt=13 heapalloc_value=4818672 +HeapAlloc dt=14 heapalloc_value=4826864 +HeapAlloc dt=6 heapalloc_value=4835056 +HeapAlloc dt=5 heapalloc_value=4843248 +HeapAlloc dt=6 heapalloc_value=4851440 +HeapAlloc dt=14 heapalloc_value=4859632 +HeapAlloc dt=10 heapalloc_value=4867824 +HeapAlloc dt=10 heapalloc_value=4876016 +HeapAlloc dt=6 heapalloc_value=4884208 +HeapAlloc dt=9 heapalloc_value=4892400 +HeapAlloc dt=72 heapalloc_value=4900592 +HeapAlloc dt=6 heapalloc_value=4908784 +HeapAlloc dt=5 heapalloc_value=4916976 +HeapAlloc dt=6 heapalloc_value=4925168 +HeapAlloc dt=6 heapalloc_value=4933360 +HeapAlloc dt=9 heapalloc_value=4941552 +HeapAlloc dt=46 heapalloc_value=4949744 +HeapAlloc dt=10 heapalloc_value=4957936 +HeapAlloc dt=6 heapalloc_value=4966128 +HeapAlloc dt=6 heapalloc_value=4974320 +HeapAlloc dt=6 heapalloc_value=4982512 +HeapAlloc dt=5 heapalloc_value=4990704 +HeapAlloc dt=6 heapalloc_value=4998896 +HeapAlloc dt=45 heapalloc_value=5007088 +HeapAlloc dt=6 heapalloc_value=5015280 +HeapAlloc dt=9 heapalloc_value=5023472 +HeapAlloc dt=6 heapalloc_value=5031664 +HeapAlloc dt=5 heapalloc_value=5039856 +HeapAlloc dt=6 heapalloc_value=5048048 +HeapAlloc dt=6 heapalloc_value=5056240 +HeapAlloc dt=15 heapalloc_value=5138160 +HeapAlloc dt=81 heapalloc_value=5146352 +HeapAlloc dt=6 heapalloc_value=5154544 +HeapAlloc dt=6 heapalloc_value=5162736 +HeapAlloc dt=5 heapalloc_value=5170928 +HeapAlloc dt=6 heapalloc_value=5179120 +HeapAlloc dt=5 heapalloc_value=5187312 +HeapAlloc dt=6 heapalloc_value=5195504 +HeapAlloc dt=7 heapalloc_value=5203696 +HeapAlloc dt=5 heapalloc_value=5211888 +HeapAlloc dt=6 heapalloc_value=5220080 +HeapAlloc dt=6 heapalloc_value=5228272 +HeapAlloc dt=37 heapalloc_value=5236464 +HeapAlloc dt=7 heapalloc_value=5244656 +HeapAlloc dt=6 heapalloc_value=5252848 
+HeapAlloc dt=5 heapalloc_value=5261040 +HeapAlloc dt=8 heapalloc_value=5269232 +HeapAlloc dt=6 heapalloc_value=5277424 +HeapAlloc dt=6 heapalloc_value=5285616 +HeapAlloc dt=5 heapalloc_value=5293808 +HeapAlloc dt=7 heapalloc_value=5302000 +HeapAlloc dt=5 heapalloc_value=5310192 +HeapAlloc dt=5 heapalloc_value=5318384 +HeapAlloc dt=6 heapalloc_value=5326576 +HeapAlloc dt=7 heapalloc_value=5334768 +HeapAlloc dt=6 heapalloc_value=5342960 +HeapAlloc dt=5 heapalloc_value=5351152 +HeapAlloc dt=6 heapalloc_value=5359344 +HeapAlloc dt=5 heapalloc_value=5367536 +HeapAlloc dt=13 heapalloc_value=5375728 +HeapAlloc dt=6 heapalloc_value=5383920 +HeapAlloc dt=100 heapalloc_value=5392112 +HeapAlloc dt=8 heapalloc_value=5400304 +HeapAlloc dt=6 heapalloc_value=5408496 +HeapAlloc dt=6 heapalloc_value=5416688 +HeapAlloc dt=5 heapalloc_value=5424880 +HeapAlloc dt=6 heapalloc_value=5433072 +HeapAlloc dt=33 heapalloc_value=5441264 +HeapAlloc dt=7 heapalloc_value=5449456 +HeapAlloc dt=5 heapalloc_value=5457648 +HeapAlloc dt=8 heapalloc_value=5465840 +HeapAlloc dt=6 heapalloc_value=5474032 +HeapAlloc dt=5 heapalloc_value=5482224 +HeapAlloc dt=6 heapalloc_value=5490416 +HeapAlloc dt=5 heapalloc_value=5498608 +HeapAlloc dt=6 heapalloc_value=5506800 +HeapAlloc dt=6 heapalloc_value=5514992 +HeapAlloc dt=5 heapalloc_value=5523184 +HeapAlloc dt=12 heapalloc_value=5531376 +HeapAlloc dt=6 heapalloc_value=5539568 +HeapAlloc dt=6 heapalloc_value=5547760 +HeapAlloc dt=5 heapalloc_value=5555952 +HeapAlloc dt=6 heapalloc_value=5564144 +HeapAlloc dt=5 heapalloc_value=5572336 +HeapAlloc dt=6 heapalloc_value=5580528 +HeapAlloc dt=5 heapalloc_value=5588720 +HeapAlloc dt=7 heapalloc_value=5596912 +HeapAlloc dt=6 heapalloc_value=5605104 +HeapAlloc dt=5 heapalloc_value=5613296 +HeapAlloc dt=6 heapalloc_value=5621488 +HeapAlloc dt=5 heapalloc_value=5629680 +HeapAlloc dt=6 heapalloc_value=5637872 +HeapAlloc dt=6 heapalloc_value=5646064 +HeapAlloc dt=37 heapalloc_value=5654256 +HeapAlloc dt=7 
heapalloc_value=5662448 +HeapAlloc dt=6 heapalloc_value=5670640 +HeapAlloc dt=5 heapalloc_value=5678832 +HeapAlloc dt=6 heapalloc_value=5687024 +HeapAlloc dt=5 heapalloc_value=5695216 +HeapAlloc dt=6 heapalloc_value=5703408 +HeapAlloc dt=6 heapalloc_value=5711600 +HeapAlloc dt=5 heapalloc_value=5719792 +HeapAlloc dt=5 heapalloc_value=5727984 +HeapAlloc dt=6 heapalloc_value=5736176 +HeapAlloc dt=6 heapalloc_value=5744368 +HeapAlloc dt=5 heapalloc_value=5752560 +HeapAlloc dt=5 heapalloc_value=5760752 +GoBlock dt=15 reason_string=19 stack=21 +ProcStop dt=178 +ProcStart dt=17613 p=4 p_seq=3 +ProcStop dt=26 +ProcStart dt=3944 p=0 p_seq=9 +ProcStop dt=12 +ProcStart dt=16762 p=4 p_seq=6 +ProcStop dt=14 +ProcStart dt=9275 p=0 p_seq=12 +ProcStop dt=9 +ProcStart dt=16732 p=0 p_seq=13 +GoUnblock dt=9 g=1 g_seq=38 stack=0 +GoStart dt=84 g=1 g_seq=39 +HeapAlloc dt=23 heapalloc_value=9631048 +HeapAlloc dt=24 heapalloc_value=9639240 +HeapAlloc dt=15 heapalloc_value=9647432 +HeapAlloc dt=15 heapalloc_value=9655624 +HeapAlloc dt=15 heapalloc_value=9663816 +HeapAlloc dt=16 heapalloc_value=9672008 +HeapAlloc dt=14 heapalloc_value=9680200 +HeapAlloc dt=18 heapalloc_value=9688392 +HeapAlloc dt=14 heapalloc_value=9696584 +HeapAlloc dt=19 heapalloc_value=9704776 +HeapAlloc dt=15 heapalloc_value=9712968 +HeapAlloc dt=76 heapalloc_value=9721160 +HeapAlloc dt=18 heapalloc_value=9729352 +HeapAlloc dt=17 heapalloc_value=9737544 +HeapAlloc dt=14 heapalloc_value=9745736 +HeapAlloc dt=15 heapalloc_value=9753928 +HeapAlloc dt=16 heapalloc_value=9762120 +HeapAlloc dt=28 heapalloc_value=9770312 +HeapAlloc dt=23 heapalloc_value=9778504 +HeapAlloc dt=19 heapalloc_value=9786696 +HeapAlloc dt=14 heapalloc_value=9794888 +HeapAlloc dt=26 heapalloc_value=9803080 +HeapAlloc dt=18 heapalloc_value=9811272 +HeapAlloc dt=16 heapalloc_value=9819464 +HeapAlloc dt=15 heapalloc_value=9827656 +HeapAlloc dt=19 heapalloc_value=9835848 +HeapAlloc dt=16 heapalloc_value=9844040 +HeapAlloc dt=15 heapalloc_value=9852232 
+HeapAlloc dt=15 heapalloc_value=9860424 +HeapAlloc dt=15 heapalloc_value=9868616 +HeapAlloc dt=15 heapalloc_value=9876808 +HeapAlloc dt=15 heapalloc_value=9885000 +HeapAlloc dt=15 heapalloc_value=9893192 +HeapAlloc dt=15 heapalloc_value=9901384 +HeapAlloc dt=16 heapalloc_value=9909576 +HeapAlloc dt=16 heapalloc_value=9917768 +HeapAlloc dt=15 heapalloc_value=9925960 +HeapAlloc dt=15 heapalloc_value=9934152 +HeapAlloc dt=15 heapalloc_value=9942344 +HeapAlloc dt=15 heapalloc_value=9950536 +HeapAlloc dt=16 heapalloc_value=9958728 +HeapAlloc dt=15 heapalloc_value=9966920 +HeapAlloc dt=63 heapalloc_value=9975112 +HeapAlloc dt=20 heapalloc_value=9983304 +HeapAlloc dt=14 heapalloc_value=9991496 +HeapAlloc dt=15 heapalloc_value=9999688 +HeapAlloc dt=14 heapalloc_value=10007880 +HeapAlloc dt=15 heapalloc_value=10016072 +HeapAlloc dt=16 heapalloc_value=10024264 +HeapAlloc dt=16 heapalloc_value=10032456 +HeapAlloc dt=16 heapalloc_value=10040648 +HeapAlloc dt=16 heapalloc_value=10048840 +HeapAlloc dt=15 heapalloc_value=10057032 +HeapAlloc dt=16 heapalloc_value=10065224 +HeapAlloc dt=14 heapalloc_value=10073416 +HeapAlloc dt=16 heapalloc_value=10081608 +HeapAlloc dt=15 heapalloc_value=10089800 +HeapAlloc dt=16 heapalloc_value=10097992 +HeapAlloc dt=16 heapalloc_value=10106184 +HeapAlloc dt=17 heapalloc_value=10114376 +HeapAlloc dt=15 heapalloc_value=10122568 +HeapAlloc dt=33 heapalloc_value=10327368 +HeapAlloc dt=367 heapalloc_value=10335560 +HeapAlloc dt=21 heapalloc_value=10343752 +HeapAlloc dt=16 heapalloc_value=10351944 +HeapAlloc dt=15 heapalloc_value=10360136 +HeapAlloc dt=16 heapalloc_value=10368328 +HeapAlloc dt=16 heapalloc_value=10376520 +HeapAlloc dt=16 heapalloc_value=10384712 +HeapAlloc dt=15 heapalloc_value=10392904 +HeapAlloc dt=15 heapalloc_value=10401096 +HeapAlloc dt=15 heapalloc_value=10409288 +HeapAlloc dt=15 heapalloc_value=10417480 +HeapAlloc dt=15 heapalloc_value=10425672 +HeapAlloc dt=17 heapalloc_value=10433864 +HeapAlloc dt=15 heapalloc_value=10442056 
+HeapAlloc dt=15 heapalloc_value=10450248 +HeapAlloc dt=15 heapalloc_value=10458440 +HeapAlloc dt=15 heapalloc_value=10466632 +HeapAlloc dt=15 heapalloc_value=10474824 +HeapAlloc dt=15 heapalloc_value=10483016 +HeapAlloc dt=14 heapalloc_value=10491208 +HeapAlloc dt=22 heapalloc_value=10499400 +HeapAlloc dt=7 heapalloc_value=10507592 +HeapAlloc dt=9 heapalloc_value=10515784 +HeapAlloc dt=7 heapalloc_value=10523976 +HeapAlloc dt=6 heapalloc_value=10532168 +HeapAlloc dt=5 heapalloc_value=10540360 +HeapAlloc dt=6 heapalloc_value=10548552 +HeapAlloc dt=6 heapalloc_value=10556744 +HeapAlloc dt=5 heapalloc_value=10564936 +HeapAlloc dt=6 heapalloc_value=10573128 +HeapAlloc dt=6 heapalloc_value=10581320 +HeapAlloc dt=5 heapalloc_value=10589512 +HeapAlloc dt=6 heapalloc_value=10597704 +HeapAlloc dt=6 heapalloc_value=10605896 +HeapAlloc dt=5 heapalloc_value=10614088 +HeapAlloc dt=6 heapalloc_value=10622280 +HeapAlloc dt=5 heapalloc_value=10630472 +HeapAlloc dt=6 heapalloc_value=10638664 +HeapAlloc dt=6 heapalloc_value=10646856 +HeapAlloc dt=5 heapalloc_value=10655048 +HeapAlloc dt=6 heapalloc_value=10663240 +HeapAlloc dt=5 heapalloc_value=10671432 +HeapAlloc dt=6 heapalloc_value=10679624 +HeapAlloc dt=5 heapalloc_value=10687816 +HeapAlloc dt=221 heapalloc_value=10696008 +HeapAlloc dt=9 heapalloc_value=10704200 +HeapAlloc dt=6 heapalloc_value=10712392 +HeapAlloc dt=5 heapalloc_value=10720584 +HeapAlloc dt=6 heapalloc_value=10728776 +HeapAlloc dt=6 heapalloc_value=10736968 +HeapAlloc dt=5 heapalloc_value=10745160 +HeapAlloc dt=6 heapalloc_value=10753352 +HeapAlloc dt=5 heapalloc_value=10761544 +HeapAlloc dt=6 heapalloc_value=10769736 +HeapAlloc dt=5 heapalloc_value=10777928 +HeapAlloc dt=5 heapalloc_value=10786120 +HeapAlloc dt=6 heapalloc_value=10794312 +HeapAlloc dt=6 heapalloc_value=10802504 +HeapAlloc dt=5 heapalloc_value=10810696 +HeapAlloc dt=6 heapalloc_value=10818888 +HeapAlloc dt=5 heapalloc_value=10827080 +HeapAlloc dt=6 heapalloc_value=10835272 +HeapAlloc dt=5 
heapalloc_value=10843464 +HeapAlloc dt=6 heapalloc_value=10851656 +GoBlock dt=11 reason_string=19 stack=21 +ProcStop dt=119 +ProcStart dt=17350 p=2 p_seq=7 +ProcStop dt=13 +ProcStart dt=1133 p=0 p_seq=16 +ProcStop dt=8 +ProcStart dt=16748 p=0 p_seq=17 +GoUnblock dt=7 g=1 g_seq=42 stack=0 +GoStart dt=84 g=1 g_seq=43 +HeapAlloc dt=15 heapalloc_value=11883848 +HeapAlloc dt=10 heapalloc_value=11892040 +HeapAlloc dt=6 heapalloc_value=11900232 +HeapAlloc dt=6 heapalloc_value=11908424 +HeapAlloc dt=6 heapalloc_value=11916616 +HeapAlloc dt=6 heapalloc_value=11924808 +HeapAlloc dt=8 heapalloc_value=11933000 +HeapAlloc dt=5 heapalloc_value=11941192 +HeapAlloc dt=6 heapalloc_value=11949384 +HeapAlloc dt=62 heapalloc_value=11957576 +HeapAlloc dt=7 heapalloc_value=11965768 +HeapAlloc dt=6 heapalloc_value=11973960 +HeapAlloc dt=6 heapalloc_value=11982152 +HeapAlloc dt=5 heapalloc_value=11990344 +HeapAlloc dt=6 heapalloc_value=11998536 +HeapAlloc dt=6 heapalloc_value=12006728 +HeapAlloc dt=5 heapalloc_value=12014920 +HeapAlloc dt=6 heapalloc_value=12023112 +HeapAlloc dt=5 heapalloc_value=12031304 +HeapAlloc dt=6 heapalloc_value=12039496 +HeapAlloc dt=5 heapalloc_value=12047688 +HeapAlloc dt=6 heapalloc_value=12055880 +HeapAlloc dt=6 heapalloc_value=12064072 +HeapAlloc dt=6 heapalloc_value=12072264 +HeapAlloc dt=5 heapalloc_value=12080456 +HeapAlloc dt=352 heapalloc_value=12088648 +HeapAlloc dt=14 heapalloc_value=12096840 +HeapAlloc dt=7 heapalloc_value=12105032 +HeapAlloc dt=5 heapalloc_value=12113224 +HeapAlloc dt=6 heapalloc_value=12121416 +HeapAlloc dt=41 heapalloc_value=12129608 +HeapAlloc dt=7 heapalloc_value=12137800 +HeapAlloc dt=5 heapalloc_value=12145992 +HeapAlloc dt=6 heapalloc_value=12154184 +HeapAlloc dt=6 heapalloc_value=12162376 +HeapAlloc dt=6 heapalloc_value=12170568 +HeapAlloc dt=5 heapalloc_value=12178760 +HeapAlloc dt=6 heapalloc_value=12186952 +HeapAlloc dt=5 heapalloc_value=12195144 +HeapAlloc dt=7 heapalloc_value=12203336 +HeapAlloc dt=5 
heapalloc_value=12211528 +HeapAlloc dt=6 heapalloc_value=12219720 +HeapAlloc dt=5 heapalloc_value=12227912 +HeapAlloc dt=6 heapalloc_value=12236104 +HeapAlloc dt=6 heapalloc_value=12244296 +HeapAlloc dt=6 heapalloc_value=12252488 +HeapAlloc dt=5 heapalloc_value=12260680 +HeapAlloc dt=46 heapalloc_value=12268872 +HeapAlloc dt=6 heapalloc_value=12277064 +HeapAlloc dt=6 heapalloc_value=12285256 +HeapAlloc dt=6 heapalloc_value=12293448 +HeapAlloc dt=5 heapalloc_value=12301640 +HeapAlloc dt=6 heapalloc_value=12309832 +HeapAlloc dt=5 heapalloc_value=12318024 +HeapAlloc dt=6 heapalloc_value=12326216 +HeapAlloc dt=5 heapalloc_value=12334408 +HeapAlloc dt=6 heapalloc_value=12342600 +HeapAlloc dt=5 heapalloc_value=12350792 +HeapAlloc dt=6 heapalloc_value=12358984 +HeapAlloc dt=5 heapalloc_value=12367176 +HeapAlloc dt=6 heapalloc_value=12375368 +HeapAlloc dt=37 heapalloc_value=12383560 +HeapAlloc dt=7 heapalloc_value=12391752 +HeapAlloc dt=6 heapalloc_value=12399944 +HeapAlloc dt=5 heapalloc_value=12408136 +HeapAlloc dt=6 heapalloc_value=12416328 +HeapAlloc dt=6 heapalloc_value=12424520 +HeapAlloc dt=13 heapalloc_value=12686664 +HeapAlloc dt=2516 heapalloc_value=12694856 +HeapAlloc dt=9 heapalloc_value=12703048 +HeapAlloc dt=8 heapalloc_value=12711240 +HeapAlloc dt=7 heapalloc_value=12719432 +HeapAlloc dt=8 heapalloc_value=12727624 +HeapAlloc dt=7 heapalloc_value=12735816 +HeapAlloc dt=8 heapalloc_value=12744008 +HeapAlloc dt=7 heapalloc_value=12752200 +HeapAlloc dt=8 heapalloc_value=12760392 +HeapAlloc dt=7 heapalloc_value=12768584 +HeapAlloc dt=7 heapalloc_value=12776776 +HeapAlloc dt=8 heapalloc_value=12784968 +HeapAlloc dt=7 heapalloc_value=12793160 +HeapAlloc dt=8 heapalloc_value=12801352 +HeapAlloc dt=8 heapalloc_value=12809544 +HeapAlloc dt=7 heapalloc_value=12817736 +HeapAlloc dt=7 heapalloc_value=12825928 +HeapAlloc dt=8 heapalloc_value=12834120 +HeapAlloc dt=7 heapalloc_value=12842312 +HeapAlloc dt=8 heapalloc_value=12850504 +HeapAlloc dt=8 heapalloc_value=12858696 
+HeapAlloc dt=7 heapalloc_value=12866888 +HeapAlloc dt=13 heapalloc_value=12875080 +HeapAlloc dt=8 heapalloc_value=12883272 +HeapAlloc dt=5 heapalloc_value=12891464 +HeapAlloc dt=6 heapalloc_value=12899656 +HeapAlloc dt=6 heapalloc_value=12907848 +HeapAlloc dt=5 heapalloc_value=12916040 +HeapAlloc dt=6 heapalloc_value=12924232 +HeapAlloc dt=6 heapalloc_value=12932424 +HeapAlloc dt=5 heapalloc_value=12940616 +HeapAlloc dt=6 heapalloc_value=12948808 +HeapAlloc dt=5 heapalloc_value=12957000 +HeapAlloc dt=6 heapalloc_value=12965192 +HeapAlloc dt=5 heapalloc_value=12973384 +HeapAlloc dt=6 heapalloc_value=12981576 +HeapAlloc dt=6 heapalloc_value=12989768 +HeapAlloc dt=5 heapalloc_value=12997960 +HeapAlloc dt=6 heapalloc_value=13006152 +HeapAlloc dt=6 heapalloc_value=13014344 +HeapAlloc dt=5 heapalloc_value=13022536 +HeapAlloc dt=6 heapalloc_value=13030728 +HeapAlloc dt=5 heapalloc_value=13038920 +HeapAlloc dt=62 heapalloc_value=13047112 +HeapAlloc dt=39 heapalloc_value=13055304 +HeapAlloc dt=7 heapalloc_value=13063496 +HeapAlloc dt=6 heapalloc_value=13071688 +HeapAlloc dt=6 heapalloc_value=13079880 +HeapAlloc dt=6 heapalloc_value=13088072 +HeapAlloc dt=5 heapalloc_value=13096264 +HeapAlloc dt=5 heapalloc_value=13104456 +HeapAlloc dt=6 heapalloc_value=13112648 +HeapAlloc dt=6 heapalloc_value=13120840 +HeapAlloc dt=5 heapalloc_value=13129032 +HeapAlloc dt=10 heapalloc_value=13137224 +HeapAlloc dt=6 heapalloc_value=13145416 +HeapAlloc dt=5 heapalloc_value=13153608 +HeapAlloc dt=6 heapalloc_value=13161800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=124 +ProcStart dt=17212 p=2 p_seq=9 +ProcStop dt=13 +ProcStart dt=1068 p=0 p_seq=20 +ProcStop dt=8 +ProcStart dt=16756 p=0 p_seq=21 +GoUnblock dt=11 g=1 g_seq=46 stack=0 +GoStart dt=92 g=1 g_seq=47 +HeapAlloc dt=19 heapalloc_value=14193992 +HeapAlloc dt=10 heapalloc_value=14202184 +HeapAlloc dt=6 heapalloc_value=14210376 +HeapAlloc dt=6 heapalloc_value=14218568 +HeapAlloc dt=6 heapalloc_value=14226760 +HeapAlloc dt=6 
heapalloc_value=14234952 +HeapAlloc dt=6 heapalloc_value=14243144 +HeapAlloc dt=6 heapalloc_value=14251336 +HeapAlloc dt=6 heapalloc_value=14259528 +HeapAlloc dt=6 heapalloc_value=14267720 +HeapAlloc dt=5 heapalloc_value=14275912 +HeapAlloc dt=6 heapalloc_value=14284104 +HeapAlloc dt=6 heapalloc_value=14292296 +HeapAlloc dt=6 heapalloc_value=14300488 +HeapAlloc dt=60 heapalloc_value=14308680 +HeapAlloc dt=8 heapalloc_value=14316872 +HeapAlloc dt=6 heapalloc_value=14325064 +HeapAlloc dt=6 heapalloc_value=14333256 +HeapAlloc dt=6 heapalloc_value=14341448 +HeapAlloc dt=5 heapalloc_value=14349640 +HeapAlloc dt=6 heapalloc_value=14357832 +HeapAlloc dt=6 heapalloc_value=14366024 +HeapAlloc dt=6 heapalloc_value=14374216 +HeapAlloc dt=6 heapalloc_value=14382408 +HeapAlloc dt=8 heapalloc_value=14390600 +HeapAlloc dt=6 heapalloc_value=14398792 +HeapAlloc dt=6 heapalloc_value=14406984 +HeapAlloc dt=6 heapalloc_value=14415176 +HeapAlloc dt=6 heapalloc_value=14423368 +HeapAlloc dt=5 heapalloc_value=14431560 +HeapAlloc dt=6 heapalloc_value=14439752 +HeapAlloc dt=7 heapalloc_value=14447944 +HeapAlloc dt=5 heapalloc_value=14456136 +HeapAlloc dt=6 heapalloc_value=14464328 +HeapAlloc dt=6 heapalloc_value=14472520 +HeapAlloc dt=5 heapalloc_value=14480712 +HeapAlloc dt=6 heapalloc_value=14488904 +HeapAlloc dt=6 heapalloc_value=14497096 +HeapAlloc dt=6 heapalloc_value=14505288 +HeapAlloc dt=6 heapalloc_value=14513480 +HeapAlloc dt=6 heapalloc_value=14521672 +HeapAlloc dt=6 heapalloc_value=14529864 +HeapAlloc dt=5 heapalloc_value=14538056 +HeapAlloc dt=6 heapalloc_value=14546248 +HeapAlloc dt=6 heapalloc_value=14554440 +HeapAlloc dt=5 heapalloc_value=14562632 +HeapAlloc dt=6 heapalloc_value=14570824 +HeapAlloc dt=6 heapalloc_value=14579016 +HeapAlloc dt=6 heapalloc_value=14587208 +HeapAlloc dt=6 heapalloc_value=14595400 +HeapAlloc dt=5 heapalloc_value=14603592 +HeapAlloc dt=6 heapalloc_value=14611784 +HeapAlloc dt=45 heapalloc_value=14619976 +HeapAlloc dt=7 heapalloc_value=14628168 
+HeapAlloc dt=6 heapalloc_value=14636360 +HeapAlloc dt=7 heapalloc_value=14644552 +HeapAlloc dt=5 heapalloc_value=14652744 +HeapAlloc dt=6 heapalloc_value=14660936 +HeapAlloc dt=6 heapalloc_value=14669128 +HeapAlloc dt=5 heapalloc_value=14677320 +HeapAlloc dt=6 heapalloc_value=14685512 +HeapAlloc dt=6 heapalloc_value=14693704 +HeapAlloc dt=6 heapalloc_value=14701896 +HeapAlloc dt=15 heapalloc_value=14710088 +HeapAlloc dt=6 heapalloc_value=14718280 +HeapAlloc dt=5 heapalloc_value=14726472 +HeapAlloc dt=35 heapalloc_value=14734664 +HeapAlloc dt=7 heapalloc_value=14742856 +HeapAlloc dt=6 heapalloc_value=14751048 +HeapAlloc dt=6 heapalloc_value=14759240 +HeapAlloc dt=6 heapalloc_value=14767432 +HeapAlloc dt=6 heapalloc_value=14775624 +HeapAlloc dt=6 heapalloc_value=14783816 +HeapAlloc dt=6 heapalloc_value=14792008 +HeapAlloc dt=5 heapalloc_value=14800200 +HeapAlloc dt=6 heapalloc_value=14808392 +HeapAlloc dt=5 heapalloc_value=14816584 +HeapAlloc dt=6 heapalloc_value=14824776 +HeapAlloc dt=6 heapalloc_value=14832968 +HeapAlloc dt=6 heapalloc_value=14841160 +HeapAlloc dt=6 heapalloc_value=14849352 +HeapAlloc dt=45 heapalloc_value=14857544 +HeapAlloc dt=6 heapalloc_value=14865736 +HeapAlloc dt=5 heapalloc_value=14873928 +HeapAlloc dt=6 heapalloc_value=14882120 +HeapAlloc dt=6 heapalloc_value=14890312 +HeapAlloc dt=6 heapalloc_value=14898504 +HeapAlloc dt=6 heapalloc_value=14906696 +HeapAlloc dt=6 heapalloc_value=14914888 +HeapAlloc dt=5 heapalloc_value=14923080 +HeapAlloc dt=6 heapalloc_value=14931272 +HeapAlloc dt=6 heapalloc_value=14939464 +HeapAlloc dt=5 heapalloc_value=14947656 +HeapAlloc dt=6 heapalloc_value=14955848 +HeapAlloc dt=6 heapalloc_value=14964040 +HeapAlloc dt=6 heapalloc_value=14972232 +HeapAlloc dt=5 heapalloc_value=14980424 +HeapAlloc dt=6 heapalloc_value=14988616 +HeapAlloc dt=6 heapalloc_value=14996808 +HeapAlloc dt=5 heapalloc_value=15005000 +HeapAlloc dt=6 heapalloc_value=15013192 +HeapAlloc dt=6 heapalloc_value=15021384 +HeapAlloc dt=6 
heapalloc_value=15029576 +HeapAlloc dt=6 heapalloc_value=15037768 +HeapAlloc dt=6 heapalloc_value=15045960 +HeapAlloc dt=5 heapalloc_value=15054152 +HeapAlloc dt=6 heapalloc_value=15062344 +HeapAlloc dt=6 heapalloc_value=15070536 +HeapAlloc dt=6 heapalloc_value=15078728 +HeapAlloc dt=5 heapalloc_value=15086920 +HeapAlloc dt=6 heapalloc_value=15095112 +HeapAlloc dt=6 heapalloc_value=15103304 +HeapAlloc dt=5 heapalloc_value=15111496 +HeapAlloc dt=6 heapalloc_value=15119688 +HeapAlloc dt=6 heapalloc_value=15127880 +HeapAlloc dt=5 heapalloc_value=15136072 +HeapAlloc dt=51 heapalloc_value=15471944 +HeapAlloc dt=2533 heapalloc_value=15480136 +HeapAlloc dt=11 heapalloc_value=15488328 +HeapAlloc dt=9 heapalloc_value=15496520 +HeapAlloc dt=7 heapalloc_value=15504712 +HeapAlloc dt=9 heapalloc_value=15512904 +HeapAlloc dt=9 heapalloc_value=15521096 +HeapAlloc dt=7 heapalloc_value=15529288 +HeapAlloc dt=8 heapalloc_value=15537480 +HeapAlloc dt=8 heapalloc_value=15545672 +GoBlock dt=13 reason_string=19 stack=21 +ProcStop dt=116 +ProcStart dt=17265 p=2 p_seq=11 +ProcStop dt=10 +ProcStart dt=1450 p=0 p_seq=24 +ProcStop dt=9 +ProcStart dt=17026 p=0 p_seq=25 +GoUnblock dt=12 g=1 g_seq=50 stack=0 +GoStart dt=148 g=1 g_seq=51 +HeapAlloc dt=20 heapalloc_value=16577864 +HeapAlloc dt=15 heapalloc_value=16586056 +HeapAlloc dt=10 heapalloc_value=16594248 +HeapAlloc dt=11 heapalloc_value=16602440 +HeapAlloc dt=9 heapalloc_value=16610632 +HeapAlloc dt=9 heapalloc_value=16618824 +HeapAlloc dt=10 heapalloc_value=16627016 +HeapAlloc dt=9 heapalloc_value=16635208 +HeapAlloc dt=11 heapalloc_value=16643400 +HeapAlloc dt=11 heapalloc_value=16651592 +HeapAlloc dt=9 heapalloc_value=16659784 +HeapAlloc dt=11 heapalloc_value=16667976 +HeapAlloc dt=9 heapalloc_value=16676168 +HeapAlloc dt=10 heapalloc_value=16684360 +HeapAlloc dt=10 heapalloc_value=16692552 +HeapAlloc dt=10 heapalloc_value=16700744 +HeapAlloc dt=11 heapalloc_value=16708936 +HeapAlloc dt=11 heapalloc_value=16717128 +HeapAlloc dt=9 
heapalloc_value=16725320 +HeapAlloc dt=78 heapalloc_value=16733512 +HeapAlloc dt=14 heapalloc_value=16741704 +HeapAlloc dt=10 heapalloc_value=16749896 +HeapAlloc dt=11 heapalloc_value=16758088 +HeapAlloc dt=11 heapalloc_value=16766280 +HeapAlloc dt=10 heapalloc_value=16774472 +HeapAlloc dt=9 heapalloc_value=16782664 +HeapAlloc dt=10 heapalloc_value=16790856 +HeapAlloc dt=9 heapalloc_value=16799048 +HeapAlloc dt=21 heapalloc_value=16807240 +HeapAlloc dt=11 heapalloc_value=16815432 +HeapAlloc dt=9 heapalloc_value=16823624 +HeapAlloc dt=9 heapalloc_value=16831816 +HeapAlloc dt=9 heapalloc_value=16840008 +HeapAlloc dt=10 heapalloc_value=16848200 +HeapAlloc dt=11 heapalloc_value=16856392 +HeapAlloc dt=9 heapalloc_value=16864584 +HeapAlloc dt=6 heapalloc_value=16872776 +HeapAlloc dt=9 heapalloc_value=16880968 +HeapAlloc dt=6 heapalloc_value=16889160 +HeapAlloc dt=6 heapalloc_value=16897352 +HeapAlloc dt=5 heapalloc_value=16905544 +HeapAlloc dt=6 heapalloc_value=16913736 +HeapAlloc dt=6 heapalloc_value=16921928 +HeapAlloc dt=5 heapalloc_value=16930120 +HeapAlloc dt=6 heapalloc_value=16938312 +HeapAlloc dt=5 heapalloc_value=16946504 +HeapAlloc dt=6 heapalloc_value=16954696 +HeapAlloc dt=5 heapalloc_value=16962888 +HeapAlloc dt=5 heapalloc_value=16971080 +HeapAlloc dt=5 heapalloc_value=16979272 +HeapAlloc dt=6 heapalloc_value=16987464 +HeapAlloc dt=5 heapalloc_value=16995656 +HeapAlloc dt=5 heapalloc_value=17003848 +HeapAlloc dt=6 heapalloc_value=17012040 +HeapAlloc dt=5 heapalloc_value=17020232 +HeapAlloc dt=6 heapalloc_value=17028424 +HeapAlloc dt=5 heapalloc_value=17036616 +HeapAlloc dt=53 heapalloc_value=17044808 +HeapAlloc dt=7 heapalloc_value=17053000 +HeapAlloc dt=5 heapalloc_value=17061192 +HeapAlloc dt=6 heapalloc_value=17069384 +HeapAlloc dt=11 heapalloc_value=17077576 +HeapAlloc dt=10 heapalloc_value=17085768 +HeapAlloc dt=5 heapalloc_value=17093960 +HeapAlloc dt=5 heapalloc_value=17102152 +HeapAlloc dt=6 heapalloc_value=17110344 +HeapAlloc dt=5 
heapalloc_value=17118536 +HeapAlloc dt=5 heapalloc_value=17126728 +HeapAlloc dt=6 heapalloc_value=17134920 +HeapAlloc dt=5 heapalloc_value=17143112 +HeapAlloc dt=6 heapalloc_value=17151304 +HeapAlloc dt=37 heapalloc_value=17159496 +GCBegin dt=15 gc_seq=5 stack=22 +STWBegin dt=37 kind_string=22 stack=28 +GoUnblock dt=288 g=4 g_seq=9 stack=29 +ProcsChange dt=56 procs_value=8 stack=30 +STWEnd dt=23 +GCMarkAssistBegin dt=90 stack=31 +GCMarkAssistEnd dt=3424 +HeapAlloc dt=523 heapalloc_value=17175048 +HeapAlloc dt=21 heapalloc_value=17183240 +HeapAlloc dt=46 heapalloc_value=17191432 +HeapAlloc dt=96 heapalloc_value=17199624 +HeapAlloc dt=12 heapalloc_value=17207816 +HeapAlloc dt=12 heapalloc_value=17216008 +HeapAlloc dt=13 heapalloc_value=17224200 +HeapAlloc dt=10 heapalloc_value=17232392 +HeapAlloc dt=12 heapalloc_value=17240584 +HeapAlloc dt=13 heapalloc_value=17248776 +HeapAlloc dt=12 heapalloc_value=17256968 +HeapAlloc dt=14 heapalloc_value=17265160 +HeapAlloc dt=12 heapalloc_value=17273352 +HeapAlloc dt=12 heapalloc_value=17281544 +HeapAlloc dt=11 heapalloc_value=17289736 +HeapAlloc dt=13 heapalloc_value=17297928 +HeapAlloc dt=36 heapalloc_value=17306120 +HeapAlloc dt=12 heapalloc_value=17314312 +HeapAlloc dt=10 heapalloc_value=17322504 +HeapAlloc dt=12 heapalloc_value=17330696 +HeapAlloc dt=10 heapalloc_value=17338888 +HeapAlloc dt=11 heapalloc_value=17347080 +HeapAlloc dt=10 heapalloc_value=17355272 +HeapAlloc dt=10 heapalloc_value=17363464 +HeapAlloc dt=10 heapalloc_value=17371656 +HeapAlloc dt=11 heapalloc_value=17379848 +HeapAlloc dt=8 heapalloc_value=17388040 +HeapAlloc dt=13 heapalloc_value=17396232 +HeapAlloc dt=10 heapalloc_value=17404424 +HeapAlloc dt=13 heapalloc_value=17412616 +HeapAlloc dt=13 heapalloc_value=17420808 +HeapAlloc dt=10 heapalloc_value=17429000 +HeapAlloc dt=31 heapalloc_value=17437192 +HeapAlloc dt=6 heapalloc_value=17445384 +HeapAlloc dt=7 heapalloc_value=17453576 +HeapAlloc dt=6 heapalloc_value=17461768 +HeapAlloc dt=7 
heapalloc_value=17469960 +HeapAlloc dt=7 heapalloc_value=17478152 +HeapAlloc dt=7 heapalloc_value=17486344 +HeapAlloc dt=7 heapalloc_value=17494536 +HeapAlloc dt=12 heapalloc_value=17502728 +HeapAlloc dt=7 heapalloc_value=17510920 +HeapAlloc dt=12 heapalloc_value=17519112 +HeapAlloc dt=13 heapalloc_value=17527304 +HeapAlloc dt=20 heapalloc_value=17535496 +HeapAlloc dt=15 heapalloc_value=17543688 +HeapAlloc dt=6 heapalloc_value=17551880 +HeapAlloc dt=7 heapalloc_value=17560072 +HeapAlloc dt=72 heapalloc_value=17568264 +HeapAlloc dt=37 heapalloc_value=17576456 +HeapAlloc dt=7 heapalloc_value=17584648 +HeapAlloc dt=7 heapalloc_value=17592840 +HeapAlloc dt=6 heapalloc_value=17601032 +GoBlock dt=13 reason_string=19 stack=21 +GoUnblock dt=157 g=24 g_seq=12 stack=0 +GoStart dt=7 g=24 g_seq=13 +GoLabel dt=1 label_string=2 +STWBegin dt=4128 kind_string=23 stack=37 +GoUnblock dt=64 g=25 g_seq=8 stack=38 +HeapAlloc dt=25 heapalloc_value=16970376 +GoUnblock dt=24 g=3 g_seq=5 stack=39 +GCEnd dt=6 gc_seq=6 +HeapGoal dt=7 heapgoal_value=34360936 +ProcsChange dt=46 procs_value=8 stack=40 +STWEnd dt=49 +GoBlock dt=756 reason_string=15 stack=27 +GoStart dt=10 g=3 g_seq=6 +GoBlock dt=14862 reason_string=14 stack=44 +ProcStop dt=25 +ProcStart dt=132428 p=0 p_seq=32 +GoStart dt=162 g=4 g_seq=12 +GoBlock dt=19 reason_string=15 stack=32 +ProcStop dt=20 +ProcStart dt=8304 p=0 p_seq=33 +GoStart dt=191 g=39 g_seq=1 +GoStop dt=306173 reason_string=16 stack=50 +GoStart dt=17 g=39 g_seq=2 +GoStop dt=315175 reason_string=16 stack=50 +GoStart dt=7 g=39 g_seq=3 +GoDestroy dt=159902 +ProcStop dt=50 +EventBatch gen=1 m=1709040 time=7689670148204 size=3534 +ProcStart dt=256 p=1 p_seq=1 +GoStart dt=186 g=6 g_seq=1 +HeapAlloc dt=320 heapalloc_value=2768896 +HeapAlloc dt=22 heapalloc_value=2777088 +GoBlock dt=229 reason_string=12 stack=15 +GoStart dt=12 g=8 g_seq=1 +HeapAlloc dt=15 heapalloc_value=2785280 +GoSyscallBegin dt=16 p_seq=2 stack=16 +GoSyscallEnd dt=254 +GoBlock dt=293 reason_string=15 
stack=17 +GoStart dt=19 g=9 g_seq=1 +GoDestroy dt=156265 +ProcStop dt=44 +ProcStart dt=67218 p=1 p_seq=3 +ProcStop dt=19 +ProcStart dt=88214 p=1 p_seq=4 +ProcStop dt=13 +ProcStart dt=17539 p=0 p_seq=1 +ProcStop dt=14 +ProcStart dt=9071 p=4 p_seq=1 +GoUnblock dt=33 g=22 g_seq=2 stack=0 +GoStart dt=6 g=22 g_seq=3 +GoLabel dt=1 label_string=4 +GoUnblock dt=2321 g=1 g_seq=23 stack=34 +STWBegin dt=1205 kind_string=23 stack=37 +GoUnblock dt=78 g=24 g_seq=6 stack=38 +HeapAlloc dt=26 heapalloc_value=3840752 +GoStatus dt=14 g=3 m=18446744073709551615 gstatus=4 +GoUnblock dt=7 g=3 g_seq=1 stack=39 +GCEnd dt=3 gc_seq=2 +HeapGoal dt=6 heapgoal_value=8101720 +ProcsChange dt=43 procs_value=8 stack=40 +STWEnd dt=31 +GoBlock dt=4030 reason_string=15 stack=27 +GoStart dt=12 g=3 g_seq=2 +GoBlock dt=1406 reason_string=14 stack=44 +ProcStop dt=24 +ProcStart dt=34332 p=4 p_seq=4 +GoStart dt=153 g=4 g_seq=4 +GoBlock dt=20 reason_string=15 stack=32 +ProcStop dt=19 +ProcStart dt=1832 p=2 p_seq=5 +GoUnblock dt=22 g=24 g_seq=8 stack=0 +GoStart dt=102 g=24 g_seq=9 +GoLabel dt=1 label_string=2 +STWBegin dt=11769 kind_string=23 stack=37 +GoUnblock dt=60 g=1 g_seq=36 stack=38 +HeapAlloc dt=23 heapalloc_value=8744264 +GoUnblock dt=17 g=3 g_seq=3 stack=39 +GCEnd dt=6 gc_seq=4 +HeapGoal dt=7 heapgoal_value=17908728 +ProcsChange dt=47 procs_value=8 stack=40 +STWEnd dt=28 +GoBlock dt=572 reason_string=15 stack=27 +GoStart dt=13 g=3 g_seq=4 +GoBlock dt=5707 reason_string=14 stack=44 +ProcStop dt=16 +ProcStart dt=136502 p=1 p_seq=11 +GoStart dt=17 g=4 g_seq=8 +GoBlock dt=12 reason_string=15 stack=32 +ProcStop dt=22 +ProcStart dt=5977 p=6 p_seq=1 +ProcStop dt=34 +ProcStart dt=16775 p=2 p_seq=15 +ProcStop dt=23 +ProcStart dt=3966 p=1 p_seq=14 +ProcStop dt=15 +ProcStart dt=16753 p=1 p_seq=15 +GoUnblock dt=35 g=1 g_seq=57 stack=0 +GoStart dt=139 g=1 g_seq=58 +HeapAlloc dt=71 heapalloc_value=17593992 +HeapAlloc dt=47 heapalloc_value=17602184 +HeapAlloc dt=24 heapalloc_value=17610376 +HeapAlloc dt=97 
heapalloc_value=17618568 +HeapAlloc dt=23 heapalloc_value=17626760 +HeapAlloc dt=18 heapalloc_value=17634952 +HeapAlloc dt=15 heapalloc_value=17643144 +HeapAlloc dt=18 heapalloc_value=17651336 +HeapAlloc dt=21 heapalloc_value=17659528 +HeapAlloc dt=28 heapalloc_value=17667720 +HeapAlloc dt=26 heapalloc_value=17675912 +HeapAlloc dt=23 heapalloc_value=17684104 +HeapAlloc dt=12 heapalloc_value=17692296 +HeapAlloc dt=12 heapalloc_value=17700488 +HeapAlloc dt=11 heapalloc_value=17708680 +HeapAlloc dt=15 heapalloc_value=17716872 +HeapAlloc dt=18 heapalloc_value=17725064 +HeapAlloc dt=15 heapalloc_value=17733256 +HeapAlloc dt=165 heapalloc_value=17741448 +HeapAlloc dt=16 heapalloc_value=17749640 +HeapAlloc dt=12 heapalloc_value=17757832 +HeapAlloc dt=15 heapalloc_value=17766024 +HeapAlloc dt=12 heapalloc_value=17774216 +HeapAlloc dt=12 heapalloc_value=17782408 +HeapAlloc dt=15 heapalloc_value=17790600 +HeapAlloc dt=11 heapalloc_value=17798792 +HeapAlloc dt=11 heapalloc_value=17806984 +HeapAlloc dt=12 heapalloc_value=17815176 +HeapAlloc dt=12 heapalloc_value=17823368 +HeapAlloc dt=15 heapalloc_value=17831560 +HeapAlloc dt=11 heapalloc_value=17839752 +HeapAlloc dt=12 heapalloc_value=17847944 +HeapAlloc dt=15 heapalloc_value=17856136 +HeapAlloc dt=11 heapalloc_value=17864328 +HeapAlloc dt=12 heapalloc_value=17872520 +HeapAlloc dt=12 heapalloc_value=17880712 +HeapAlloc dt=14 heapalloc_value=17888904 +HeapAlloc dt=42 heapalloc_value=17897096 +HeapAlloc dt=54 heapalloc_value=17905288 +HeapAlloc dt=49 heapalloc_value=17913480 +HeapAlloc dt=54 heapalloc_value=17921672 +HeapAlloc dt=56 heapalloc_value=17929864 +HeapAlloc dt=45 heapalloc_value=17938056 +HeapAlloc dt=57 heapalloc_value=17946248 +HeapAlloc dt=63 heapalloc_value=17954440 +HeapAlloc dt=57 heapalloc_value=17962632 +HeapAlloc dt=56 heapalloc_value=17970824 +HeapAlloc dt=62 heapalloc_value=17979016 +HeapAlloc dt=109 heapalloc_value=17987208 +HeapAlloc dt=59 heapalloc_value=17995400 +HeapAlloc dt=45 
heapalloc_value=18003592 +HeapAlloc dt=61 heapalloc_value=18011784 +HeapAlloc dt=35 heapalloc_value=18019976 +HeapAlloc dt=16 heapalloc_value=18028168 +HeapAlloc dt=15 heapalloc_value=18036360 +HeapAlloc dt=15 heapalloc_value=18044552 +HeapAlloc dt=21 heapalloc_value=18052744 +HeapAlloc dt=16 heapalloc_value=18060936 +HeapAlloc dt=16 heapalloc_value=18069128 +HeapAlloc dt=22 heapalloc_value=18077320 +HeapAlloc dt=43 heapalloc_value=18085512 +HeapAlloc dt=46 heapalloc_value=18093704 +HeapAlloc dt=43 heapalloc_value=18101896 +HeapAlloc dt=42 heapalloc_value=18110088 +HeapAlloc dt=44 heapalloc_value=18118280 +HeapAlloc dt=35 heapalloc_value=18126472 +HeapAlloc dt=39 heapalloc_value=18134664 +HeapAlloc dt=40 heapalloc_value=18142856 +HeapAlloc dt=43 heapalloc_value=18151048 +HeapAlloc dt=44 heapalloc_value=18159240 +HeapAlloc dt=38 heapalloc_value=18167432 +HeapAlloc dt=42 heapalloc_value=18175624 +HeapAlloc dt=40 heapalloc_value=18183816 +HeapAlloc dt=40 heapalloc_value=18192008 +HeapAlloc dt=36 heapalloc_value=18200200 +HeapAlloc dt=55 heapalloc_value=18208392 +HeapAlloc dt=54 heapalloc_value=18216584 +HeapAlloc dt=54 heapalloc_value=18224776 +HeapAlloc dt=41 heapalloc_value=18232968 +HeapAlloc dt=58 heapalloc_value=18241160 +HeapAlloc dt=61 heapalloc_value=18249352 +HeapAlloc dt=55 heapalloc_value=18257544 +HeapAlloc dt=141 heapalloc_value=18265736 +HeapAlloc dt=55 heapalloc_value=18273928 +HeapAlloc dt=54 heapalloc_value=18282120 +HeapAlloc dt=50 heapalloc_value=18290312 +HeapAlloc dt=82 heapalloc_value=18298504 +HeapAlloc dt=64 heapalloc_value=18306696 +HeapAlloc dt=55 heapalloc_value=18314888 +HeapAlloc dt=58 heapalloc_value=18323080 +HeapAlloc dt=54 heapalloc_value=18331272 +HeapAlloc dt=57 heapalloc_value=18339464 +HeapAlloc dt=46 heapalloc_value=18347656 +HeapAlloc dt=41 heapalloc_value=18355848 +HeapAlloc dt=56 heapalloc_value=18364040 +HeapAlloc dt=50 heapalloc_value=18372232 +HeapAlloc dt=54 heapalloc_value=18380424 +HeapAlloc dt=56 heapalloc_value=18388616 
+HeapAlloc dt=57 heapalloc_value=18396808 +HeapAlloc dt=55 heapalloc_value=18405000 +HeapAlloc dt=55 heapalloc_value=18413192 +HeapAlloc dt=51 heapalloc_value=18421384 +HeapAlloc dt=52 heapalloc_value=18429576 +HeapAlloc dt=67 heapalloc_value=18437768 +HeapAlloc dt=36 heapalloc_value=18445960 +HeapAlloc dt=28 heapalloc_value=18454152 +HeapAlloc dt=30 heapalloc_value=18462344 +HeapAlloc dt=40 heapalloc_value=18470536 +HeapAlloc dt=29 heapalloc_value=18478728 +HeapAlloc dt=37 heapalloc_value=18486920 +HeapAlloc dt=34 heapalloc_value=18495112 +HeapAlloc dt=73 heapalloc_value=18503304 +HeapAlloc dt=37 heapalloc_value=18511496 +HeapAlloc dt=38 heapalloc_value=18519688 +HeapAlloc dt=29 heapalloc_value=18527880 +HeapAlloc dt=35 heapalloc_value=18536072 +HeapAlloc dt=33 heapalloc_value=18544264 +HeapAlloc dt=40 heapalloc_value=18552456 +HeapAlloc dt=32 heapalloc_value=18560648 +HeapAlloc dt=42 heapalloc_value=18568840 +HeapAlloc dt=34 heapalloc_value=18577032 +HeapAlloc dt=37 heapalloc_value=18585224 +HeapAlloc dt=35 heapalloc_value=18593416 +HeapAlloc dt=39 heapalloc_value=18601608 +HeapAlloc dt=35 heapalloc_value=18609800 +GoBlock dt=51 reason_string=19 stack=21 +ProcStop dt=192 +ProcStart dt=17579 p=0 p_seq=27 +ProcStop dt=18 +ProcStart dt=1930 p=1 p_seq=18 +ProcStop dt=15 +ProcStart dt=16696 p=1 p_seq=19 +GoUnblock dt=22 g=1 g_seq=61 stack=0 +GoStart dt=125 g=1 g_seq=62 +HeapAlloc dt=53 heapalloc_value=19641992 +HeapAlloc dt=19 heapalloc_value=19650184 +HeapAlloc dt=20 heapalloc_value=19658376 +HeapAlloc dt=23 heapalloc_value=19666568 +HeapAlloc dt=16 heapalloc_value=19674760 +HeapAlloc dt=16 heapalloc_value=19682952 +HeapAlloc dt=19 heapalloc_value=19691144 +HeapAlloc dt=15 heapalloc_value=19699336 +HeapAlloc dt=12 heapalloc_value=19707528 +HeapAlloc dt=12 heapalloc_value=19715720 +HeapAlloc dt=13 heapalloc_value=19723912 +HeapAlloc dt=18 heapalloc_value=19732104 +HeapAlloc dt=12 heapalloc_value=19740296 +HeapAlloc dt=12 heapalloc_value=19748488 +HeapAlloc dt=9 
heapalloc_value=19756680 +HeapAlloc dt=6 heapalloc_value=19764872 +HeapAlloc dt=5 heapalloc_value=19773064 +HeapAlloc dt=6 heapalloc_value=19781256 +HeapAlloc dt=5 heapalloc_value=19789448 +HeapAlloc dt=10 heapalloc_value=19797640 +HeapAlloc dt=5 heapalloc_value=19805832 +HeapAlloc dt=6 heapalloc_value=19814024 +HeapAlloc dt=9 heapalloc_value=19822216 +HeapAlloc dt=6 heapalloc_value=19830408 +HeapAlloc dt=117 heapalloc_value=19838600 +HeapAlloc dt=17 heapalloc_value=19846792 +HeapAlloc dt=5 heapalloc_value=19854984 +HeapAlloc dt=10 heapalloc_value=19863176 +HeapAlloc dt=6 heapalloc_value=19871368 +HeapAlloc dt=6 heapalloc_value=19879560 +HeapAlloc dt=9 heapalloc_value=19887752 +HeapAlloc dt=6 heapalloc_value=19895944 +HeapAlloc dt=6 heapalloc_value=19904136 +HeapAlloc dt=5 heapalloc_value=19912328 +HeapAlloc dt=6 heapalloc_value=19920520 +HeapAlloc dt=10 heapalloc_value=19928712 +HeapAlloc dt=5 heapalloc_value=19936904 +HeapAlloc dt=6 heapalloc_value=19945096 +HeapAlloc dt=9 heapalloc_value=19953288 +HeapAlloc dt=6 heapalloc_value=19961480 +HeapAlloc dt=35 heapalloc_value=19969672 +HeapAlloc dt=7 heapalloc_value=19977864 +HeapAlloc dt=5 heapalloc_value=19986056 +HeapAlloc dt=468 heapalloc_value=19994248 +HeapAlloc dt=14 heapalloc_value=20002440 +HeapAlloc dt=6 heapalloc_value=20010632 +HeapAlloc dt=10 heapalloc_value=20018824 +HeapAlloc dt=5 heapalloc_value=20027016 +HeapAlloc dt=6 heapalloc_value=20035208 +HeapAlloc dt=11 heapalloc_value=20043400 +HeapAlloc dt=6 heapalloc_value=20051592 +HeapAlloc dt=5 heapalloc_value=20059784 +HeapAlloc dt=6 heapalloc_value=20067976 +HeapAlloc dt=5 heapalloc_value=20076168 +HeapAlloc dt=7 heapalloc_value=20084360 +HeapAlloc dt=6 heapalloc_value=20092552 +HeapAlloc dt=5 heapalloc_value=20100744 +HeapAlloc dt=6 heapalloc_value=20108936 +HeapAlloc dt=6 heapalloc_value=20117128 +HeapAlloc dt=5 heapalloc_value=20125320 +HeapAlloc dt=6 heapalloc_value=20133512 +HeapAlloc dt=6 heapalloc_value=20141704 +HeapAlloc dt=7 
heapalloc_value=20149896 +HeapAlloc dt=5 heapalloc_value=20158088 +HeapAlloc dt=6 heapalloc_value=20166280 +HeapAlloc dt=5 heapalloc_value=20174472 +HeapAlloc dt=6 heapalloc_value=20182664 +HeapAlloc dt=6 heapalloc_value=20190856 +HeapAlloc dt=5 heapalloc_value=20199048 +HeapAlloc dt=5 heapalloc_value=20207240 +HeapAlloc dt=6 heapalloc_value=20215432 +HeapAlloc dt=6 heapalloc_value=20223624 +HeapAlloc dt=5 heapalloc_value=20231816 +HeapAlloc dt=6 heapalloc_value=20240008 +HeapAlloc dt=5 heapalloc_value=20248200 +HeapAlloc dt=5 heapalloc_value=20256392 +HeapAlloc dt=6 heapalloc_value=20264584 +HeapAlloc dt=5 heapalloc_value=20272776 +HeapAlloc dt=6 heapalloc_value=20280968 +HeapAlloc dt=5 heapalloc_value=20289160 +HeapAlloc dt=6 heapalloc_value=20297352 +HeapAlloc dt=5 heapalloc_value=20305544 +HeapAlloc dt=6 heapalloc_value=20313736 +HeapAlloc dt=5 heapalloc_value=20321928 +HeapAlloc dt=6 heapalloc_value=20330120 +HeapAlloc dt=5 heapalloc_value=20338312 +HeapAlloc dt=6 heapalloc_value=20346504 +HeapAlloc dt=6 heapalloc_value=20354696 +HeapAlloc dt=62 heapalloc_value=20362888 +HeapAlloc dt=7 heapalloc_value=20371080 +HeapAlloc dt=5 heapalloc_value=20379272 +HeapAlloc dt=6 heapalloc_value=20387464 +HeapAlloc dt=37 heapalloc_value=20395656 +HeapAlloc dt=7 heapalloc_value=20403848 +HeapAlloc dt=6 heapalloc_value=20412040 +HeapAlloc dt=5 heapalloc_value=20420232 +HeapAlloc dt=6 heapalloc_value=20428424 +HeapAlloc dt=5 heapalloc_value=20436616 +HeapAlloc dt=6 heapalloc_value=20444808 +HeapAlloc dt=5 heapalloc_value=20453000 +HeapAlloc dt=6 heapalloc_value=20461192 +HeapAlloc dt=5 heapalloc_value=20469384 +HeapAlloc dt=6 heapalloc_value=20477576 +HeapAlloc dt=5 heapalloc_value=20485768 +HeapAlloc dt=6 heapalloc_value=20493960 +HeapAlloc dt=5 heapalloc_value=20502152 +HeapAlloc dt=6 heapalloc_value=20510344 +HeapAlloc dt=9 heapalloc_value=20518536 +HeapAlloc dt=6 heapalloc_value=20526728 +HeapAlloc dt=5 heapalloc_value=20534920 +HeapAlloc dt=6 heapalloc_value=20543112 
+HeapAlloc dt=5 heapalloc_value=20551304 +HeapAlloc dt=6 heapalloc_value=20559496 +HeapAlloc dt=5 heapalloc_value=20567688 +HeapAlloc dt=6 heapalloc_value=20575880 +HeapAlloc dt=5 heapalloc_value=20584072 +HeapAlloc dt=6 heapalloc_value=20592264 +HeapAlloc dt=38 heapalloc_value=20600456 +HeapAlloc dt=7 heapalloc_value=20608648 +HeapAlloc dt=5 heapalloc_value=20616840 +HeapAlloc dt=6 heapalloc_value=20625032 +HeapAlloc dt=5 heapalloc_value=20633224 +HeapAlloc dt=6 heapalloc_value=20641416 +HeapAlloc dt=5 heapalloc_value=20649608 +HeapAlloc dt=6 heapalloc_value=20657800 +GoBlock dt=12 reason_string=19 stack=21 +ProcStop dt=167 +ProcStart dt=17576 p=0 p_seq=29 +ProcStop dt=20 +ProcStart dt=3256 p=1 p_seq=22 +ProcStop dt=17 +ProcStart dt=16071 p=1 p_seq=23 +GoUnblock dt=21 g=1 g_seq=65 stack=0 +GoStart dt=124 g=1 g_seq=66 +HeapAlloc dt=51 heapalloc_value=22230664 +HeapAlloc dt=26 heapalloc_value=22238856 +HeapAlloc dt=16 heapalloc_value=22247048 +HeapAlloc dt=19 heapalloc_value=22255240 +HeapAlloc dt=19 heapalloc_value=22263432 +HeapAlloc dt=16 heapalloc_value=22271624 +HeapAlloc dt=16 heapalloc_value=22279816 +HeapAlloc dt=19 heapalloc_value=22288008 +HeapAlloc dt=18 heapalloc_value=22296200 +HeapAlloc dt=16 heapalloc_value=22304392 +HeapAlloc dt=12 heapalloc_value=22312584 +HeapAlloc dt=13 heapalloc_value=22320776 +HeapAlloc dt=15 heapalloc_value=22328968 +HeapAlloc dt=12 heapalloc_value=22337160 +HeapAlloc dt=6 heapalloc_value=22345352 +HeapAlloc dt=8 heapalloc_value=22353544 +HeapAlloc dt=6 heapalloc_value=22361736 +HeapAlloc dt=5 heapalloc_value=22369928 +HeapAlloc dt=25 heapalloc_value=22378120 +HeapAlloc dt=23 heapalloc_value=22386312 +HeapAlloc dt=9 heapalloc_value=22394504 +HeapAlloc dt=6 heapalloc_value=22402696 +HeapAlloc dt=5 heapalloc_value=22410888 +HeapAlloc dt=10 heapalloc_value=22419080 +HeapAlloc dt=5 heapalloc_value=22427272 +HeapAlloc dt=6 heapalloc_value=22435464 +HeapAlloc dt=5 heapalloc_value=22443656 +HeapAlloc dt=6 heapalloc_value=22451848 
+HeapAlloc dt=8 heapalloc_value=22460040 +HeapAlloc dt=135 heapalloc_value=22468232 +HeapAlloc dt=8 heapalloc_value=22476424 +HeapAlloc dt=9 heapalloc_value=22484616 +HeapAlloc dt=6 heapalloc_value=22492808 +HeapAlloc dt=6 heapalloc_value=22501000 +HeapAlloc dt=6 heapalloc_value=22509192 +HeapAlloc dt=5 heapalloc_value=22517384 +HeapAlloc dt=9 heapalloc_value=22525576 +HeapAlloc dt=6 heapalloc_value=22533768 +HeapAlloc dt=6 heapalloc_value=22541960 +HeapAlloc dt=5 heapalloc_value=22550152 +HeapAlloc dt=6 heapalloc_value=22558344 +HeapAlloc dt=5 heapalloc_value=22566536 +HeapAlloc dt=6 heapalloc_value=22574728 +HeapAlloc dt=5 heapalloc_value=22582920 +HeapAlloc dt=9 heapalloc_value=22591112 +HeapAlloc dt=44 heapalloc_value=22599304 +HeapAlloc dt=7 heapalloc_value=22607496 +HeapAlloc dt=38 heapalloc_value=22615688 +HeapAlloc dt=6 heapalloc_value=22623880 +HeapAlloc dt=6 heapalloc_value=22632072 +HeapAlloc dt=6 heapalloc_value=22640264 +HeapAlloc dt=6 heapalloc_value=22648456 +HeapAlloc dt=6 heapalloc_value=22656648 +HeapAlloc dt=5 heapalloc_value=22664840 +HeapAlloc dt=6 heapalloc_value=22673032 +HeapAlloc dt=5 heapalloc_value=22681224 +HeapAlloc dt=6 heapalloc_value=22689416 +HeapAlloc dt=5 heapalloc_value=22697608 +HeapAlloc dt=6 heapalloc_value=22705800 +HeapAlloc dt=6 heapalloc_value=22713992 +HeapAlloc dt=5 heapalloc_value=22722184 +HeapAlloc dt=5 heapalloc_value=22730376 +HeapAlloc dt=6 heapalloc_value=22738568 +HeapAlloc dt=6 heapalloc_value=22746760 +HeapAlloc dt=5 heapalloc_value=22754952 +HeapAlloc dt=6 heapalloc_value=22763144 +HeapAlloc dt=6 heapalloc_value=22771336 +HeapAlloc dt=6 heapalloc_value=22779528 +HeapAlloc dt=5 heapalloc_value=22787720 +HeapAlloc dt=5 heapalloc_value=22795912 +HeapAlloc dt=6 heapalloc_value=22804104 +HeapAlloc dt=75 heapalloc_value=22812296 +HeapAlloc dt=7 heapalloc_value=22820488 +HeapAlloc dt=5 heapalloc_value=22828680 +HeapAlloc dt=6 heapalloc_value=22836872 +HeapAlloc dt=5 heapalloc_value=22845064 +HeapAlloc dt=6 
heapalloc_value=22853256 +HeapAlloc dt=6 heapalloc_value=22861448 +HeapAlloc dt=5 heapalloc_value=22869640 +HeapAlloc dt=6 heapalloc_value=22877832 +HeapAlloc dt=5 heapalloc_value=22886024 +HeapAlloc dt=5 heapalloc_value=22894216 +HeapAlloc dt=6 heapalloc_value=22902408 +HeapAlloc dt=7 heapalloc_value=22910600 +HeapAlloc dt=6 heapalloc_value=22918792 +HeapAlloc dt=5 heapalloc_value=22926984 +HeapAlloc dt=6 heapalloc_value=22935176 +HeapAlloc dt=6 heapalloc_value=22943368 +HeapAlloc dt=6 heapalloc_value=22951560 +HeapAlloc dt=5 heapalloc_value=22959752 +HeapAlloc dt=6 heapalloc_value=22967944 +HeapAlloc dt=7 heapalloc_value=22976136 +HeapAlloc dt=5 heapalloc_value=22984328 +HeapAlloc dt=43 heapalloc_value=22992520 +HeapAlloc dt=7 heapalloc_value=23000712 +HeapAlloc dt=5 heapalloc_value=23008904 +HeapAlloc dt=6 heapalloc_value=23017096 +HeapAlloc dt=35 heapalloc_value=23025288 +HeapAlloc dt=7 heapalloc_value=23033480 +HeapAlloc dt=5 heapalloc_value=23041672 +HeapAlloc dt=5 heapalloc_value=23049864 +HeapAlloc dt=6 heapalloc_value=23058056 +HeapAlloc dt=5 heapalloc_value=23066248 +HeapAlloc dt=6 heapalloc_value=23074440 +HeapAlloc dt=5 heapalloc_value=23082632 +HeapAlloc dt=6 heapalloc_value=23090824 +HeapAlloc dt=5 heapalloc_value=23099016 +HeapAlloc dt=6 heapalloc_value=23107208 +HeapAlloc dt=5 heapalloc_value=23115400 +HeapAlloc dt=6 heapalloc_value=23123592 +HeapAlloc dt=5 heapalloc_value=23131784 +HeapAlloc dt=12 heapalloc_value=23139976 +HeapAlloc dt=5 heapalloc_value=23148168 +HeapAlloc dt=6 heapalloc_value=23156360 +HeapAlloc dt=5 heapalloc_value=23164552 +HeapAlloc dt=6 heapalloc_value=23172744 +HeapAlloc dt=5 heapalloc_value=23180936 +HeapAlloc dt=6 heapalloc_value=23189128 +HeapAlloc dt=5 heapalloc_value=23197320 +HeapAlloc dt=7 heapalloc_value=23205512 +HeapAlloc dt=5 heapalloc_value=23213704 +HeapAlloc dt=6 heapalloc_value=23221896 +HeapAlloc dt=38 heapalloc_value=23230088 +HeapAlloc dt=7 heapalloc_value=23238280 +HeapAlloc dt=5 heapalloc_value=23246472 
+GoBlock dt=9 reason_string=19 stack=21 +ProcStop dt=164 +ProcStart dt=17494 p=0 p_seq=31 +ProcStop dt=25 +ProcStart dt=1701 p=1 p_seq=26 +ProcStop dt=16 +ProcStart dt=16748 p=2 p_seq=17 +GoUnblock dt=36 g=1 g_seq=71 stack=0 +GoStart dt=149 g=1 g_seq=72 +HeapAlloc dt=67 heapalloc_value=25302664 +HeapAlloc dt=38 heapalloc_value=25310856 +HeapAlloc dt=23 heapalloc_value=25319048 +HeapAlloc dt=17 heapalloc_value=25327240 +HeapAlloc dt=21 heapalloc_value=25335432 +HeapAlloc dt=17 heapalloc_value=25343624 +HeapAlloc dt=17 heapalloc_value=25351816 +HeapAlloc dt=16 heapalloc_value=25360008 +HeapAlloc dt=19 heapalloc_value=25368200 +HeapAlloc dt=16 heapalloc_value=25376392 +HeapAlloc dt=16 heapalloc_value=25384584 +HeapAlloc dt=16 heapalloc_value=25392776 +HeapAlloc dt=17 heapalloc_value=25400968 +HeapAlloc dt=16 heapalloc_value=25409160 +HeapAlloc dt=9 heapalloc_value=25417352 +HeapAlloc dt=9 heapalloc_value=25425544 +HeapAlloc dt=9 heapalloc_value=25433736 +HeapAlloc dt=10 heapalloc_value=25441928 +HeapAlloc dt=9 heapalloc_value=25450120 +HeapAlloc dt=10 heapalloc_value=25458312 +HeapAlloc dt=9 heapalloc_value=25466504 +HeapAlloc dt=6 heapalloc_value=25474696 +HeapAlloc dt=5 heapalloc_value=25482888 +HeapAlloc dt=6 heapalloc_value=25491080 +HeapAlloc dt=9 heapalloc_value=25499272 +HeapAlloc dt=6 heapalloc_value=25507464 +HeapAlloc dt=8 heapalloc_value=25515656 +HeapAlloc dt=7 heapalloc_value=25523848 +HeapAlloc dt=10 heapalloc_value=25532040 +HeapAlloc dt=9 heapalloc_value=25540232 +HeapAlloc dt=102 heapalloc_value=25548424 +HeapAlloc dt=7 heapalloc_value=25556616 +HeapAlloc dt=10 heapalloc_value=25564808 +HeapAlloc dt=5 heapalloc_value=25573000 +HeapAlloc dt=5 heapalloc_value=25581192 +HeapAlloc dt=36 heapalloc_value=25589384 +HeapAlloc dt=8 heapalloc_value=25597576 +HeapAlloc dt=5 heapalloc_value=25605768 +HeapAlloc dt=43 heapalloc_value=25613960 +HeapAlloc dt=7 heapalloc_value=25622152 +HeapAlloc dt=10 heapalloc_value=25630344 +HeapAlloc dt=6 heapalloc_value=25638536 
+HeapAlloc dt=6 heapalloc_value=25646728 +HeapAlloc dt=6 heapalloc_value=25654920 +HeapAlloc dt=7 heapalloc_value=25663112 +HeapAlloc dt=5 heapalloc_value=25671304 +HeapAlloc dt=6 heapalloc_value=25679496 +HeapAlloc dt=41 heapalloc_value=25687688 +HeapAlloc dt=13 heapalloc_value=25695880 +HeapAlloc dt=5 heapalloc_value=25704072 +HeapAlloc dt=6 heapalloc_value=25712264 +HeapAlloc dt=13 heapalloc_value=25720456 +HeapAlloc dt=13 heapalloc_value=25728648 +HeapAlloc dt=5 heapalloc_value=25736840 +HeapAlloc dt=6 heapalloc_value=25745032 +HeapAlloc dt=6 heapalloc_value=25753224 +HeapAlloc dt=9 heapalloc_value=25761416 +HeapAlloc dt=6 heapalloc_value=25769608 +HeapAlloc dt=5 heapalloc_value=25777800 +HeapAlloc dt=6 heapalloc_value=25785992 +HeapAlloc dt=5 heapalloc_value=25794184 +HeapAlloc dt=6 heapalloc_value=25802376 +HeapAlloc dt=5 heapalloc_value=25810568 +HeapAlloc dt=6 heapalloc_value=25818760 +HeapAlloc dt=10 heapalloc_value=25826952 +HeapAlloc dt=6 heapalloc_value=25835144 +HeapAlloc dt=6 heapalloc_value=25843336 +HeapAlloc dt=5 heapalloc_value=25851528 +HeapAlloc dt=6 heapalloc_value=25859720 +HeapAlloc dt=5 heapalloc_value=25867912 +HeapAlloc dt=6 heapalloc_value=25876104 +HeapAlloc dt=6 heapalloc_value=25884296 +HeapAlloc dt=7 heapalloc_value=25892488 +HeapAlloc dt=6 heapalloc_value=25900680 +HeapAlloc dt=5 heapalloc_value=25908872 +HeapAlloc dt=6 heapalloc_value=25917064 +HeapAlloc dt=6 heapalloc_value=25925256 +HeapAlloc dt=5 heapalloc_value=25933448 +HeapAlloc dt=6 heapalloc_value=25941640 +HeapAlloc dt=6 heapalloc_value=25949832 +HeapAlloc dt=6 heapalloc_value=25958024 +HeapAlloc dt=5 heapalloc_value=25966216 +HeapAlloc dt=6 heapalloc_value=25974408 +HeapAlloc dt=5 heapalloc_value=25982600 +HeapAlloc dt=6 heapalloc_value=25990792 +HeapAlloc dt=6 heapalloc_value=25998984 +HeapAlloc dt=5 heapalloc_value=26007176 +HeapAlloc dt=6 heapalloc_value=26015368 +HeapAlloc dt=6 heapalloc_value=26023560 +HeapAlloc dt=6 heapalloc_value=26031752 +HeapAlloc dt=5 
heapalloc_value=26039944 +HeapAlloc dt=6 heapalloc_value=26048136 +HeapAlloc dt=5 heapalloc_value=26056328 +HeapAlloc dt=6 heapalloc_value=26064520 +HeapAlloc dt=94 heapalloc_value=26072712 +HeapAlloc dt=7 heapalloc_value=26080904 +HeapAlloc dt=5 heapalloc_value=26089096 +HeapAlloc dt=6 heapalloc_value=26097288 +HeapAlloc dt=6 heapalloc_value=26105480 +HeapAlloc dt=5 heapalloc_value=26113672 +HeapAlloc dt=6 heapalloc_value=26121864 +HeapAlloc dt=6 heapalloc_value=26130056 +HeapAlloc dt=5 heapalloc_value=26138248 +HeapAlloc dt=6 heapalloc_value=26146440 +HeapAlloc dt=6 heapalloc_value=26154632 +HeapAlloc dt=5 heapalloc_value=26162824 +HeapAlloc dt=1696 heapalloc_value=26171016 +HeapAlloc dt=7 heapalloc_value=26179208 +HeapAlloc dt=6 heapalloc_value=26187400 +HeapAlloc dt=5 heapalloc_value=26195592 +HeapAlloc dt=6 heapalloc_value=26203784 +HeapAlloc dt=5 heapalloc_value=26211976 +HeapAlloc dt=47 heapalloc_value=26220168 +HeapAlloc dt=8 heapalloc_value=26228360 +HeapAlloc dt=5 heapalloc_value=26236552 +HeapAlloc dt=6 heapalloc_value=26244744 +HeapAlloc dt=6 heapalloc_value=26252936 +HeapAlloc dt=5 heapalloc_value=26261128 +HeapAlloc dt=6 heapalloc_value=26269320 +HeapAlloc dt=5 heapalloc_value=26277512 +HeapAlloc dt=6 heapalloc_value=26285704 +HeapAlloc dt=6 heapalloc_value=26293896 +HeapAlloc dt=5 heapalloc_value=26302088 +HeapAlloc dt=6 heapalloc_value=26310280 +HeapAlloc dt=6 heapalloc_value=26318472 +HeapAlloc dt=30 heapalloc_value=26326360 +HeapAlloc dt=30 heapalloc_value=26334536 +HeapAlloc dt=24 heapalloc_value=26336904 +GoCreate dt=72 new_g=34 new_stack=47 stack=48 +GoCreate dt=183 new_g=35 new_stack=47 stack=48 +GoCreate dt=15 new_g=36 new_stack=47 stack=48 +GoCreate dt=12 new_g=37 new_stack=47 stack=48 +GoCreate dt=14 new_g=38 new_stack=47 stack=48 +HeapAlloc dt=25 heapalloc_value=26344200 +GoCreate dt=9 new_g=39 new_stack=47 stack=48 +GoCreate dt=13 new_g=40 new_stack=47 stack=48 +GoCreate dt=4 new_g=41 new_stack=47 stack=48 +HeapAlloc dt=17 
heapalloc_value=26351912 +GoBlock dt=15 reason_string=10 stack=49 +GoStart dt=5 g=41 g_seq=1 +GoStop dt=307427 reason_string=16 stack=51 +GoStart dt=34 g=41 g_seq=2 +GoStop dt=315328 reason_string=16 stack=50 +GoStart dt=10 g=41 g_seq=3 +GoDestroy dt=158464 +ProcStop dt=40 +EventBatch gen=1 m=1709039 time=7689670530705 size=53 +GoUnblock dt=117 g=4 g_seq=3 stack=0 +GoUnblock dt=157408 g=4 g_seq=7 stack=0 +GoUnblock dt=157553 g=4 g_seq=11 stack=0 +ProcSteal dt=947714 p=7 p_seq=9 m=1709048 +ProcSteal dt=646055 p=7 p_seq=13 m=1709046 +ProcSteal dt=5677 p=5 p_seq=11 m=1709046 +ProcSteal dt=1312 p=6 p_seq=9 m=1709048 +EventBatch gen=1 m=1709038 time=7689670147327 size=336 +ProcStatus dt=56 p=0 pstatus=1 +GoStatus dt=4 g=1 m=1709038 gstatus=2 +ProcsChange dt=184 procs_value=8 stack=1 +STWBegin dt=81 kind_string=21 stack=2 +HeapGoal dt=5 heapgoal_value=4194304 +ProcStatus dt=2 p=1 pstatus=2 +ProcStatus dt=1 p=2 pstatus=2 +ProcStatus dt=1 p=3 pstatus=2 +ProcStatus dt=1 p=4 pstatus=2 +ProcStatus dt=1 p=5 pstatus=2 +ProcStatus dt=1 p=6 pstatus=2 +ProcStatus dt=1 p=7 pstatus=2 +ProcsChange dt=51 procs_value=8 stack=3 +STWEnd dt=74 +GoCreate dt=216 new_g=6 new_stack=4 stack=5 +HeapAlloc dt=174 heapalloc_value=2752512 +GoCreate dt=140 new_g=7 new_stack=6 stack=7 +HeapAlloc dt=16 heapalloc_value=2760704 +GoCreate dt=11 new_g=8 new_stack=8 stack=9 +GoCreate dt=197 new_g=9 new_stack=10 stack=11 +GoCreate dt=18 new_g=10 new_stack=12 stack=13 +GoBlock dt=159 reason_string=10 stack=14 +GoStart dt=10 g=10 g_seq=1 +GoStop dt=224159 reason_string=16 stack=19 +GoStart dt=105 g=10 g_seq=2 +GoUnblock dt=88262 g=1 g_seq=1 stack=20 +GoDestroy dt=111 +GoStart dt=10 g=1 g_seq=2 +GoBlock dt=18 reason_string=19 stack=21 +ProcStop dt=177 +ProcStart dt=22598 p=0 p_seq=2 +ProcStop dt=20 +ProcStart dt=30 p=2 p_seq=2 +ProcStop dt=1158 +ProcStart dt=1116 p=0 p_seq=4 +GoUnblock dt=19 g=25 g_seq=2 stack=0 +GoStart dt=130 g=25 g_seq=3 +GoLabel dt=1 label_string=2 +GoBlock dt=1809 reason_string=15 
stack=27 +ProcStop dt=35 +ProcStart dt=45680 p=3 p_seq=4 +HeapAlloc dt=46 heapalloc_value=7659248 +HeapAlloc dt=48 heapalloc_value=7663408 +HeapAlloc dt=6065 heapalloc_value=7876144 +GoStart dt=2865 g=4 g_seq=6 +GoBlock dt=31 reason_string=15 stack=32 +ProcStop dt=49 +ProcStart dt=1490 p=3 p_seq=5 +ProcStop dt=29 +ProcStart dt=2071 p=1 p_seq=10 +ProcStop dt=21 +ProcStart dt=143297 p=2 p_seq=13 +GoUnblock dt=21 g=22 g_seq=6 stack=0 +GoStart dt=177 g=22 g_seq=7 +GoLabel dt=2 label_string=2 +GoBlock dt=2058 reason_string=15 stack=27 +ProcStop dt=2352 +ProcStart dt=162401 p=5 p_seq=2 +HeapAlloc dt=51 heapalloc_value=26353960 +HeapAlloc dt=42 heapalloc_value=26360360 +HeapAlloc dt=6510 heapalloc_value=26367784 +GoStart dt=1039 g=40 g_seq=1 +GoStop dt=297000 reason_string=16 stack=50 +GoStart dt=15 g=40 g_seq=2 +GoStop dt=315522 reason_string=16 stack=50 +GoStart dt=7 g=40 g_seq=3 +GoDestroy dt=168735 +ProcStop dt=43 +ProcStart dt=799345 p=6 p_seq=6 +ProcStop dt=33 +ProcStart dt=1506 p=6 p_seq=10 +ProcStop dt=26 +ProcStart dt=18634 p=7 p_seq=33 +ProcStop dt=34 +EventBatch gen=1 m=18446744073709551615 time=7689672466616 size=28 +GoStatus dt=61 g=2 m=18446744073709551615 gstatus=4 +GoStatus dt=3 g=5 m=18446744073709551615 gstatus=4 +EventBatch gen=1 m=18446744073709551615 time=7689672467258 size=4540 +Stacks +Stack id=86 nframes=7 + pc=4754167 func=24 file=25 line=736 + pc=4814861 func=26 file=27 line=181 + pc=4814837 func=28 file=29 line=736 + pc=4814480 func=30 file=29 line=160 + pc=4996132 func=31 file=32 line=55 + pc=5032836 func=33 file=34 line=179 + pc=5078635 func=35 file=36 line=73 +Stack id=77 nframes=16 + pc=4756520 func=37 file=25 line=1442 + pc=4751813 func=38 file=27 line=298 + pc=4996815 func=39 file=40 line=59 + pc=5049499 func=41 file=42 line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 
line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=13 nframes=1 + pc=5077820 func=35 file=36 line=28 +Stack id=65 nframes=2 + pc=4224086 func=57 file=58 line=145 + pc=5080123 func=59 file=36 line=94 +Stack id=21 nframes=3 + pc=4640852 func=60 file=61 line=195 + pc=5081128 func=62 file=36 line=125 + pc=5077843 func=35 file=36 line=32 +Stack id=11 nframes=1 + pc=5077754 func=35 file=36 line=27 +Stack id=10 nframes=1 + pc=5080288 func=63 file=36 line=97 +Stack id=44 nframes=2 + pc=4354430 func=64 file=65 line=408 + pc=4354396 func=66 file=67 line=318 +Stack id=51 nframes=3 + pc=4658586 func=68 file=69 line=53 + pc=5080816 func=70 file=36 line=110 + pc=5079149 func=71 file=36 line=40 +Stack id=36 nframes=7 + pc=4310007 func=72 file=73 line=806 + pc=4326610 func=74 file=75 line=562 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=57 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4824373 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=16 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4821008 func=91 file=92 line=46 + pc=4821000 func=93 file=94 line=189 + pc=5077114 func=95 file=96 line=134 +Stack id=63 nframes=1 + pc=5080224 func=97 file=36 line=89 +Stack id=2 nframes=3 + pc=4567556 func=98 file=99 line=239 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=80 nframes=15 + pc=4998478 func=101 file=29 line=683 + pc=4998507 func=39 file=40 line=141 + pc=5049499 func=41 file=42 
line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=47 nframes=1 + pc=5079072 func=71 file=36 line=38 +Stack id=55 nframes=2 + pc=4227441 func=102 file=58 line=442 + pc=5078106 func=35 file=36 line=48 +Stack id=5 nframes=4 + pc=4576789 func=103 file=104 line=44 + pc=4567832 func=98 file=99 line=258 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=46 nframes=3 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=8 nframes=1 + pc=5077056 func=95 file=96 line=128 +Stack id=24 nframes=6 + pc=4315620 func=105 file=73 line=1249 + pc=4308860 func=106 file=73 line=662 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=37 nframes=1 + pc=4316644 func=107 file=73 line=1469 +Stack id=79 nframes=5 + pc=4817209 func=108 file=29 line=611 + pc=5000296 func=109 file=40 line=172 + pc=5058941 func=110 file=47 line=159 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=17 nframes=1 + pc=5077124 func=95 file=96 line=130 +Stack id=41 nframes=2 + pc=4310763 func=72 file=73 line=816 + pc=4316644 func=107 file=73 line=1469 +Stack id=33 nframes=7 + pc=4328420 func=114 file=75 line=747 + pc=4326674 func=74 file=75 line=587 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=29 
nframes=6 + pc=4644903 func=115 file=116 line=474 + pc=4309092 func=106 file=73 line=683 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=73 nframes=10 + pc=4756296 func=117 file=25 line=1432 + pc=4751685 func=118 file=27 line=290 + pc=5051812 func=119 file=42 line=167 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=92 nframes=2 + pc=4640852 func=60 file=61 line=195 + pc=5078782 func=113 file=36 line=63 +Stack id=32 nframes=2 + pc=4344589 func=124 file=125 line=425 + pc=4346072 func=126 file=125 line=658 +Stack id=45 nframes=1 + pc=5077843 func=35 file=36 line=32 +Stack id=62 nframes=3 + pc=4754167 func=24 file=25 line=736 + pc=5079848 func=26 file=27 line=181 + pc=5079785 func=59 file=36 line=90 +Stack id=15 nframes=3 + pc=4227441 func=102 file=58 line=442 + pc=4574090 func=127 file=99 line=937 + pc=4576964 func=128 file=104 line=56 +Stack id=28 nframes=4 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=64 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4821008 func=91 file=92 line=46 + pc=4821000 func=93 file=94 line=189 + pc=5080260 func=97 file=36 line=89 +Stack id=91 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5060022 func=133 file=134 line=21 + pc=5055784 func=135 file=112 line=257 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack 
id=95 nframes=8 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4996049 func=143 file=32 line=37 + pc=5033653 func=144 file=34 line=203 + pc=5078651 func=35 file=36 line=74 +Stack id=22 nframes=4 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=56 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4824373 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=60 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4813961 func=145 file=29 line=129 + pc=5079772 func=146 file=85 line=90 + pc=5079785 func=59 file=36 line=90 +Stack id=38 nframes=2 + pc=4310679 func=72 file=73 line=914 + pc=4316644 func=107 file=73 line=1469 +Stack id=52 nframes=3 + pc=4708004 func=147 file=148 line=81 + pc=5079238 func=149 file=148 line=87 + pc=5079164 func=71 file=36 line=41 +Stack id=20 nframes=3 + pc=4708004 func=147 file=148 line=81 + pc=5080678 func=149 file=148 line=87 + pc=5080600 func=150 file=36 line=105 +Stack id=67 nframes=19 + pc=4752943 func=151 file=25 line=98 + pc=4822218 func=152 file=153 line=280 + pc=4822195 func=154 file=155 line=15 + pc=4823409 func=156 file=85 line=272 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 
func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=84 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059867 func=133 file=134 line=18 + pc=5055784 func=135 file=112 line=257 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=74 nframes=9 + pc=4755428 func=168 file=25 line=1213 + pc=5051952 func=119 file=42 line=170 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=50 nframes=1 + pc=5079149 func=71 file=36 line=40 +Stack id=14 nframes=2 + pc=4708263 func=169 file=148 line=116 + pc=5077833 func=35 file=36 line=29 +Stack id=27 nframes=2 + pc=4437613 func=170 file=65 line=402 + pc=4316040 func=107 file=73 line=1333 +Stack id=30 nframes=5 + pc=4309402 func=106 file=73 line=745 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=75 nframes=1 + pc=5078720 func=113 file=36 line=58 +Stack id=88 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059594 func=171 file=172 line=15 + pc=5055722 func=135 file=112 line=251 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=70 nframes=21 + pc=4754167 func=24 file=25 line=736 + pc=4814861 func=26 file=27 
line=181 + pc=4814837 func=28 file=29 line=736 + pc=4814480 func=30 file=29 line=160 + pc=4820817 func=173 file=92 line=29 + pc=4820809 func=174 file=94 line=118 + pc=4742703 func=175 file=176 line=335 + pc=5041967 func=177 file=176 line=354 + pc=5041927 func=178 file=160 line=55 + pc=5047143 func=161 file=162 line=40 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=25 nframes=7 + pc=4227441 func=102 file=58 line=442 + pc=4315507 func=105 file=73 line=1259 + pc=4308860 func=106 file=73 line=662 + pc=4257811 func=78 file=77 line=1308 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=58 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4824408 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=69 nframes=19 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4823631 func=156 file=85 line=301 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 
line=53 +Stack id=83 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5054762 func=179 file=180 line=88 + pc=5055769 func=135 file=112 line=256 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=43 nframes=9 + pc=4368154 func=181 file=182 line=958 + pc=4293585 func=183 file=184 line=254 + pc=4293175 func=185 file=184 line=170 + pc=4290674 func=186 file=187 line=182 + pc=4255364 func=188 file=77 line=948 + pc=4256932 func=78 file=77 line=1149 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=78 nframes=8 + pc=4756062 func=189 file=25 line=1421 + pc=4750293 func=190 file=153 line=684 + pc=4818215 func=191 file=192 line=17 + pc=4816989 func=108 file=29 line=602 + pc=5000296 func=109 file=40 line=172 + pc=5058941 func=110 file=47 line=159 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=71 nframes=20 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4823895 func=193 file=85 line=315 + pc=5047564 func=194 file=92 line=23 + pc=5047547 func=195 file=160 line=23 + pc=5047406 func=161 file=162 line=53 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 
+ pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=3 nframes=4 + pc=4446827 func=196 file=65 line=1369 + pc=4567827 func=98 file=99 line=256 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=35 nframes=2 + pc=4310007 func=72 file=73 line=806 + pc=4316644 func=107 file=73 line=1469 +Stack id=6 nframes=1 + pc=4573664 func=197 file=99 line=877 +Stack id=19 nframes=1 + pc=5080585 func=150 file=36 line=104 +Stack id=54 nframes=1 + pc=5078085 func=35 file=36 line=47 +Stack id=82 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059594 func=171 file=172 line=15 + pc=5055722 func=135 file=112 line=251 + pc=5058352 func=46 file=47 line=121 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=90 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5059867 func=133 file=134 line=18 + pc=5055784 func=135 file=112 line=257 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=61 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4813961 func=145 file=29 line=129 + pc=5079772 func=146 file=85 line=90 + pc=5079785 func=59 file=36 line=90 +Stack id=23 nframes=1 + pc=4315808 func=107 file=73 line=1298 +Stack id=12 nframes=1 + pc=5080512 func=150 file=36 line=102 +Stack id=68 nframes=19 + pc=4753924 func=81 file=25 line=432 + pc=4744422 func=82 file=83 line=106 + pc=4823012 func=84 file=85 line=218 + pc=4823631 
func=156 file=85 line=301 + pc=4821405 func=157 file=94 line=374 + pc=5042404 func=158 file=94 line=354 + pc=5042391 func=159 file=160 line=76 + pc=5047095 func=161 file=162 line=35 + pc=5068462 func=163 file=34 line=373 + pc=4703265 func=164 file=165 line=74 + pc=5034315 func=166 file=165 line=65 + pc=5034286 func=167 file=34 line=373 + pc=5047998 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=4 nframes=1 + pc=4576896 func=128 file=104 line=44 +Stack id=66 nframes=6 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=81 nframes=16 + pc=4757147 func=198 file=25 line=1478 + pc=4752076 func=199 file=27 line=313 + pc=4998549 func=39 file=40 line=149 + pc=5049499 func=41 file=42 line=124 + pc=5048282 func=43 file=42 line=70 + pc=5021687 func=44 file=45 line=154 + pc=5057739 func=46 file=47 line=85 + pc=5057380 func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=87 nframes=4 + pc=4814791 func=30 file=29 line=164 + pc=4996132 func=31 file=32 line=55 + pc=5032836 func=33 file=34 line=179 + pc=5078635 func=35 file=36 line=73 +Stack id=85 nframes=15 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5060022 func=133 file=134 line=21 + pc=5055784 func=135 file=112 line=257 + pc=5058352 func=46 file=47 line=121 + pc=5057380 
func=48 file=47 line=75 + pc=5057381 func=49 file=47 line=71 + pc=4965884 func=50 file=51 line=651 + pc=4964173 func=52 file=51 line=616 + pc=4961811 func=53 file=51 line=517 + pc=4960409 func=54 file=51 line=508 + pc=4958646 func=55 file=51 line=434 + pc=4958647 func=56 file=51 line=401 + pc=5078500 func=35 file=36 line=68 +Stack id=39 nframes=4 + pc=4644903 func=115 file=116 line=474 + pc=4311677 func=200 file=73 line=964 + pc=4310756 func=72 file=73 line=926 + pc=4316644 func=107 file=73 line=1469 +Stack id=31 nframes=7 + pc=4585153 func=201 file=202 line=383 + pc=4326396 func=74 file=75 line=534 + pc=4258131 func=76 file=77 line=1353 + pc=4255947 func=78 file=77 line=1025 + pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=89 nframes=8 + pc=4757394 func=129 file=25 line=1488 + pc=4819063 func=130 file=27 line=462 + pc=4819041 func=131 file=132 line=17 + pc=5054762 func=179 file=180 line=88 + pc=5055769 func=135 file=112 line=256 + pc=5058972 func=110 file=47 line=163 + pc=5055951 func=111 file=112 line=327 + pc=5078747 func=113 file=36 line=59 +Stack id=53 nframes=1 + pc=5079488 func=59 file=36 line=81 +Stack id=18 nframes=3 + pc=4227441 func=102 file=58 line=442 + pc=4574090 func=127 file=99 line=937 + pc=4573703 func=197 file=99 line=880 +Stack id=48 nframes=1 + pc=5077881 func=35 file=36 line=38 +Stack id=94 nframes=8 + pc=4753732 func=136 file=25 line=335 + pc=4813424 func=137 file=138 line=24 + pc=4813394 func=139 file=29 line=81 + pc=4811154 func=140 file=141 line=213 + pc=4813572 func=142 file=29 line=104 + pc=4996049 func=143 file=32 line=37 + pc=5033653 func=144 file=34 line=203 + pc=5078837 func=113 file=36 line=66 +Stack id=42 nframes=9 + pc=4584693 func=203 file=202 line=357 + pc=4355940 func=204 file=67 line=522 + pc=4292956 func=185 file=184 line=147 + pc=4290674 func=186 file=187 line=182 + pc=4255364 func=188 file=77 line=948 + pc=4256932 func=78 file=77 line=1149 + 
pc=4528840 func=79 file=80 line=107 + pc=5081148 func=62 file=36 line=127 + pc=5077843 func=35 file=36 line=32 +Stack id=93 nframes=7 + pc=4754618 func=88 file=25 line=964 + pc=4816103 func=89 file=27 line=209 + pc=4816095 func=28 file=29 line=736 + pc=4815648 func=90 file=29 line=380 + pc=4996388 func=205 file=32 line=96 + pc=5033284 func=206 file=34 line=191 + pc=5078821 func=113 file=36 line=65 +Stack id=34 nframes=2 + pc=4644903 func=115 file=116 line=474 + pc=4316309 func=107 file=73 line=1393 +Stack id=49 nframes=2 + pc=4708263 func=169 file=148 line=116 + pc=5078001 func=35 file=36 line=43 +Stack id=7 nframes=4 + pc=4573636 func=207 file=99 line=877 + pc=4567844 func=98 file=99 line=259 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=76 nframes=1 + pc=5078444 func=35 file=36 line=58 +Stack id=1 nframes=4 + pc=4583115 func=208 file=202 line=260 + pc=4567535 func=98 file=99 line=238 + pc=5076805 func=100 file=96 line=125 + pc=5077595 func=35 file=36 line=20 +Stack id=26 nframes=2 + pc=4224086 func=57 file=58 line=145 + pc=4316011 func=107 file=73 line=1312 +Stack id=40 nframes=3 + pc=4312646 func=200 file=73 line=1086 + pc=4310756 func=72 file=73 line=926 + pc=4316644 func=107 file=73 line=1469 +Stack id=72 nframes=11 + pc=4757394 func=129 file=25 line=1488 + pc=5054386 func=130 file=27 line=462 + pc=5054396 func=209 file=210 line=28 + pc=5051349 func=119 file=42 line=152 + pc=5048051 func=43 file=42 line=57 + pc=5021687 func=44 file=45 line=154 + pc=5059172 func=120 file=47 line=189 + pc=4967876 func=121 file=47 line=179 + pc=4967838 func=122 file=51 line=734 + pc=4968614 func=123 file=51 line=808 + pc=5078215 func=35 file=36 line=53 +Stack id=59 nframes=5 + pc=4753924 func=81 file=25 line=432 + pc=4744496 func=82 file=83 line=118 + pc=4823012 func=84 file=85 line=218 + pc=4824408 func=86 file=87 line=21 + pc=5079543 func=59 file=36 line=82 +Stack id=9 nframes=2 + pc=5076879 func=100 file=96 line=128 + pc=5077595 func=35 
file=36 line=20 +EventBatch gen=1 m=18446744073709551615 time=7689670146021 size=6980 +Strings +String id=1 + data="Not worker" +String id=2 + data="GC (dedicated)" +String id=3 + data="GC (fractional)" +String id=4 + data="GC (idle)" +String id=5 + data="unspecified" +String id=6 + data="forever" +String id=7 + data="network" +String id=8 + data="select" +String id=9 + data="sync.(*Cond).Wait" +String id=10 + data="sync" +String id=11 + data="chan send" +String id=12 + data="chan receive" +String id=13 + data="GC mark assist wait for work" +String id=14 + data="GC background sweeper wait" +String id=15 + data="system goroutine wait" +String id=16 + data="preempted" +String id=17 + data="wait for debug call" +String id=18 + data="wait until GC ends" +String id=19 + data="sleep" +String id=20 + data="runtime.Gosched" +String id=21 + data="start trace" +String id=22 + data="GC sweep termination" +String id=23 + data="GC mark termination" +String id=24 + data="syscall.read" +String id=25 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go" +String id=26 + data="syscall.Read" +String id=27 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go" +String id=28 + data="internal/poll.ignoringEINTRIO" +String id=29 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go" +String id=30 + data="internal/poll.(*FD).Read" +String id=31 + data="net.(*netFD).Read" +String id=32 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_posix.go" +String id=33 + data="net.(*conn).Read" +String id=34 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/net.go" +String id=35 + data="main.main" +String id=36 + data="/usr/local/google/home/mknyszek/work/go-1/src/cmd/trace/v2/testdata/testprog/main.go" +String id=37 + data="syscall.connect" +String id=38 + data="syscall.Connect" +String id=39 + data="net.(*netFD).connect" +String id=40 + 
data="/usr/local/google/home/mknyszek/work/go-1/src/net/fd_unix.go" +String id=41 + data="net.(*netFD).dial" +String id=42 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_posix.go" +String id=43 + data="net.socket" +String id=44 + data="net.internetSocket" +String id=45 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/ipsock_posix.go" +String id=46 + data="net.(*sysDialer).doDialTCPProto" +String id=47 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock_posix.go" +String id=48 + data="net.(*sysDialer).doDialTCP" +String id=49 + data="net.(*sysDialer).dialTCP" +String id=50 + data="net.(*sysDialer).dialSingle" +String id=51 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/dial.go" +String id=52 + data="net.(*sysDialer).dialSerial" +String id=53 + data="net.(*sysDialer).dialParallel" +String id=54 + data="net.(*Dialer).DialContext" +String id=55 + data="net.(*Dialer).Dial" +String id=56 + data="net.Dial" +String id=57 + data="runtime.chansend1" +String id=58 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go" +String id=59 + data="main.blockingSyscall" +String id=60 + data="time.Sleep" +String id=61 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/time.go" +String id=62 + data="main.allocHog" +String id=63 + data="main.cpu10" +String id=64 + data="runtime.goparkunlock" +String id=65 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go" +String id=66 + data="runtime.bgsweep" +String id=67 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcsweep.go" +String id=68 + data="runtime.asyncPreempt" +String id=69 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/preempt_amd64.s" +String id=70 + data="main.cpuHog" +String id=71 + data="main.main.func1" +String id=72 + data="runtime.gcMarkDone" +String id=73 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgc.go" +String id=74 + data="runtime.gcAssistAlloc" +String id=75 + 
data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcmark.go" +String id=76 + data="runtime.deductAssistCredit" +String id=77 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go" +String id=78 + data="runtime.mallocgc" +String id=79 + data="runtime.makeslice" +String id=80 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/slice.go" +String id=81 + data="syscall.fcntl" +String id=82 + data="syscall.SetNonblock" +String id=83 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/exec_unix.go" +String id=84 + data="os.newFile" +String id=85 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_unix.go" +String id=86 + data="os.Pipe" +String id=87 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/pipe2_unix.go" +String id=88 + data="syscall.write" +String id=89 + data="syscall.Write" +String id=90 + data="internal/poll.(*FD).Write" +String id=91 + data="os.(*File).write" +String id=92 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go" +String id=93 + data="os.(*File).Write" +String id=94 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go" +String id=95 + data="runtime/trace.Start.func1" +String id=96 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go" +String id=97 + data="main.blockingSyscall.func1" +String id=98 + data="runtime.StartTrace" +String id=99 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go" +String id=100 + data="runtime/trace.Start" +String id=101 + data="internal/poll.(*FD).WaitWrite" +String id=102 + data="runtime.chanrecv1" +String id=103 + data="runtime.traceStartReadCPU" +String id=104 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go" +String id=105 + data="runtime.gcBgMarkStartWorkers" +String id=106 + data="runtime.gcStart" +String id=107 + data="runtime.gcBgMarkWorker" +String id=108 + data="internal/poll.(*FD).Accept" +String id=109 + data="net.(*netFD).accept" +String 
id=110 + data="net.(*TCPListener).accept" +String id=111 + data="net.(*TCPListener).Accept" +String id=112 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsock.go" +String id=113 + data="main.main.func2" +String id=114 + data="runtime.gcParkAssist" +String id=115 + data="runtime.systemstack_switch" +String id=116 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/asm_amd64.s" +String id=117 + data="syscall.bind" +String id=118 + data="syscall.Bind" +String id=119 + data="net.(*netFD).listenStream" +String id=120 + data="net.(*sysListener).listenTCPProto" +String id=121 + data="net.(*sysListener).listenTCP" +String id=122 + data="net.(*ListenConfig).Listen" +String id=123 + data="net.Listen" +String id=124 + data="runtime.(*scavengerState).park" +String id=125 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mgcscavenge.go" +String id=126 + data="runtime.bgscavenge" +String id=127 + data="runtime.(*wakeableSleep).sleep" +String id=128 + data="runtime.traceStartReadCPU.func1" +String id=129 + data="syscall.setsockopt" +String id=130 + data="syscall.SetsockoptInt" +String id=131 + data="internal/poll.(*FD).SetsockoptInt" +String id=132 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sockopt.go" +String id=133 + data="net.setKeepAlivePeriod" +String id=134 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_unix.go" +String id=135 + data="net.newTCPConn" +String id=136 + data="syscall.Close" +String id=137 + data="internal/poll.(*SysFile).destroy" +String id=138 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unixjs.go" +String id=139 + data="internal/poll.(*FD).destroy" +String id=140 + data="internal/poll.(*FD).decref" +String id=141 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_mutex.go" +String id=142 + data="internal/poll.(*FD).Close" +String id=143 + data="net.(*netFD).Close" +String id=144 + data="net.(*conn).Close" +String id=145 + 
data="internal/poll.(*FD).SetBlocking" +String id=146 + data="os.(*File).Fd" +String id=147 + data="sync.(*WaitGroup).Add" +String id=148 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/waitgroup.go" +String id=149 + data="sync.(*WaitGroup).Done" +String id=150 + data="main.cpu20" +String id=151 + data="syscall.openat" +String id=152 + data="syscall.Open" +String id=153 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_linux.go" +String id=154 + data="os.open" +String id=155 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_open_unix.go" +String id=156 + data="os.openFileNolog" +String id=157 + data="os.OpenFile" +String id=158 + data="os.Open" +String id=159 + data="net.open" +String id=160 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/parse.go" +String id=161 + data="net.maxListenerBacklog" +String id=162 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sock_linux.go" +String id=163 + data="net.listenerBacklog.func1" +String id=164 + data="sync.(*Once).doSlow" +String id=165 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/once.go" +String id=166 + data="sync.(*Once).Do" +String id=167 + data="net.listenerBacklog" +String id=168 + data="syscall.Listen" +String id=169 + data="sync.(*WaitGroup).Wait" +String id=170 + data="runtime.gopark" +String id=171 + data="net.setNoDelay" +String id=172 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/tcpsockopt_posix.go" +String id=173 + data="os.(*File).read" +String id=174 + data="os.(*File).Read" +String id=175 + data="io.ReadAtLeast" +String id=176 + data="/usr/local/google/home/mknyszek/work/go-1/src/io/io.go" +String id=177 + data="io.ReadFull" +String id=178 + data="net.(*file).readLine" +String id=179 + data="net.setKeepAlive" +String id=180 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_posix.go" +String id=181 + data="runtime.(*mheap).alloc" +String id=182 + 
data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mheap.go" +String id=183 + data="runtime.(*mcentral).grow" +String id=184 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcentral.go" +String id=185 + data="runtime.(*mcentral).cacheSpan" +String id=186 + data="runtime.(*mcache).refill" +String id=187 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/mcache.go" +String id=188 + data="runtime.(*mcache).nextFree" +String id=189 + data="syscall.accept4" +String id=190 + data="syscall.Accept4" +String id=191 + data="internal/poll.accept" +String id=192 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/sock_cloexec.go" +String id=193 + data="os.(*file).close" +String id=194 + data="os.(*File).Close" +String id=195 + data="net.(*file).close" +String id=196 + data="runtime.startTheWorld" +String id=197 + data="runtime.(*traceAdvancerState).start.func1" +String id=198 + data="syscall.getsockopt" +String id=199 + data="syscall.GetsockoptInt" +String id=200 + data="runtime.gcMarkTermination" +String id=201 + data="runtime.traceLocker.GCMarkAssistStart" +String id=202 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go" +String id=203 + data="runtime.traceLocker.GCSweepSpan" +String id=204 + data="runtime.(*sweepLocked).sweep" +String id=205 + data="net.(*netFD).Write" +String id=206 + data="net.(*conn).Write" +String id=207 + data="runtime.(*traceAdvancerState).start" +String id=208 + data="runtime.traceLocker.Gomaxprocs" +String id=209 + data="net.setDefaultListenerSockopts" +String id=210 + data="/usr/local/google/home/mknyszek/work/go-1/src/net/sockopt_linux.go" diff --git a/src/cmd/trace/v2/testdata/mktests.go b/src/cmd/trace/v2/testdata/mktests.go new file mode 100644 index 0000000000..143e8ece35 --- /dev/null +++ b/src/cmd/trace/v2/testdata/mktests.go @@ -0,0 +1,60 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore + +package main + +import ( + "bytes" + "fmt" + "internal/trace/v2/raw" + "internal/trace/v2/version" + "io" + "log" + "os" + "os/exec" +) + +func main() { + // Create command. + var trace, stderr bytes.Buffer + cmd := exec.Command("go", "run", "./testprog/main.go") + // TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default. + cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2") + cmd.Stdout = &trace + cmd.Stderr = &stderr + + // Run trace program; the trace will appear in stdout. + fmt.Fprintln(os.Stderr, "running trace program...") + if err := cmd.Run(); err != nil { + log.Fatalf("running trace program: %v:\n%s", err, stderr.String()) + } + + // Create file. + f, err := os.Create(fmt.Sprintf("./go1%d.test", version.Current)) + if err != nil { + log.Fatalf("creating output file: %v", err) + } + defer f.Close() + + // Write out the trace. + r, err := raw.NewReader(&trace) + if err != nil { + log.Fatalf("reading trace: %v", err) + } + w, err := raw.NewTextWriter(f, version.Current) + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("reading trace: %v", err) + } + if err := w.WriteEvent(ev); err != nil { + log.Fatalf("writing trace: %v", err) + } + } +} diff --git a/src/cmd/trace/v2/testdata/testprog/main.go b/src/cmd/trace/v2/testdata/testprog/main.go new file mode 100644 index 0000000000..fcf4dc156c --- /dev/null +++ b/src/cmd/trace/v2/testdata/testprog/main.go @@ -0,0 +1,129 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "fmt" + "log" + "net" + "os" + "runtime" + "runtime/trace" + "sync" + "syscall" + "time" +) + +func main() { + if err := trace.Start(os.Stdout); err != nil { + log.Fatal(err) + } + + // checkExecutionTimes relies on this. + var wg sync.WaitGroup + wg.Add(2) + go cpu10(&wg) + go cpu20(&wg) + wg.Wait() + + // checkHeapMetrics relies on this. + allocHog(25 * time.Millisecond) + + // checkProcStartStop relies on this. + var wg2 sync.WaitGroup + for i := 0; i < runtime.GOMAXPROCS(0); i++ { + wg2.Add(1) + go func() { + defer wg2.Done() + cpuHog(50 * time.Millisecond) + }() + } + wg2.Wait() + + // checkSyscalls relies on this. + done := make(chan error) + go blockingSyscall(50*time.Millisecond, done) + if err := <-done; err != nil { + log.Fatal(err) + } + + // checkNetworkUnblock relies on this. + ln, err := net.Listen("tcp", "127.0.0.1:0") + if err != nil { + log.Fatalf("listen failed: %v", err) + } + defer ln.Close() + go func() { + c, err := ln.Accept() + if err != nil { + return + } + time.Sleep(time.Millisecond) + var buf [1]byte + c.Write(buf[:]) + c.Close() + }() + c, err := net.Dial("tcp", ln.Addr().String()) + if err != nil { + log.Fatalf("dial failed: %v", err) + } + var tmp [1]byte + c.Read(tmp[:]) + c.Close() + + trace.Stop() +} + +// blockingSyscall blocks the current goroutine for duration d in a syscall and +// sends a message to done when it is done or if the syscall failed. 
+func blockingSyscall(d time.Duration, done chan<- error) { + r, w, err := os.Pipe() + if err != nil { + done <- err + return + } + start := time.Now() + msg := []byte("hello") + time.AfterFunc(d, func() { w.Write(msg) }) + _, err = syscall.Read(int(r.Fd()), make([]byte, len(msg))) + if err == nil && time.Since(start) < d { + err = fmt.Errorf("syscall returned too early: want=%s got=%s", d, time.Since(start)) + } + done <- err +} + +func cpu10(wg *sync.WaitGroup) { + defer wg.Done() + cpuHog(10 * time.Millisecond) +} + +func cpu20(wg *sync.WaitGroup) { + defer wg.Done() + cpuHog(20 * time.Millisecond) +} + +func cpuHog(dt time.Duration) { + start := time.Now() + for i := 0; ; i++ { + if i%1000 == 0 && time.Since(start) > dt { + return + } + } +} + +func allocHog(dt time.Duration) { + start := time.Now() + var s [][]byte + for i := 0; ; i++ { + if i%1000 == 0 { + if time.Since(start) > dt { + return + } + // Take a break... this will generate a ton of events otherwise. + time.Sleep(50 * time.Microsecond) + } + s = append(s, make([]byte, 1024)) + } +} diff --git a/src/cmd/trace/v2/threadgen.go b/src/cmd/trace/v2/threadgen.go new file mode 100644 index 0000000000..e1cae2b2cf --- /dev/null +++ b/src/cmd/trace/v2/threadgen.go @@ -0,0 +1,204 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package trace + +import ( + "fmt" + "internal/trace/traceviewer" + "internal/trace/traceviewer/format" + tracev2 "internal/trace/v2" +) + +var _ generator = &threadGenerator{} + +type threadGenerator struct { + globalRangeGenerator + globalMetricGenerator + stackSampleGenerator[tracev2.ThreadID] + logEventGenerator[tracev2.ThreadID] + + gStates map[tracev2.GoID]*gState[tracev2.ThreadID] + threads map[tracev2.ThreadID]struct{} +} + +func newThreadGenerator() *threadGenerator { + tg := new(threadGenerator) + rg := func(ev *tracev2.Event) tracev2.ThreadID { + return ev.Thread() + } + tg.stackSampleGenerator.getResource = rg + tg.logEventGenerator.getResource = rg + tg.gStates = make(map[tracev2.GoID]*gState[tracev2.ThreadID]) + tg.threads = make(map[tracev2.ThreadID]struct{}) + return tg +} + +func (g *threadGenerator) Sync() { + g.globalRangeGenerator.Sync() +} + +func (g *threadGenerator) GoroutineLabel(ctx *traceContext, ev *tracev2.Event) { + l := ev.Label() + g.gStates[l.Resource.Goroutine()].setLabel(l.Label) +} + +func (g *threadGenerator) GoroutineRange(ctx *traceContext, ev *tracev2.Event) { + r := ev.Range() + switch ev.Kind() { + case tracev2.EventRangeBegin: + g.gStates[r.Scope.Goroutine()].rangeBegin(ev.Time(), r.Name, ev.Stack()) + case tracev2.EventRangeActive: + g.gStates[r.Scope.Goroutine()].rangeActive(r.Name) + case tracev2.EventRangeEnd: + gs := g.gStates[r.Scope.Goroutine()] + gs.rangeEnd(ev.Time(), r.Name, ev.Stack(), ctx) + } +} + +func (g *threadGenerator) GoroutineTransition(ctx *traceContext, ev *tracev2.Event) { + if ev.Thread() != tracev2.NoThread { + if _, ok := g.threads[ev.Thread()]; !ok { + g.threads[ev.Thread()] = struct{}{} + } + } + + st := ev.StateTransition() + goID := st.Resource.Goroutine() + + // If we haven't seen this goroutine before, create a new + // gState for it. 
+ gs, ok := g.gStates[goID] + if !ok { + gs = newGState[tracev2.ThreadID](goID) + g.gStates[goID] = gs + } + // If we haven't already named this goroutine, try to name it. + gs.augmentName(st.Stack) + + // Handle the goroutine state transition. + from, to := st.Goroutine() + if from == to { + // Filter out no-op events. + return + } + if from.Executing() && !to.Executing() { + if to == tracev2.GoWaiting { + // Goroutine started blocking. + gs.block(ev.Time(), ev.Stack(), st.Reason, ctx) + } else { + gs.stop(ev.Time(), ev.Stack(), ctx) + } + } + if !from.Executing() && to.Executing() { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + gs.start(start, ev.Thread(), ctx) + } + + if from == tracev2.GoWaiting { + // Goroutine was unblocked. + gs.unblock(ev.Time(), ev.Stack(), ev.Thread(), ctx) + } + if from == tracev2.GoNotExist && to == tracev2.GoRunnable { + // Goroutine was created. + gs.created(ev.Time(), ev.Thread(), ev.Stack()) + } + if from == tracev2.GoSyscall { + // Exiting syscall. + gs.syscallEnd(ev.Time(), to != tracev2.GoRunning, ctx) + } + + // Handle syscalls. + if to == tracev2.GoSyscall { + start := ev.Time() + if from == tracev2.GoUndetermined { + // Back-date the event to the start of the trace. + start = ctx.startTime + } + // Write down that we've entered a syscall. Note: we might have no P here + // if we're in a cgo callback or this is a transition from GoUndetermined + // (i.e. the G has been blocked in a syscall). + gs.syscallBegin(start, ev.Thread(), ev.Stack()) + } + + // Note down the goroutine transition. 
+ _, inMarkAssist := gs.activeRanges["GC mark assist"] + ctx.GoroutineTransition(ctx.elapsed(ev.Time()), viewerGState(from, inMarkAssist), viewerGState(to, inMarkAssist)) +} + +func (g *threadGenerator) ProcTransition(ctx *traceContext, ev *tracev2.Event) { + if ev.Thread() != tracev2.NoThread { + if _, ok := g.threads[ev.Thread()]; !ok { + g.threads[ev.Thread()] = struct{}{} + } + } + + type procArg struct { + Proc uint64 `json:"proc,omitempty"` + } + st := ev.StateTransition() + viewerEv := traceviewer.InstantEvent{ + Resource: uint64(ev.Thread()), + Stack: ctx.Stack(viewerFrames(ev.Stack())), + Arg: procArg{Proc: uint64(st.Resource.Proc())}, + } + + from, to := st.Proc() + if from == to { + // Filter out no-op events. + return + } + if to.Executing() { + start := ev.Time() + if from == tracev2.ProcUndetermined { + start = ctx.startTime + } + viewerEv.Name = "proc start" + viewerEv.Arg = format.ThreadIDArg{ThreadID: uint64(ev.Thread())} + viewerEv.Ts = ctx.elapsed(start) + // TODO(mknyszek): We don't have a state machine for threads, so approximate + // running threads with running Ps. + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, 1) + } + if from.Executing() { + start := ev.Time() + viewerEv.Name = "proc stop" + viewerEv.Ts = ctx.elapsed(start) + // TODO(mknyszek): We don't have a state machine for threads, so approximate + // running threads with running Ps. + ctx.IncThreadStateCount(ctx.elapsed(start), traceviewer.ThreadStateRunning, -1) + } + // TODO(mknyszek): Consider modeling procs differently and have them be + // transition to and from NotExist when GOMAXPROCS changes. We can emit + // events for this to clearly delineate GOMAXPROCS changes. + + if viewerEv.Name != "" { + ctx.Instant(viewerEv) + } +} + +func (g *threadGenerator) ProcRange(ctx *traceContext, ev *tracev2.Event) { + // TODO(mknyszek): Extend procRangeGenerator to support rendering proc ranges on threads. 
+} + +func (g *threadGenerator) Finish(ctx *traceContext) { + ctx.SetResourceType("OS THREADS") + + // Finish off global ranges. + g.globalRangeGenerator.Finish(ctx) + + // Finish off all the goroutine slices. + for _, gs := range g.gStates { + gs.finish(ctx) + } + + // Name all the threads to the emitter. + for id := range g.threads { + ctx.Resource(uint64(id), fmt.Sprintf("Thread %d", id)) + } +} diff --git a/src/cmd/trace/v2/viewer.go b/src/cmd/trace/v2/viewer.go new file mode 100644 index 0000000000..de67fc4e0e --- /dev/null +++ b/src/cmd/trace/v2/viewer.go @@ -0,0 +1,56 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "fmt" + "internal/trace" + "internal/trace/traceviewer" + tracev2 "internal/trace/v2" + "time" +) + +// viewerFrames returns the frames of the stack of ev. The given frame slice is +// used to store the frames to reduce allocations. +func viewerFrames(stk tracev2.Stack) []*trace.Frame { + var frames []*trace.Frame + stk.Frames(func(f tracev2.StackFrame) bool { + frames = append(frames, &trace.Frame{ + PC: f.PC, + Fn: f.Func, + File: f.File, + Line: int(f.Line), + }) + return true + }) + return frames +} + +func viewerGState(state tracev2.GoState, inMarkAssist bool) traceviewer.GState { + switch state { + case tracev2.GoUndetermined: + return traceviewer.GDead + case tracev2.GoNotExist: + return traceviewer.GDead + case tracev2.GoRunnable: + return traceviewer.GRunnable + case tracev2.GoRunning: + return traceviewer.GRunning + case tracev2.GoWaiting: + if inMarkAssist { + return traceviewer.GWaitingGC + } + return traceviewer.GWaiting + case tracev2.GoSyscall: + // N.B. A goroutine in a syscall is considered "executing" (state.Executing() == true). 
+ return traceviewer.GRunning + default: + panic(fmt.Sprintf("unknown GoState: %s", state.String())) + } +} + +func viewerTime(t time.Duration) float64 { + return float64(t) / float64(time.Microsecond) +} diff --git a/src/cmd/vendor/github.com/google/pprof/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/driver/driver.go index 5a8222f70a..d5860036c3 100644 --- a/src/cmd/vendor/github.com/google/pprof/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/driver/driver.go @@ -186,9 +186,10 @@ type ObjFile interface { // A Frame describes a single line in a source file. type Frame struct { - Func string // name of function - File string // source file name - Line int // line in file + Func string // name of function + File string // source file name + Line int // line in file + Column int // column in file } // A Sym describes a single symbol in an object file. diff --git a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go index 491422fcda..3049545b6b 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/binutils/addr2liner_llvm.go @@ -129,6 +129,7 @@ func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) { } linenumber := 0 + columnnumber := 0 // The llvm-symbolizer outputs the ::. // When it cannot identify the source code location, it outputs "??:0:0". 
// Older versions output just the filename and line number, so we check for @@ -137,22 +138,27 @@ func (d *llvmSymbolizer) readFrame() (plugin.Frame, bool) { fileline = "" } else { switch split := strings.Split(fileline, ":"); len(split) { - case 1: - // filename - fileline = split[0] - case 2, 3: - // filename:line , or - // filename:line:disc , or - fileline = split[0] + case 3: + // filename:line:column + if col, err := strconv.Atoi(split[2]); err == nil { + columnnumber = col + } + fallthrough + case 2: + // filename:line if line, err := strconv.Atoi(split[1]); err == nil { linenumber = line } + fallthrough + case 1: + // filename + fileline = split[0] default: // Unrecognized, ignore } } - return plugin.Frame{Func: funcname, File: fileline, Line: linenumber}, false + return plugin.Frame{Func: funcname, File: fileline, Line: linenumber, Column: columnnumber}, false } // addrInfo returns the stack frame information for a specific program diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go index c9edf10bb4..f990780d75 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/commands.go @@ -247,6 +247,8 @@ var configHelp = map[string]string{ "noinlines": helpText( "Ignore inlines.", "Attributes inlined functions to their first out-of-line caller."), + "showcolumns": helpText( + "Show column numbers at the source code line level."), } func helpText(s ...string) string { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go index 9fcdd459b2..f7d227416e 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/config.go @@ -51,6 +51,7 @@ type config struct { TagShow string `json:"tagshow,omitempty"` TagHide string 
`json:"taghide,omitempty"` NoInlines bool `json:"noinlines,omitempty"` + ShowColumns bool `json:"showcolumns,omitempty"` // Output granularity Granularity string `json:"granularity,omitempty"` @@ -157,6 +158,7 @@ func init() { "sort": "sort", "granularity": "g", "noinlines": "noinlines", + "showcolumns": "showcolumns", } def := defaultConfig() diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go index 27681c540f..74ce8cb422 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/driver.go @@ -256,7 +256,7 @@ func aggregate(prof *profile.Profile, cfg config) error { default: return fmt.Errorf("unexpected granularity") } - return prof.Aggregate(inlines, function, filename, linenumber, address) + return prof.Aggregate(inlines, function, filename, linenumber, cfg.ShowColumns, address) } func reportOptions(p *profile.Profile, numLabelUnits map[string]string, cfg config) (*report.Options, error) { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go index 584c5d85e0..95204a394f 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/fetch.go @@ -492,17 +492,23 @@ mapping: func fetch(source string, duration, timeout time.Duration, ui plugin.UI, tr http.RoundTripper) (p *profile.Profile, src string, err error) { var f io.ReadCloser - if sourceURL, timeout := adjustURL(source, duration, timeout); sourceURL != "" { - ui.Print("Fetching profile over HTTP from " + sourceURL) - if duration > 0 { - ui.Print(fmt.Sprintf("Please wait... (%v)", duration)) + // First determine whether the source is a file, if not, it will be treated as a URL. 
+ if _, openErr := os.Stat(source); openErr == nil { + if isPerfFile(source) { + f, err = convertPerfData(source, ui) + } else { + f, err = os.Open(source) } - f, err = fetchURL(sourceURL, timeout, tr) - src = sourceURL - } else if isPerfFile(source) { - f, err = convertPerfData(source, ui) } else { - f, err = os.Open(source) + sourceURL, timeout := adjustURL(source, duration, timeout) + if sourceURL != "" { + ui.Print("Fetching profile over HTTP from " + sourceURL) + if duration > 0 { + ui.Print(fmt.Sprintf("Please wait... (%v)", duration)) + } + f, err = fetchURL(sourceURL, timeout, tr) + src = sourceURL + } } if err == nil { defer f.Close() diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go deleted file mode 100644 index fbeb765dbc..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/flamegraph.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package driver - -import ( - "encoding/json" - "html/template" - "net/http" - "strings" - - "github.com/google/pprof/internal/graph" - "github.com/google/pprof/internal/measurement" - "github.com/google/pprof/internal/report" -) - -type treeNode struct { - Name string `json:"n"` - FullName string `json:"f"` - Cum int64 `json:"v"` - CumFormat string `json:"l"` - Percent string `json:"p"` - Children []*treeNode `json:"c"` -} - -// flamegraph generates a web page containing a flamegraph. -func (ui *webInterface) flamegraph(w http.ResponseWriter, req *http.Request) { - // Force the call tree so that the graph is a tree. - // Also do not trim the tree so that the flame graph contains all functions. - rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { - cfg.CallTree = true - cfg.Trim = false - }) - if rpt == nil { - return // error already reported - } - - // Generate dot graph. - g, config := report.GetDOT(rpt) - var nodes []*treeNode - nroots := 0 - rootValue := int64(0) - nodeArr := []string{} - nodeMap := map[*graph.Node]*treeNode{} - // Make all nodes and the map, collect the roots. - for _, n := range g.Nodes { - v := n.CumValue() - fullName := n.Info.PrintableName() - node := &treeNode{ - Name: graph.ShortenFunctionName(fullName), - FullName: fullName, - Cum: v, - CumFormat: config.FormatValue(v), - Percent: strings.TrimSpace(measurement.Percentage(v, config.Total)), - } - nodes = append(nodes, node) - if len(n.In) == 0 { - nodes[nroots], nodes[len(nodes)-1] = nodes[len(nodes)-1], nodes[nroots] - nroots++ - rootValue += v - } - nodeMap[n] = node - // Get all node names into an array. - nodeArr = append(nodeArr, n.Info.Name) - } - // Populate the child links. 
- for _, n := range g.Nodes { - node := nodeMap[n] - for child := range n.Out { - node.Children = append(node.Children, nodeMap[child]) - } - } - - rootNode := &treeNode{ - Name: "root", - FullName: "root", - Cum: rootValue, - CumFormat: config.FormatValue(rootValue), - Percent: strings.TrimSpace(measurement.Percentage(rootValue, config.Total)), - Children: nodes[0:nroots], - } - - // JSON marshalling flame graph - b, err := json.Marshal(rootNode) - if err != nil { - http.Error(w, "error serializing flame graph", http.StatusInternalServerError) - ui.options.UI.PrintErr(err) - return - } - - ui.render(w, req, "flamegraph", rpt, errList, config.Labels, webArgs{ - FlameGraph: template.JS(b), - Nodes: nodeArr, - }) -} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js index ff980f66de..4a2067eb68 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/common.js @@ -558,11 +558,6 @@ function viewer(baseUrl, nodes, options) { return null; } - // convert a string to a regexp that matches that string. - function quotemeta(str) { - return str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1'); - } - function setSampleIndexLink(si) { const elem = document.getElementById('sampletype-' + si); if (elem != null) { @@ -595,7 +590,7 @@ function viewer(baseUrl, nodes, options) { // list-based. Construct regular expression depending on mode. let re = regexpActive ? 
search.value - : Array.from(getSelection().keys()).map(key => quotemeta(nodes[key])).join('|'); + : Array.from(getSelection().keys()).map(key => pprofQuoteMeta(nodes[key])).join('|'); setHrefParams(elem, function (params) { if (re != '') { @@ -683,7 +678,7 @@ function viewer(baseUrl, nodes, options) { } const ids = ['topbtn', 'graphbtn', - 'flamegraph', 'flamegraph2', 'flamegraphold', + 'flamegraph', 'peek', 'list', 'disasm', 'focus', 'ignore', 'hide', 'show', 'show-from']; ids.forEach(makeSearchLinkDynamic); @@ -712,3 +707,8 @@ function viewer(baseUrl, nodes, options) { main.focus(); } } + +// convert a string to a regexp that matches exactly that string. +function pprofQuoteMeta(str) { + return '^' + str.replace(/([\\\.?+*\[\](){}|^$])/g, '\\$1') + '$'; +} diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html deleted file mode 100644 index 9866755bcd..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/flamegraph.html +++ /dev/null @@ -1,103 +0,0 @@ - - - - - {{.Title}} - {{template "css" .}} - - - - - {{template "header" .}} -
    -
    -
    -
    -
    -
    - {{template "script" .}} - - - - - diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html index 42cb7960e6..e946e6b882 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/header.html @@ -12,7 +12,6 @@ Top Graph Flame Graph - Flame Graph (old) Peek Source Disassemble diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js index be78edd553..c8059fe6bf 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/html/stacks.js @@ -75,8 +75,12 @@ function stackViewer(stacks, nodes) { hiliter: (n, on) => { return hilite(n, on); }, current: () => { let r = new Map(); - for (let p of pivots) { - r.set(p, true); + if (pivots.length == 1 && pivots[0] == 0) { + // Not pivoting + } else { + for (let p of pivots) { + r.set(p, true); + } } return r; }}); @@ -145,7 +149,7 @@ function stackViewer(stacks, nodes) { } // Update params to include src. - let v = stacks.Sources[src].RE; + let v = pprofQuoteMeta(stacks.Sources[src].FullName); if (param != 'f' && param != 'sf') { // old f,sf values are overwritten // Add new source to current parameter value. const old = params.get(param); @@ -174,7 +178,11 @@ function stackViewer(stacks, nodes) { function switchPivots(regexp) { // Switch URL without hitting the server. 
const url = new URL(document.URL); - url.searchParams.set('p', regexp); + if (regexp === '' || regexp === '^$') { + url.searchParams.delete('p'); // Not pivoting + } else { + url.searchParams.set('p', regexp); + } history.pushState('', '', url.toString()); // Makes back-button work matches = new Set(); search.value = ''; @@ -445,7 +453,7 @@ function stackViewer(stacks, nodes) { r.appendChild(t); } - r.addEventListener('click', () => { switchPivots(src.RE); }); + r.addEventListener('click', () => { switchPivots(pprofQuoteMeta(src.UniqueName)); }); r.addEventListener('mouseenter', () => { handleEnter(box, r); }); r.addEventListener('mouseleave', () => { handleLeave(box); }); r.addEventListener('contextmenu', (e) => { showActionMenu(e, box); }); diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go index 249dfe0742..6a61613344 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/stacks.go @@ -22,7 +22,7 @@ import ( "github.com/google/pprof/internal/report" ) -// stackView generates the new flamegraph view. +// stackView generates the flamegraph view. func (ui *webInterface) stackView(w http.ResponseWriter, req *http.Request) { // Get all data in a report. 
rpt, errList := ui.makeReport(w, req, []string{"svg"}, func(cfg *config) { diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go index 62767e726d..9cbef4d787 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/svg.go @@ -65,7 +65,7 @@ func massageSVG(svg string) string { if loc := graphID.FindStringIndex(svg); loc != nil { svg = svg[:loc[0]] + - `` + + `` + `` + svg[loc[0]:] } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go index 55973ffb9f..984936a9d6 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webhtml.go @@ -19,8 +19,6 @@ import ( "fmt" "html/template" "os" - - "github.com/google/pprof/third_party/d3flamegraph" ) //go:embed html @@ -52,11 +50,7 @@ func addTemplates(templates *template.Template) { template.Must(templates.AddParseTree(name, sub.Tree)) } - // Pre-packaged third-party files. - def("d3flamegraphscript", d3flamegraph.JSSource) - def("d3flamegraphcss", d3flamegraph.CSSSource) - - // Embeded files. + // Embedded files. def("css", loadCSS("html/common.css")) def("header", loadFile("html/header.html")) def("graph", loadFile("html/graph.html")) @@ -64,7 +58,7 @@ func addTemplates(templates *template.Template) { def("top", loadFile("html/top.html")) def("sourcelisting", loadFile("html/source.html")) def("plaintext", loadFile("html/plaintext.html")) - def("flamegraph", loadFile("html/flamegraph.html")) + // TODO: Rename "stacks" to "flamegraph" to seal moving off d3 flamegraph. 
def("stacks", loadFile("html/stacks.html")) def("stacks_css", loadCSS("html/stacks.css")) def("stacks_js", loadJS("html/stacks.js")) diff --git a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go index 41b30021f5..476e1d2cdf 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/driver/webui.go @@ -112,7 +112,6 @@ func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, d ui.help["details"] = "Show information about the profile and this view" ui.help["graph"] = "Display profile as a directed graph" ui.help["flamegraph"] = "Display profile as a flame graph" - ui.help["flamegraphold"] = "Display profile as a flame graph (old version; slated for removal)" ui.help["reset"] = "Show the entire profile" ui.help["save_config"] = "Save current settings" @@ -130,9 +129,9 @@ func serveWebInterface(hostport string, p *profile.Profile, o *plugin.Options, d "/disasm": http.HandlerFunc(ui.disasm), "/source": http.HandlerFunc(ui.source), "/peek": http.HandlerFunc(ui.peek), - "/flamegraphold": http.HandlerFunc(ui.flamegraph), "/flamegraph": http.HandlerFunc(ui.stackView), - "/flamegraph2": http.HandlerFunc(ui.stackView), // Support older URL + "/flamegraph2": redirectWithQuery("flamegraph", http.StatusMovedPermanently), // Keep legacy URL working. + "/flamegraphold": redirectWithQuery("flamegraph", http.StatusMovedPermanently), // Keep legacy URL working. 
"/saveconfig": http.HandlerFunc(ui.saveConfig), "/deleteconfig": http.HandlerFunc(ui.deleteConfig), "/download": http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { @@ -209,15 +208,20 @@ func defaultWebServer(args *plugin.HTTPServerArgs) error { // https://github.com/google/pprof/pull/348 mux := http.NewServeMux() mux.Handle("/ui/", http.StripPrefix("/ui", handler)) - mux.Handle("/", redirectWithQuery("/ui")) + mux.Handle("/", redirectWithQuery("/ui", http.StatusTemporaryRedirect)) s := &http.Server{Handler: mux} return s.Serve(ln) } -func redirectWithQuery(path string) http.HandlerFunc { +// redirectWithQuery responds with a given redirect code, preserving query +// parameters in the redirect URL. It does not convert relative paths to +// absolute paths like http.Redirect does, so that HTTPServerArgs.Handlers can +// generate relative redirects that work with the external prefixing. +func redirectWithQuery(path string, code int) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { pathWithQuery := &gourl.URL{Path: path, RawQuery: r.URL.RawQuery} - http.Redirect(w, r, pathWithQuery.String(), http.StatusTemporaryRedirect) + w.Header().Set("Location", pathWithQuery.String()) + w.WriteHeader(code) } } diff --git a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go index b64ef27991..5ad10a2ae0 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/graph/graph.go @@ -154,6 +154,7 @@ type NodeInfo struct { Address uint64 File string StartLine, Lineno int + Columnno int Objfile string } @@ -174,8 +175,12 @@ func (i *NodeInfo) NameComponents() []string { switch { case i.Lineno != 0: + s := fmt.Sprintf("%s:%d", i.File, i.Lineno) + if i.Columnno != 0 { + s += fmt.Sprintf(":%d", i.Columnno) + } // User requested line numbers, provide what we have. 
- name = append(name, fmt.Sprintf("%s:%d", i.File, i.Lineno)) + name = append(name, s) case i.File != "": // User requested file name, provide it. name = append(name, i.File) @@ -239,6 +244,7 @@ func (nm NodeMap) FindOrInsertNode(info NodeInfo, kept NodeSet) *Node { // Find a node that represents the whole function. info.Address = 0 info.Lineno = 0 + info.Columnno = 0 n.Function = nm.FindOrInsertNode(info, nil) return n } @@ -592,9 +598,10 @@ func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options return &NodeInfo{Address: l.Address, Objfile: objfile} } ni := &NodeInfo{ - Address: l.Address, - Lineno: int(line.Line), - Name: line.Function.Name, + Address: l.Address, + Lineno: int(line.Line), + Columnno: int(line.Column), + Name: line.Function.Name, } if fname := line.Function.Filename; fname != "" { ni.File = filepath.Clean(fname) diff --git a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go index 98eb1dd817..c934551036 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/plugin/plugin.go @@ -157,11 +157,12 @@ type ObjFile interface { Close() error } -// A Frame describes a single line in a source file. +// A Frame describes a location in a single line in a source file. type Frame struct { - Func string // name of function - File string // source file name - Line int // line in file + Func string // name of function + File string // source file name + Line int // line in file + Column int // column in line (if available) } // A Sym describes a single symbol in an object file. 
diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go index f73e49a176..96b80039e6 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/report.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/report.go @@ -293,7 +293,7 @@ func (rpt *Report) newGraph(nodes graph.NodeSet) *graph.Graph { return graph.New(rpt.prof, gopt) } -// printProto writes the incoming proto via thw writer w. +// printProto writes the incoming proto via the writer w. // If the divide_by option has been specified, samples are scaled appropriately. func printProto(w io.Writer, rpt *Report) error { p, o := rpt.prof, rpt.options @@ -339,6 +339,7 @@ func printTopProto(w io.Writer, rpt *Report) error { Line: []profile.Line{ { Line: int64(n.Info.Lineno), + Column: int64(n.Info.Columnno), Function: f, }, }, diff --git a/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go b/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go index 7db51bc01c..aa3bf80f2d 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/report/stacks.go @@ -18,7 +18,6 @@ import ( "crypto/sha256" "encoding/binary" "fmt" - "regexp" "github.com/google/pprof/internal/measurement" "github.com/google/pprof/profile" @@ -54,9 +53,6 @@ type StackSource struct { // Guaranteed to be non-empty. Display []string - // Regular expression (anchored) that matches exactly FullName. - RE string - // Places holds the list of stack slots where this source occurs. // In particular, if [a,b] is an element in Places, // StackSet.Stacks[a].Sources[b] points to this source. 
@@ -135,7 +131,6 @@ func (s *StackSet) makeInitialStacks(rpt *Report) { unknownIndex++ } x.Inlined = inlined - x.RE = "^" + regexp.QuoteMeta(x.UniqueName) + "$" x.Display = shortNameList(x.FullName) s.Sources = append(s.Sources, x) srcs[k] = len(s.Sources) - 1 diff --git a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go index c3f6cc6281..5ca71ab8be 100644 --- a/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go +++ b/src/cmd/vendor/github.com/google/pprof/internal/symbolizer/symbolizer.go @@ -181,6 +181,7 @@ func doLocalSymbolize(prof *profile.Profile, fast, force bool, obj plugin.ObjToo l.Line[i] = profile.Line{ Function: f, Line: int64(frame.Line), + Column: int64(frame.Column), } } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/encode.go b/src/cmd/vendor/github.com/google/pprof/profile/encode.go index 182c926b90..860bb304c3 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/encode.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/encode.go @@ -530,6 +530,7 @@ func (p *Line) decoder() []decoder { func (p *Line) encode(b *buffer) { encodeUint64Opt(b, 1, p.functionIDX) encodeInt64Opt(b, 2, p.Line) + encodeInt64Opt(b, 3, p.Column) } var lineDecoder = []decoder{ @@ -538,6 +539,8 @@ var lineDecoder = []decoder{ func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, // optional int64 line = 2 func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, + // optional int64 column = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Column) }, } func (p *Function) decoder() []decoder { diff --git a/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go index 91f45e53c6..4580bab183 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go +++ 
b/src/cmd/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -56,7 +56,7 @@ func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } @@ -99,7 +99,7 @@ func parseJavaProfile(b []byte) (*Profile, error) { } // Strip out addresses for better merge. - if err = p.Aggregate(true, true, true, true, false); err != nil { + if err = p.Aggregate(true, true, true, true, false, false); err != nil { return nil, err } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/merge.go b/src/cmd/vendor/github.com/google/pprof/profile/merge.go index 4b66282cb8..eee0132e74 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/merge.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/merge.go @@ -326,12 +326,13 @@ func (l *Location) key() locationKey { key.addr -= l.Mapping.Start key.mappingID = l.Mapping.ID } - lines := make([]string, len(l.Line)*2) + lines := make([]string, len(l.Line)*3) for i, line := range l.Line { if line.Function != nil { lines[i*2] = strconv.FormatUint(line.Function.ID, 16) } lines[i*2+1] = strconv.FormatInt(line.Line, 16) + lines[i*2+2] = strconv.FormatInt(line.Column, 16) } key.lines = strings.Join(lines, "|") return key @@ -418,6 +419,7 @@ func (pm *profileMerger) mapLine(src Line) Line { ln := Line{ Function: pm.mapFunction(src.Function), Line: src.Line, + Column: src.Column, } return ln } diff --git a/src/cmd/vendor/github.com/google/pprof/profile/profile.go b/src/cmd/vendor/github.com/google/pprof/profile/profile.go index 60ef7e9268..62df80a556 100644 --- a/src/cmd/vendor/github.com/google/pprof/profile/profile.go +++ b/src/cmd/vendor/github.com/google/pprof/profile/profile.go @@ -145,6 +145,7 @@ type Location struct { type Line struct { Function *Function Line int64 + Column int64 functionIDX uint64 } @@ 
-436,7 +437,7 @@ func (p *Profile) CheckValid() error { // Aggregate merges the locations in the profile into equivalence // classes preserving the request attributes. It also updates the // samples to point to the merged locations. -func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error { +func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, columnnumber, address bool) error { for _, m := range p.Mapping { m.HasInlineFrames = m.HasInlineFrames && inlineFrame m.HasFunctions = m.HasFunctions && function @@ -458,7 +459,7 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address } // Aggregate locations - if !inlineFrame || !address || !linenumber { + if !inlineFrame || !address || !linenumber || !columnnumber { for _, l := range p.Location { if !inlineFrame && len(l.Line) > 1 { l.Line = l.Line[len(l.Line)-1:] @@ -466,6 +467,12 @@ func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address if !linenumber { for i := range l.Line { l.Line[i].Line = 0 + l.Line[i].Column = 0 + } + } + if !columnnumber { + for i := range l.Line { + l.Line[i].Column = 0 } } if !address { @@ -627,10 +634,11 @@ func (l *Location) string() string { for li := range l.Line { lnStr := "??" 
if fn := l.Line[li].Function; fn != nil { - lnStr = fmt.Sprintf("%s %s:%d s=%d", + lnStr = fmt.Sprintf("%s %s:%d:%d s=%d", fn.Name, fn.Filename, l.Line[li].Line, + l.Line[li].Column, fn.StartLine) if fn.Name != fn.SystemName { lnStr = lnStr + "(" + fn.SystemName + ")" diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE deleted file mode 100644 index 8dada3edaf..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_FLAME_GRAPH_LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE deleted file mode 100644 index b0145150fd..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/D3_LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright 2010-2021 Mike Bostock - -Permission to use, copy, modify, and/or distribute this software for any purpose -with or without fee is hereby granted, provided that the above copyright notice -and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH -REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND -FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, -INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS -OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER -TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF -THIS SOFTWARE. 
diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md deleted file mode 100644 index eb84b68007..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/README.md +++ /dev/null @@ -1,33 +0,0 @@ -# Building a customized D3.js bundle - -The D3.js version distributed with pprof is customized to only include the -modules required by pprof. - -## Dependencies - -- Install [npm](https://www.npmjs.com). - -## Building - -- Run `update.sh` to: - - Download npm package dependencies (declared in `package.json` and `package-lock.json`) - - Create a d3.js bundle containing the JavScript of d3 and d3-flame-graph (by running `webpack`) - -This will `d3_flame_graph.go`, the minified custom D3.js bundle as Go source code. - -# References / Appendix - -## D3 Custom Bundle - -A demonstration of building a custom D3 4.0 bundle using ES2015 modules and Rollup. - -[bl.ocks.org/mbostock/bb09af4c39c79cffcde4](https://bl.ocks.org/mbostock/bb09af4c39c79cffcde4) - -## Old version of d3-pprof - -A previous version of d3-flame-graph bundled for pprof used Rollup instead of -Webpack. This has now been migrated directly into this directory. - -The repository configuring Rollup was here: - -[github.com/spiermar/d3-pprof](https://github.com/spiermar/d3-pprof) diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go deleted file mode 100644 index 7e27941995..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/d3_flame_graph.go +++ /dev/null @@ -1,65 +0,0 @@ -// D3.js is a JavaScript library for manipulating documents based on data. -// https://github.com/d3/d3 -// See D3_LICENSE file for license details - -// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data. 
-// https://github.com/spiermar/d3-flame-graph -// See D3_FLAME_GRAPH_LICENSE file for license details - -package d3flamegraph - -// JSSource returns the d3 and d3-flame-graph JavaScript bundle -const JSSource = ` - -!function(t,n){if("object"==typeof exports&&"object"==typeof module)module.exports=n();else if("function"==typeof define&&define.amd)define([],n);else{var e=n();for(var r in e)("object"==typeof exports?exports:t)[r]=e[r]}}(self,(function(){return(()=>{"use strict";var t={d:(n,e)=>{for(var r in e)t.o(e,r)&&!t.o(n,r)&&Object.defineProperty(n,r,{enumerable:!0,get:e[r]})},o:(t,n)=>Object.prototype.hasOwnProperty.call(t,n),r:t=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(t,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(t,"__esModule",{value:!0})}},n={};function e(){}function r(t){return null==t?e:function(){return this.querySelector(t)}}function i(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function o(){return[]}function u(t){return function(n){return n.matches(t)}}t.r(n),t.d(n,{flamegraph:()=>ji,select:()=>pt});var a=Array.prototype.find;function l(){return this.firstElementChild}var s=Array.prototype.filter;function c(){return Array.from(this.children)}function f(t){return new Array(t.length)}function h(t,n){this.ownerDocument=t.ownerDocument,this.namespaceURI=t.namespaceURI,this._next=null,this._parent=t,this.__data__=n}function p(t){return function(){return t}}function d(t,n,e,r,i,o){for(var u,a=0,l=n.length,s=o.length;an?1:t>=n?0:NaN}h.prototype={constructor:h,appendChild:function(t){return this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var _="http://www.w3.org/1999/xhtml";const 
w={svg:"http://www.w3.org/2000/svg",xhtml:_,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function b(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),w.hasOwnProperty(n)?{space:w[n],local:t}:t}function x(t){return function(){this.removeAttribute(t)}}function M(t){return function(){this.removeAttributeNS(t.space,t.local)}}function A(t,n){return function(){this.setAttribute(t,n)}}function N(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function E(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function k(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function S(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function C(t){return function(){this.style.removeProperty(t)}}function P(t,n,e){return function(){this.style.setProperty(t,n,e)}}function j(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function q(t,n){return t.style.getPropertyValue(n)||S(t).getComputedStyle(t,null).getPropertyValue(n)}function O(t){return function(){delete this[t]}}function L(t,n){return function(){this[t]=n}}function T(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function B(t){return t.trim().split(/^|\s+/)}function D(t){return t.classList||new H(t)}function H(t){this._node=t,this._names=B(t.getAttribute("class")||"")}function R(t,n){for(var e=D(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function ut(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var 
ft=[null];function ht(t,n){this._groups=t,this._parents=n}function pt(t){return"string"==typeof t?new ht([[document.querySelector(t)]],[document.documentElement]):new ht([[t]],ft)}function dt(){}function gt(t){return null==t?dt:function(){return this.querySelector(t)}}function vt(t){return null==t?[]:Array.isArray(t)?t:Array.from(t)}function yt(){return[]}function mt(t){return null==t?yt:function(){return this.querySelectorAll(t)}}function _t(t){return function(){return this.matches(t)}}function wt(t){return function(n){return n.matches(t)}}ht.prototype=function(){return new ht([[document.documentElement]],ft)}.prototype={constructor:ht,select:function(t){"function"!=typeof t&&(t=r(t));for(var n=this._groups,e=n.length,i=new Array(e),o=0;o=E&&(E=N+1);!(A=b[E])&&++E<_;);M._next=A||null}}return(u=new ht(u,r))._enter=a,u._exit=l,u},enter:function(){return new ht(this._enter||this._groups.map(f),this._parents)},exit:function(){return new ht(this._exit||this._groups.map(f),this._parents)},join:function(t,n,e){var r=this.enter(),i=this,o=this.exit();return"function"==typeof t?(r=t(r))&&(r=r.selection()):r=r.append(t+""),null!=n&&(i=n(i))&&(i=i.selection()),null==e?o.remove():e(o),r&&i?r.merge(i).order():i},merge:function(t){for(var n=t.selection?t.selection():t,e=this._groups,r=n._groups,i=e.length,o=r.length,u=Math.min(i,o),a=new Array(i),l=0;l=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=m);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?C:"function"==typeof n?j:P)(t,n,null==e?"":e)):q(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?O:"function"==typeof n?T:L)(t,n)):this.node()[t]},classed:function(t,n){var e=B(t+"");if(arguments.length<2){for(var r=D(this.node()),i=-1,o=e.length;++in?1:t>=n?0:NaN}Et.prototype={constructor:Et,appendChild:function(t){return 
this._parent.insertBefore(t,this._next)},insertBefore:function(t,n){return this._parent.insertBefore(t,n)},querySelector:function(t){return this._parent.querySelector(t)},querySelectorAll:function(t){return this._parent.querySelectorAll(t)}};var Ot="http://www.w3.org/1999/xhtml";const Lt={svg:"http://www.w3.org/2000/svg",xhtml:Ot,xlink:"http://www.w3.org/1999/xlink",xml:"http://www.w3.org/XML/1998/namespace",xmlns:"http://www.w3.org/2000/xmlns/"};function Tt(t){var n=t+="",e=n.indexOf(":");return e>=0&&"xmlns"!==(n=t.slice(0,e))&&(t=t.slice(e+1)),Lt.hasOwnProperty(n)?{space:Lt[n],local:t}:t}function Bt(t){return function(){this.removeAttribute(t)}}function Dt(t){return function(){this.removeAttributeNS(t.space,t.local)}}function Ht(t,n){return function(){this.setAttribute(t,n)}}function Rt(t,n){return function(){this.setAttributeNS(t.space,t.local,n)}}function Vt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttribute(t):this.setAttribute(t,e)}}function Xt(t,n){return function(){var e=n.apply(this,arguments);null==e?this.removeAttributeNS(t.space,t.local):this.setAttributeNS(t.space,t.local,e)}}function zt(t){return t.ownerDocument&&t.ownerDocument.defaultView||t.document&&t||t.defaultView}function It(t){return function(){this.style.removeProperty(t)}}function $t(t,n,e){return function(){this.style.setProperty(t,n,e)}}function Ut(t,n,e){return function(){var r=n.apply(this,arguments);null==r?this.style.removeProperty(t):this.style.setProperty(t,r,e)}}function Yt(t,n){return t.style.getPropertyValue(n)||zt(t).getComputedStyle(t,null).getPropertyValue(n)}function Ft(t){return function(){delete this[t]}}function Zt(t,n){return function(){this[t]=n}}function Gt(t,n){return function(){var e=n.apply(this,arguments);null==e?delete this[t]:this[t]=e}}function Jt(t){return t.trim().split(/^|\s+/)}function Kt(t){return t.classList||new Qt(t)}function Qt(t){this._node=t,this._names=Jt(t.getAttribute("class")||"")}function Wt(t,n){for(var 
e=Kt(t),r=-1,i=n.length;++r=0&&(n=t.slice(e+1),t=t.slice(0,e)),{type:t,name:n}}))}function bn(t){return function(){var n=this.__on;if(n){for(var e,r=0,i=-1,o=n.length;r=0&&(this._names.splice(n,1),this._node.setAttribute("class",this._names.join(" ")))},contains:function(t){return this._names.indexOf(t)>=0}};var En=[null];function kn(t,n){this._groups=t,this._parents=n}function Sn(){return new kn([[document.documentElement]],En)}kn.prototype=Sn.prototype={constructor:kn,select:function(t){"function"!=typeof t&&(t=gt(t));for(var n=this._groups,e=n.length,r=new Array(e),i=0;i=b&&(b=w+1);!(_=v[b])&&++b=0;)(r=i[o])&&(u&&4^r.compareDocumentPosition(u)&&u.parentNode.insertBefore(r,u),u=r);return this},sort:function(t){function n(n,e){return n&&e?t(n.__data__,e.__data__):!n-!e}t||(t=qt);for(var e=this._groups,r=e.length,i=new Array(r),o=0;o1?this.each((null==n?It:"function"==typeof n?Ut:$t)(t,n,null==e?"":e)):Yt(this.node(),t)},property:function(t,n){return arguments.length>1?this.each((null==n?Ft:"function"==typeof n?Gt:Zt)(t,n)):this.node()[t]},classed:function(t,n){var e=Jt(t+"");if(arguments.length<2){for(var r=Kt(this.node()),i=-1,o=e.length;++i1?r[0]+r.slice(2):r,+t.slice(e+1)]}function qn(t){return(t=jn(Math.abs(t)))?t[1]:NaN}var On,Ln=/^(?:(.)?([<>=^]))?([+\-( ])?([$#])?(0)?(\d+)?(,)?(\.\d+)?(~)?([a-z%])?$/i;function Tn(t){if(!(n=Ln.exec(t)))throw new Error("invalid format: "+t);var n;return new Bn({fill:n[1],align:n[2],sign:n[3],symbol:n[4],zero:n[5],width:n[6],comma:n[7],precision:n[8]&&n[8].slice(1),trim:n[9],type:n[10]})}function Bn(t){this.fill=void 0===t.fill?" 
":t.fill+"",this.align=void 0===t.align?">":t.align+"",this.sign=void 0===t.sign?"-":t.sign+"",this.symbol=void 0===t.symbol?"":t.symbol+"",this.zero=!!t.zero,this.width=void 0===t.width?void 0:+t.width,this.comma=!!t.comma,this.precision=void 0===t.precision?void 0:+t.precision,this.trim=!!t.trim,this.type=void 0===t.type?"":t.type+""}function Dn(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1];return i<0?"0."+new Array(-i).join("0")+r:r.length>i+1?r.slice(0,i+1)+"."+r.slice(i+1):r+new Array(i-r.length+2).join("0")}Tn.prototype=Bn.prototype,Bn.prototype.toString=function(){return this.fill+this.align+this.sign+this.symbol+(this.zero?"0":"")+(void 0===this.width?"":Math.max(1,0|this.width))+(this.comma?",":"")+(void 0===this.precision?"":"."+Math.max(0,0|this.precision))+(this.trim?"~":"")+this.type};const Hn={"%":(t,n)=>(100*t).toFixed(n),b:t=>Math.round(t).toString(2),c:t=>t+"",d:function(t){return Math.abs(t=Math.round(t))>=1e21?t.toLocaleString("en").replace(/,/g,""):t.toString(10)},e:(t,n)=>t.toExponential(n),f:(t,n)=>t.toFixed(n),g:(t,n)=>t.toPrecision(n),o:t=>Math.round(t).toString(8),p:(t,n)=>Dn(100*t,n),r:Dn,s:function(t,n){var e=jn(t,n);if(!e)return t+"";var r=e[0],i=e[1],o=i-(On=3*Math.max(-8,Math.min(8,Math.floor(i/3))))+1,u=r.length;return o===u?r:o>u?r+new Array(o-u+1).join("0"):o>0?r.slice(0,o)+"."+r.slice(o):"0."+new Array(1-o).join("0")+jn(t,Math.max(0,n+o-1))[0]},X:t=>Math.round(t).toString(16).toUpperCase(),x:t=>Math.round(t).toString(16)};function Rn(t){return t}var Vn,Xn,zn,In=Array.prototype.map,$n=["y","z","a","f","p","n","µ","m","","k","M","G","T","P","E","Z","Y"];function Un(t,n){return null==t||null==n?NaN:tn?1:t>=n?0:NaN}function Yn(t){t.x0=Math.round(t.x0),t.y0=Math.round(t.y0),t.x1=Math.round(t.x1),t.y1=Math.round(t.y1)}function Fn(t){var n=0,e=t.children,r=e&&e.length;if(r)for(;--r>=0;)n+=e[r].value;else n=1;t.value=n}function Zn(t,n){t instanceof Map?(t=[void 0,t],void 0===n&&(n=Jn)):void 0===n&&(n=Gn);for(var e,r,i,o,u,a=new 
Wn(t),l=[a];e=l.pop();)if((i=n(e.data))&&(u=(i=Array.from(i)).length))for(e.children=i,o=u-1;o>=0;--o)l.push(r=i[o]=new Wn(i[o])),r.parent=e,r.depth=e.depth+1;return a.eachBefore(Qn)}function Gn(t){return t.children}function Jn(t){return Array.isArray(t)?t[1]:null}function Kn(t){void 0!==t.data.value&&(t.value=t.data.value),t.data=t.data.data}function Qn(t){var n=0;do{t.height=n}while((t=t.parent)&&t.height<++n)}function Wn(t){this.data=t,this.depth=this.height=0,this.parent=null}Vn=function(t){var n,e,r=void 0===t.grouping||void 0===t.thousands?Rn:(n=In.call(t.grouping,Number),e=t.thousands+"",function(t,r){for(var i=t.length,o=[],u=0,a=n[0],l=0;i>0&&a>0&&(l+a+1>r&&(a=Math.max(1,r-l)),o.push(t.substring(i-=a,i+a)),!((l+=a+1)>r));)a=n[u=(u+1)%n.length];return o.reverse().join(e)}),i=void 0===t.currency?"":t.currency[0]+"",o=void 0===t.currency?"":t.currency[1]+"",u=void 0===t.decimal?".":t.decimal+"",a=void 0===t.numerals?Rn:function(t){return function(n){return n.replace(/[0-9]/g,(function(n){return t[+n]}))}}(In.call(t.numerals,String)),l=void 0===t.percent?"%":t.percent+"",s=void 0===t.minus?"−":t.minus+"",c=void 0===t.nan?"NaN":t.nan+"";function f(t){var n=(t=Tn(t)).fill,e=t.align,f=t.sign,h=t.symbol,p=t.zero,d=t.width,g=t.comma,v=t.precision,y=t.trim,m=t.type;"n"===m?(g=!0,m="g"):Hn[m]||(void 0===v&&(v=12),y=!0,m="g"),(p||"0"===n&&"="===e)&&(p=!0,n="0",e="=");var _="$"===h?i:"#"===h&&/[boxX]/.test(m)?"0"+m.toLowerCase():"",w="$"===h?o:/[%p]/.test(m)?l:"",b=Hn[m],x=/[defgprs%]/.test(m);function M(t){var i,o,l,h=_,M=w;if("c"===m)M=b(t)+M,t="";else{var A=(t=+t)<0||1/t<0;if(t=isNaN(t)?c:b(Math.abs(t),v),y&&(t=function(t){t:for(var n,e=t.length,r=1,i=-1;r0&&(i=0)}return i>0?t.slice(0,i)+t.slice(n+1):t}(t)),A&&0==+t&&"+"!==f&&(A=!1),h=(A?"("===f?f:s:"-"===f||"("===f?"":f)+h,M=("s"===m?$n[8+On/3]:"")+M+(A&&"("===f?")":""),x)for(i=-1,o=t.length;++i(l=t.charCodeAt(i))||l>57){M=(46===l?u+t.slice(i+1):t.slice(i))+M,t=t.slice(0,i);break}}g&&!p&&(t=r(t,1/0));var 
N=h.length+t.length+M.length,E=N>1)+h+t+M+E.slice(N);break;default:t=E+h+t+M}return a(t)}return v=void 0===v?6:/[gprs]/.test(m)?Math.max(1,Math.min(21,v)):Math.max(0,Math.min(20,v)),M.toString=function(){return t+""},M}return{format:f,formatPrefix:function(t,n){var e=f(((t=Tn(t)).type="f",t)),r=3*Math.max(-8,Math.min(8,Math.floor(qn(n)/3))),i=Math.pow(10,-r),o=$n[8+r/3];return function(t){return e(i*t)+o}}}}({thousands:",",grouping:[3],currency:["$",""]}),Xn=Vn.format,zn=Vn.formatPrefix,Wn.prototype=Zn.prototype={constructor:Wn,count:function(){return this.eachAfter(Fn)},each:function(t,n){let e=-1;for(const r of this)t.call(n,r,++e,this);return this},eachAfter:function(t,n){for(var e,r,i,o=this,u=[o],a=[],l=-1;o=u.pop();)if(a.push(o),e=o.children)for(r=0,i=e.length;r=0;--r)o.push(e[r]);return this},find:function(t,n){let e=-1;for(const r of this)if(t.call(n,r,++e,this))return r},sum:function(t){return this.eachAfter((function(n){for(var e=+t(n.data)||0,r=n.children,i=r&&r.length;--i>=0;)e+=r[i].value;n.value=e}))},sort:function(t){return this.eachBefore((function(n){n.children&&n.children.sort(t)}))},path:function(t){for(var n=this,e=function(t,n){if(t===n)return t;var e=t.ancestors(),r=n.ancestors(),i=null;for(t=e.pop(),n=r.pop();t===n;)i=t,t=e.pop(),n=r.pop();return i}(n,t),r=[n];n!==e;)n=n.parent,r.push(n);for(var i=r.length;t!==e;)r.splice(i,0,t),t=t.parent;return r},ancestors:function(){for(var t=this,n=[t];t=t.parent;)n.push(t);return n},descendants:function(){return Array.from(this)},leaves:function(){var t=[];return this.eachBefore((function(n){n.children||t.push(n)})),t},links:function(){var t=this,n=[];return t.each((function(e){e!==t&&n.push({source:e.parent,target:e})})),n},copy:function(){return Zn(this).eachBefore(Kn)},[Symbol.iterator]:function*(){var t,n,e,r,i=this,o=[i];do{for(t=o.reverse(),o=[];i=t.pop();)if(yield 
i,n=i.children)for(e=0,r=n.length;e=0?(o>=te?10:o>=ne?5:o>=ee?2:1)*Math.pow(10,i):-Math.pow(10,-i)/(o>=te?10:o>=ne?5:o>=ee?2:1)}function ie(t){let n=t,e=t,r=t;function i(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<0?i=e+1:o=e}while(it(n)-e,e=Un,r=(n,e)=>Un(t(n),e)),{left:i,center:function(t,e,r=0,o=t.length){const u=i(t,e,r,o-1);return u>r&&n(t[u-1],e)>-n(t[u],e)?u-1:u},right:function(t,n,i=0,o=t.length){if(i>>1;r(t[e],n)<=0?i=e+1:o=e}while(i>8&15|n>>4&240,n>>4&15|240&n,(15&n)<<4|15&n,1):8===e?Se(n>>24&255,n>>16&255,n>>8&255,(255&n)/255):4===e?Se(n>>12&15|n>>8&240,n>>8&15|n>>4&240,n>>4&15|240&n,((15&n)<<4|15&n)/255):null):(n=ye.exec(t))?new je(n[1],n[2],n[3],1):(n=me.exec(t))?new je(255*n[1]/100,255*n[2]/100,255*n[3]/100,1):(n=_e.exec(t))?Se(n[1],n[2],n[3],n[4]):(n=we.exec(t))?Se(255*n[1]/100,255*n[2]/100,255*n[3]/100,n[4]):(n=be.exec(t))?Te(n[1],n[2]/100,n[3]/100,1):(n=xe.exec(t))?Te(n[1],n[2]/100,n[3]/100,n[4]):Me.hasOwnProperty(t)?ke(Me[t]):"transparent"===t?new je(NaN,NaN,NaN,0):null}function ke(t){return new je(t>>16&255,t>>8&255,255&t,1)}function Se(t,n,e,r){return r<=0&&(t=n=e=NaN),new je(t,n,e,r)}function Ce(t){return t instanceof ce||(t=Ee(t)),t?new je((t=t.rgb()).r,t.g,t.b,t.opacity):new je}function Pe(t,n,e,r){return 1===arguments.length?Ce(t):new je(t,n,e,null==r?1:r)}function je(t,n,e,r){this.r=+t,this.g=+n,this.b=+e,this.opacity=+r}function qe(){return"#"+Le(this.r)+Le(this.g)+Le(this.b)}function Oe(){var t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"rgb(":"rgba(")+Math.max(0,Math.min(255,Math.round(this.r)||0))+", "+Math.max(0,Math.min(255,Math.round(this.g)||0))+", "+Math.max(0,Math.min(255,Math.round(this.b)||0))+(1===t?")":", "+t+")")}function Le(t){return((t=Math.max(0,Math.min(255,Math.round(t)||0)))<16?"0":"")+t.toString(16)}function Te(t,n,e,r){return r<=0?t=n=e=NaN:e<=0||e>=1?t=n=NaN:n<=0&&(t=NaN),new De(t,n,e,r)}function Be(t){if(t instanceof De)return new De(t.h,t.s,t.l,t.opacity);if(t instanceof ce||(t=Ee(t)),!t)return new 
De;if(t instanceof De)return t;var n=(t=t.rgb()).r/255,e=t.g/255,r=t.b/255,i=Math.min(n,e,r),o=Math.max(n,e,r),u=NaN,a=o-i,l=(o+i)/2;return a?(u=n===o?(e-r)/a+6*(e0&&l<1?0:u,new De(u,a,l,t.opacity)}function De(t,n,e,r){this.h=+t,this.s=+n,this.l=+e,this.opacity=+r}function He(t,n,e){return 255*(t<60?n+(e-n)*t/60:t<180?e:t<240?n+(e-n)*(240-t)/60:n)}function Re(t,n,e,r,i){var o=t*t,u=o*t;return((1-3*t+3*o-u)*n+(4-6*o+3*u)*e+(1+3*t+3*o-3*u)*r+u*i)/6}function Ve(t){return function(){return t}}function Xe(t,n){var e=n-t;return e?function(t,n){return function(e){return t+e*n}}(t,e):Ve(isNaN(t)?n:t)}le(ce,Ee,{copy:function(t){return Object.assign(new this.constructor,this,t)},displayable:function(){return this.rgb().displayable()},hex:Ae,formatHex:Ae,formatHsl:function(){return Be(this).formatHsl()},formatRgb:Ne,toString:Ne}),le(je,Pe,se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new je(this.r*t,this.g*t,this.b*t,this.opacity)},rgb:function(){return this},displayable:function(){return-.5<=this.r&&this.r<255.5&&-.5<=this.g&&this.g<255.5&&-.5<=this.b&&this.b<255.5&&0<=this.opacity&&this.opacity<=1},hex:qe,formatHex:qe,formatRgb:Oe,toString:Oe})),le(De,(function(t,n,e,r){return 1===arguments.length?Be(t):new De(t,n,e,null==r?1:r)}),se(ce,{brighter:function(t){return t=null==t?he:Math.pow(he,t),new De(this.h,this.s,this.l*t,this.opacity)},darker:function(t){return t=null==t?fe:Math.pow(fe,t),new De(this.h,this.s,this.l*t,this.opacity)},rgb:function(){var t=this.h%360+360*(this.h<0),n=isNaN(t)||isNaN(this.s)?0:this.s,e=this.l,r=e+(e<.5?e:1-e)*n,i=2*e-r;return new je(He(t>=240?t-240:t+120,i,r),He(t,i,r),He(t<120?t+240:t-120,i,r),this.opacity)},displayable:function(){return(0<=this.s&&this.s<=1||isNaN(this.s))&&0<=this.l&&this.l<=1&&0<=this.opacity&&this.opacity<=1},formatHsl:function(){var 
t=this.opacity;return(1===(t=isNaN(t)?1:Math.max(0,Math.min(1,t)))?"hsl(":"hsla(")+(this.h||0)+", "+100*(this.s||0)+"%, "+100*(this.l||0)+"%"+(1===t?")":", "+t+")")}}));const ze=function t(n){var e=function(t){return 1==(t=+t)?Xe:function(n,e){return e-n?function(t,n,e){return t=Math.pow(t,e),n=Math.pow(n,e)-t,e=1/e,function(r){return Math.pow(t+r*n,e)}}(n,e,t):Ve(isNaN(n)?e:n)}}(n);function r(t,n){var r=e((t=Pe(t)).r,(n=Pe(n)).r),i=e(t.g,n.g),o=e(t.b,n.b),u=Xe(t.opacity,n.opacity);return function(n){return t.r=r(n),t.g=i(n),t.b=o(n),t.opacity=u(n),t+""}}return r.gamma=t,r}(1);function Ie(t){return function(n){var e,r,i=n.length,o=new Array(i),u=new Array(i),a=new Array(i);for(e=0;e=1?(e=1,n-1):Math.floor(e*n),i=t[r],o=t[r+1],u=r>0?t[r-1]:2*i-o,a=ro&&(i=n.slice(o,i),a[u]?a[u]+=i:a[++u]=i),(e=e[0])===(r=r[0])?a[u]?a[u]+=r:a[++u]=r:(a[++u]=null,l.push({i:u,x:Ye(e,r)})),o=Ge.lastIndex;return on&&(e=t,t=n,n=e),s=function(e){return Math.max(t,Math.min(n,e))}),r=l>2?or:ir,i=o=null,f}function f(n){return null==n||isNaN(n=+n)?e:(i||(i=r(u.map(t),a,l)))(t(s(n)))}return f.invert=function(e){return s(n((o||(o=r(a,u.map(t),Ye)))(e)))},f.domain=function(t){return arguments.length?(u=Array.from(t,tr),c()):u.slice()},f.range=function(t){return arguments.length?(a=Array.from(t),c()):a.slice()},f.rangeRound=function(t){return a=Array.from(t),l=We,c()},f.clamp=function(t){return arguments.length?(s=!!t||er,c()):s!==er},f.interpolate=function(t){return arguments.length?(l=t,c()):l},f.unknown=function(t){return arguments.length?(e=t,f):e},function(e,r){return t=e,n=r,c()}}()(er,er)}function lr(t,n){switch(arguments.length){case 0:break;case 1:this.range(t);break;default:this.range(n).domain(t)}return this}function sr(t){var n=t.domain;return t.ticks=function(t){var e=n();return function(t,n,e){var r,i,o,u,a=-1;if(e=+e,(t=+t)==(n=+n)&&e>0)return[t];if((r=n0){let e=Math.round(t/u),r=Math.round(n/u);for(e*un&&--r,o=new Array(i=r-e+1);++an&&--r,o=new 
Array(i=r-e+1);++a=te?i*=10:o>=ne?i*=5:o>=ee&&(i*=2),n0;){if((i=re(l,s,e))===r)return o[u]=l,o[a]=s,n(o);if(i>0)l=Math.floor(l/i)*i,s=Math.ceil(s/i)*i;else{if(!(i<0))break;l=Math.ceil(l*i)/i,s=Math.floor(s*i)/i}r=i}return t},t}function cr(){var t=ar();return t.copy=function(){return ur(t,cr())},lr.apply(t,arguments),sr(t)}function fr(t){return((t*=2)<=1?t*t*t:(t-=2)*t*t+2)/2}var hr={value:()=>{}};function pr(){for(var t,n=0,e=arguments.length,r={};n=0&&(e=t.slice(r+1),t=t.slice(0,r)),t&&!n.hasOwnProperty(t))throw new Error("unknown type: "+t);return{type:t,name:e}}))}function vr(t,n){for(var e,r=0,i=t.length;r0)for(var e,r,i=new Array(e),o=0;o=0&&n._call.call(null,t),n=n._next;--br}()}finally{br=0,function(){for(var t,n,e=_r,r=1/0;e;)e._call?(r>e._time&&(r=e._time),t=e,e=e._next):(n=e._next,e._next=null,e=t?t._next=n:_r=n);wr=t,Tr(r)}(),Nr=0}}function Lr(){var t=kr.now(),n=t-Ar;n>1e3&&(Er-=n,Ar=t)}function Tr(t){br||(xr&&(xr=clearTimeout(xr)),t-Nr>24?(t<1/0&&(xr=setTimeout(Or,t-kr.now()-Er)),Mr&&(Mr=clearInterval(Mr))):(Mr||(Ar=kr.now(),Mr=setInterval(Lr,1e3)),br=1,Sr(Or)))}function Br(t,n,e){var r=new jr;return n=null==n?0:+n,r.restart((function(e){r.stop(),t(e+n)}),n,e),r}jr.prototype=qr.prototype={constructor:jr,restart:function(t,n,e){if("function"!=typeof t)throw new TypeError("callback is not a function");e=(null==e?Cr():+e)+(null==n?0:+n),this._next||wr===this||(wr?wr._next=this:_r=this,wr=this),this._call=t,this._time=e,Tr()},stop:function(){this._call&&(this._call=null,this._time=1/0,Tr())}};var Dr=mr("start","end","cancel","interrupt"),Hr=[];function Rr(t,n,e,r,i,o){var u=t.__transition;if(u){if(e in u)return}else t.__transition={};!function(t,n,e){var r,i=t.__transition;function o(l){var s,c,f,h;if(1!==e.state)return a();for(s in i)if((h=i[s]).name===e.name){if(3===h.state)return Br(o);4===h.state?(h.state=6,h.timer.stop(),h.on.call("interrupt",t,t.__data__,h.index,h.group),delete i[s]):+s0)throw new Error("too late; already scheduled");return e}function 
Xr(t,n){var e=zr(t,n);if(e.state>3)throw new Error("too late; already running");return e}function zr(t,n){var e=t.__transition;if(!e||!(e=e[n]))throw new Error("transition not found");return e}var Ir,$r,Ur,Yr,Fr=180/Math.PI,Zr={translateX:0,translateY:0,rotate:0,skewX:0,scaleX:1,scaleY:1};function Gr(t,n,e,r,i,o){var u,a,l;return(u=Math.sqrt(t*t+n*n))&&(t/=u,n/=u),(l=t*e+n*r)&&(e-=t*l,r-=n*l),(a=Math.sqrt(e*e+r*r))&&(e/=a,r/=a,l/=a),t*r180?n+=360:n-t>180&&(t+=360),o.push({i:e.push(i(e)+"rotate(",null,r)-2,x:Ye(t,n)})):n&&e.push(i(e)+"rotate("+n+r)}(o.rotate,u.rotate,a,l),function(t,n,e,o){t!==n?o.push({i:e.push(i(e)+"skewX(",null,r)-2,x:Ye(t,n)}):n&&e.push(i(e)+"skewX("+n+r)}(o.skewX,u.skewX,a,l),function(t,n,e,r,o,u){if(t!==e||n!==r){var a=o.push(i(o)+"scale(",null,",",null,")");u.push({i:a-4,x:Ye(t,e)},{i:a-2,x:Ye(n,r)})}else 1===e&&1===r||o.push(i(o)+"scale("+e+","+r+")")}(o.scaleX,o.scaleY,u.scaleX,u.scaleY,a,l),o=u=null,function(t){for(var n,e=-1,r=l.length;++e=0&&(t=t.slice(0,n)),!t||"start"===t}))}(n)?Vr:Xr;return function(){var u=o(this,t),a=u.on;a!==r&&(i=(r=a).copy()).on(n,e),u.on=i}}var _i=Cn.prototype.constructor;function wi(t){return function(){this.style.removeProperty(t)}}function bi(t,n,e){return function(r){this.style.setProperty(t,n.call(this,r),e)}}function xi(t,n,e){var r,i;function o(){var o=n.apply(this,arguments);return o!==i&&(r=(i=o)&&bi(t,o,e)),r}return o._value=n,o}function Mi(t){return function(n){this.textContent=t.call(this,n)}}function Ai(t){var n,e;function r(){var r=t.apply(this,arguments);return r!==e&&(n=(e=r)&&Mi(r)),n}return r._value=t,r}var Ni=0;function Ei(t,n,e,r){this._groups=t,this._parents=n,this._name=e,this._id=r}function ki(){return++Ni}var Si=Cn.prototype;Ei.prototype=function(t){return Cn().transition(t)}.prototype={constructor:Ei,select:function(t){var n=this._name,e=this._id;"function"!=typeof t&&(t=gt(t));for(var r=this._groups,i=r.length,o=new Array(i),u=0;u{p&&(p.textContent="search: "+n+" of "+e+" total samples 
( "+Xn(".3f")(n/e*100,3)+"%)")},d()};const k=E;let S=(t,n,e=!1)=>{if(!n)return!1;let r=b(t);e&&(n=n.toLowerCase(),r=r.toLowerCase());const i=new RegExp(n);return void 0!==r&&r&&r.match(i)};const C=S;let P=function(t){p&&(t?p.textContent=t:"function"==typeof d?d():p.textContent="")};const j=P;let q=function(t){return b(t)+" ("+Xn(".3f")(100*(t.x1-t.x0),3)+"%, "+x(t)+" samples)"},O=function(t){return t.highlight?"#E600E6":function(t,n){let e=w||"warm";w||void 0===n||""===n||(e="red",void 0!==t&&t&&t.match(/::/)&&(e="yellow"),"kernel"===n?e="orange":"jit"===n?e="green":"inlined"===n&&(e="aqua"));const r=function(t){let n=0;if(t){const e=t.split("` + "`" + `");e.length>1&&(t=e[e.length-1]),n=function(t){let n=0,e=0,r=1;if(t){for(let i=0;i6);i++)n+=r*(t.charCodeAt(i)%10),e+=9*r,r*=.7;e>0&&(n/=e)}return n}(t=t.split("(")[0])}return n}(t);return function(t,n){let e,r,i;return"red"===t?(e=200+Math.round(55*n),r=50+Math.round(80*n),i=r):"orange"===t?(e=190+Math.round(65*n),r=90+Math.round(65*n),i=0):"yellow"===t?(e=175+Math.round(55*n),r=e,i=50+Math.round(20*n)):"green"===t?(e=50+Math.round(60*n),r=200+Math.round(55*n),i=e):"pastelgreen"===t?(e=163+Math.round(75*n),r=195+Math.round(49*n),i=72+Math.round(149*n)):"blue"===t?(e=91+Math.round(126*n),r=156+Math.round(76*n),i=221+Math.round(26*n)):"aqua"===t?(e=50+Math.round(60*n),r=165+Math.round(55*n),i=r):"cold"===t?(e=0+Math.round(55*(1-n)),r=0+Math.round(230*(1-n)),i=200+Math.round(55*n)):(e=200+Math.round(55*n),r=0+Math.round(230*(1-n)),i=0+Math.round(55*(1-n))),"rgb("+e+","+r+","+i+")"}(e,r)}(b(t),A(t))};const L=O;function T(t){t.data.fade=!1,t.data.hide=!1,t.children&&t.children.forEach(T)}function B(t){t.parent&&(t.parent.data.fade=!0,B(t.parent))}function D(t){if(i&&i.hide(),function(t){let n,e,r,i=t,o=i.parent;for(;o;){for(n=o.children,e=n.length;e--;)r=n[e],r!==i&&(r.data.hide=!0);i=o,o=i.parent}}(t),T(t),B(t),z(),y){const 
n=Pn(this).select("svg")._groups[0][0].parentNode.offsetTop,r=(window.innerHeight-n)/e,i=(t.height-r+10)*e;window.scrollTo({top:n+i,left:0,behavior:"smooth"})}"function"==typeof c&&c(t)}function H(t,n){if(t.id===n)return t;{const e=M(t);if(e)for(let t=0;t0){const r=t/(n.x1-n.x0);e=e.filter((function(t){return(t.x1-t.x0)*r>h}))}return e}(r),y=Pn(this).select("svg");y.attr("width",t);let _=y.selectAll("g").data(g,(function(t){return t.id}));if(!n||v){const t=Math.max.apply(null,g.map((function(t){return t.depth})));n=(t+3)*e,n{D(n)})),_.exit().remove(),_.on("mouseover",(function(t,n){i&&i.show(n,this),P(q(n)),"function"==typeof f&&f(n)})).on("mouseout",(function(){i&&i.hide(),P(null)}))}))}function I(t,n){n.forEach((function(n){const e=t.find((function(t){return t.name===n.name}));e?(e.value+=n.value,n.children&&(e.children||(e.children=[]),I(e.children,n.children))):t.push(n)}))}function $(t){let n,e,r,i,o,u,a,l;const s=[],c=[],f=[],h=!g;let p=t.data;for(p.hide?(t.value=0,e=t.children,e&&f.push(e)):(t.value=p.fade?0:x(p),s.push(t));n=s.pop();)if(e=n.children,e&&(o=e.length)){for(i=0;o--;)a=e[o],p=a.data,p.hide?(a.value=0,r=a.children,r&&f.push(r)):(p.fade?a.value=0:(l=x(p),a.value=l,i+=l),s.push(a));h&&n.value&&(n.value-=i),c.push(e)}for(o=c.length;o--;){for(e=c[o],i=0,u=e.length;u--;)i+=e[u].value;e[0].parent.value+=i}for(;f.length;)for(e=f.pop(),u=e.length;u--;)a=e[u],a.value=0,r=a.children,r&&f.push(r)}function U(){r.datum((t=>{if("Node"!==t.constructor.name){const n=Zn(t,M);return function(t){let n=0;!function(t,n){n(t);let e=t.children;if(e){const t=[e];let r,i,o;for(;t.length;)for(e=t.pop(),r=e.length;r--;)i=e[r],n(i),o=i.children,o&&t.push(o)}}(t,(function(t){t.id=n++}))}(n),$(n),n.originalValue=n.value,_&&n.eachAfter((t=>{let n=N(t);const e=t.children;let r=e&&e.length;for(;--r>=0;)n+=e[r].delta;t.delta=n})),n}}))}function Y(e){if(!arguments.length)return Y;r=e,U(),r.each((function(e){if(0===Pn(this).select("svg").size()){const 
e=Pn(this).append("svg:svg").attr("width",t).attr("class","partition d3-flame-graph");n&&(n(I([n.data],[t]),n.data))),U(),z(),Y):Y},Y.update=function(t){return r?(t&&(r.datum(t),U()),z(),Y):Y},Y.destroy=function(){return r?(i&&(i.hide(),"function"==typeof i.destroy&&i.destroy()),r.selectAll("svg").remove(),Y):Y},Y.setColorMapper=function(t){return arguments.length?(O=n=>{const e=L(n);return t(n,e)},Y):(O=L,Y)},Y.color=Y.setColorMapper,Y.setColorHue=function(t){return arguments.length?(w=t,Y):(w=null,Y)},Y.minFrameSize=function(t){return arguments.length?(h=t,Y):h},Y.setDetailsElement=function(t){return arguments.length?(p=t,Y):p},Y.details=Y.setDetailsElement,Y.selfValue=function(t){return arguments.length?(g=t,Y):g},Y.resetHeightOnZoom=function(t){return arguments.length?(v=t,Y):v},Y.scrollOnZoom=function(t){return arguments.length?(y=t,Y):y},Y.getName=function(t){return arguments.length?(b=t,Y):b},Y.getValue=function(t){return arguments.length?(x=t,Y):x},Y.getChildren=function(t){return arguments.length?(M=t,Y):M},Y.getLibtype=function(t){return arguments.length?(A=t,Y):A},Y.getDelta=function(t){return arguments.length?(N=t,Y):N},Y.setSearchHandler=function(t){return arguments.length?(E=t,Y):(E=k,Y)},Y.setDetailsHandler=function(t){return arguments.length?(P=t,Y):(P=j,Y)},Y.setSearchMatch=function(t){return arguments.length?(S=t,Y):(S=C,Y)},Y}return Cn.prototype.interrupt=function(t){return this.each((function(){!function(t,n){var e,r,i,o=t.__transition,u=!0;if(o){for(i in n=null==n?null:n+"",o)(e=o[i]).name===n?(r=e.state>2&&e.state<5,e.state=6,e.timer.stop(),e.on.call(r?"interrupt":"cancel",t,t.__data__,e.index,e.group),delete o[i]):u=!1;u&&delete t.__transition}}(this,t)}))},Cn.prototype.transition=function(t){var n,e;t instanceof Ei?(n=t._id,t=t._name):(n=ki(),(e=Ci).time=Cr(),t=null==t?null:t+"");for(var r=this._groups,i=r.length,o=0;o d3_flame_graph.go -// D3.js is a JavaScript library for manipulating documents based on data. 
-// https://github.com/d3/d3 -// See D3_LICENSE file for license details - -// d3-flame-graph is a D3.js plugin that produces flame graphs from hierarchical data. -// https://github.com/spiermar/d3-flame-graph -// See D3_FLAME_GRAPH_LICENSE file for license details - -package d3flamegraph - -// JSSource returns the d3 and d3-flame-graph JavaScript bundle -const JSSource = \` - -$d3_js -\` - -// CSSSource returns the $D3FLAMEGRAPH_CSS file -const CSSSource = \` -$d3_css -\` - -EOF - gofmt -w d3_flame_graph.go -} - -get_licenses() { - cp node_modules/d3-selection/LICENSE D3_LICENSE - cp node_modules/d3-flame-graph/LICENSE D3_FLAME_GRAPH_LICENSE -} - -get_licenses -generate_d3_flame_graph_go diff --git a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js b/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js deleted file mode 100644 index 71239d9e96..0000000000 --- a/src/cmd/vendor/github.com/google/pprof/third_party/d3flamegraph/webpack.config.js +++ /dev/null @@ -1,13 +0,0 @@ -// Minimal webpack config to package a minified JS bundle (including -// dependencies) for execution in a 1 so this loses precision in JS // but it is still a representable integer literal. 
- {uint64(1)<<53 + 1, " 9007199254740993 "}, - {float32(1.0), " 1 "}, - {float32(-1.0), " -1 "}, - {float32(0.5), " 0.5 "}, - {float32(-0.5), " -0.5 "}, - {float32(1.0) / float32(256), " 0.00390625 "}, - {float32(0), " 0 "}, - {math.Copysign(0, -1), " -0 "}, - {float64(1.0), " 1 "}, - {float64(-1.0), " -1 "}, - {float64(0.5), " 0.5 "}, - {float64(-0.5), " -0.5 "}, - {float64(0), " 0 "}, - {math.Copysign(0, -1), " -0 "}, - {"", `""`}, - {"foo", `"foo"`}, + {uint64(1)<<53 + 1, " 9007199254740993 ", false}, + {float32(1.0), " 1 ", false}, + {float32(-1.0), " -1 ", false}, + {float32(0.5), " 0.5 ", false}, + {float32(-0.5), " -0.5 ", false}, + {float32(1.0) / float32(256), " 0.00390625 ", false}, + {float32(0), " 0 ", false}, + {math.Copysign(0, -1), " -0 ", false}, + {float64(1.0), " 1 ", false}, + {float64(-1.0), " -1 ", false}, + {float64(0.5), " 0.5 ", false}, + {float64(-0.5), " -0.5 ", false}, + {float64(0), " 0 ", false}, + {math.Copysign(0, -1), " -0 ", false}, + {"", `""`, false}, + {"foo", `"foo"`, false}, // Newlines. - {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`}, + {"\r\n\u2028\u2029", `"\r\n\u2028\u2029"`, false}, // "\v" == "v" on IE 6 so use "\u000b" instead. 
- {"\t\x0b", `"\t\u000b"`}, - {struct{ X, Y int }{1, 2}, `{"X":1,"Y":2}`}, - {[]any{}, "[]"}, - {[]any{42, "foo", nil}, `[42,"foo",null]`}, - {[]string{""}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`}, - {"", `"--\u003e"`}, - {"", `"]]\u003e"`}, - {"", "-->"}, `["\u003c!--","\u003c/script\u003e","--\u003e"]`, false}, + {"", `"--\u003e"`, false}, + {"", `"]]\u003e"`, false}, + {" 0 && i+7 <= len(s) && bytes.Compare(bytes.ToLower(s[i-1:i+7]), []byte(" 0 && i+7 <= len(s) && bytes.Equal(bytes.ToLower(s[i-1 : i+7]), []byte("func lookup table diff --git a/src/internal/abi/type.go b/src/internal/abi/type.go index 86f055cb91..6474007de4 100644 --- a/src/internal/abi/type.go +++ b/src/internal/abi/type.go @@ -111,6 +111,12 @@ const ( // TFlagRegularMemory means that equal and hash functions can treat // this type as a single region of t.size bytes. TFlagRegularMemory TFlag = 1 << 3 + + // TFlagUnrolledBitmap marks special types that are unrolled-bitmap + // versions of types with GC programs. + // These types need to be deallocated when the underlying object + // is freed. + TFlagUnrolledBitmap TFlag = 1 << 4 ) // NameOff is the offset to a name from moduledata.types. See resolveNameOff in runtime. @@ -166,6 +172,7 @@ func (t *Type) HasName() bool { return t.TFlag&TFlagNamed != 0 } +// Pointers reports whether t contains pointers. func (t *Type) Pointers() bool { return t.PtrBytes != 0 } // IfaceIndir reports whether t is stored indirectly in an interface value. @@ -710,3 +717,67 @@ func NewName(n, tag string, exported, embedded bool) Name { return Name{Bytes: &b[0]} } + +const ( + TraceArgsLimit = 10 // print no more than 10 args/components + TraceArgsMaxDepth = 5 // no more than 5 layers of nesting + + // maxLen is a (conservative) upper bound of the byte stream length. For + // each arg/component, it has no more than 2 bytes of data (size, offset), + // and no more than one {, }, ... at each level (it cannot have both the + // data and ... 
unless it is the last one, just be conservative). Plus 1 + // for _endSeq. + TraceArgsMaxLen = (TraceArgsMaxDepth*3+2)*TraceArgsLimit + 1 +) + +// Populate the data. +// The data is a stream of bytes, which contains the offsets and sizes of the +// non-aggregate arguments or non-aggregate fields/elements of aggregate-typed +// arguments, along with special "operators". Specifically, +// - for each non-aggregate arg/field/element, its offset from FP (1 byte) and +// size (1 byte) +// - special operators: +// - 0xff - end of sequence +// - 0xfe - print { (at the start of an aggregate-typed argument) +// - 0xfd - print } (at the end of an aggregate-typed argument) +// - 0xfc - print ... (more args/fields/elements) +// - 0xfb - print _ (offset too large) +const ( + TraceArgsEndSeq = 0xff + TraceArgsStartAgg = 0xfe + TraceArgsEndAgg = 0xfd + TraceArgsDotdotdot = 0xfc + TraceArgsOffsetTooLarge = 0xfb + TraceArgsSpecial = 0xf0 // above this are operators, below this are ordinary offsets +) + +// MaxPtrmaskBytes is the maximum length of a GC ptrmask bitmap, +// which holds 1-bit entries describing where pointers are in a given type. +// Above this length, the GC information is recorded as a GC program, +// which can express repetition compactly. In either form, the +// information is used by the runtime to initialize the heap bitmap, +// and for large types (like 128 or more words), they are roughly the +// same speed. GC programs are never much larger and often more +// compact. (If large arrays are involved, they can be arbitrarily +// more compact.) +// +// The cutoff must be large enough that any allocation large enough to +// use a GC program is large enough that it does not share heap bitmap +// bytes with any other objects, allowing the GC program execution to +// assume an aligned start and not use atomic operations. In the current +// runtime, this means all malloc size classes larger than the cutoff must +// be multiples of four words. 
On 32-bit systems that's 16 bytes, and +// all size classes >= 16 bytes are 16-byte aligned, so no real constraint. +// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed +// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated +// is 32 pointers, the bits for which fit in 4 bytes. So MaxPtrmaskBytes +// must be >= 4. +// +// We used to use 16 because the GC programs do have some constant overhead +// to get started, and processing 128 pointers seems to be enough to +// amortize that overhead well. +// +// To make sure that the runtime's chansend can call typeBitsBulkBarrier, +// we raised the limit to 2048, so that even 32-bit systems are guaranteed to +// use bitmaps for objects up to 64 kB in size. +const MaxPtrmaskBytes = 2048 diff --git a/src/internal/asan/asan.go b/src/internal/asan/asan.go new file mode 100644 index 0000000000..0a8148e5b9 --- /dev/null +++ b/src/internal/asan/asan.go @@ -0,0 +1,19 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build asan + +package asan + +import ( + "unsafe" +) + +const Enabled = true + +//go:linkname Read runtime.asanread +func Read(addr unsafe.Pointer, len int) + +//go:linkname Write runtime.asanwrite +func Write(addr unsafe.Pointer, len int) diff --git a/src/internal/asan/doc.go b/src/internal/asan/doc.go new file mode 100644 index 0000000000..21b1bc945b --- /dev/null +++ b/src/internal/asan/doc.go @@ -0,0 +1,10 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package asan contains helper functions for manually instrumenting +// code for the address sanitizer. 
+// The runtime package intentionally exports these functions only in the +// asan build; this package exports them unconditionally but without the +// "asan" build tag they are no-ops. +package asan diff --git a/src/internal/asan/noasan.go b/src/internal/asan/noasan.go new file mode 100644 index 0000000000..e01b46a104 --- /dev/null +++ b/src/internal/asan/noasan.go @@ -0,0 +1,19 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !asan + +package asan + +import ( + "unsafe" +) + +const Enabled = false + +func Read(addr unsafe.Pointer, len int) { +} + +func Write(addr unsafe.Pointer, len int) { +} diff --git a/src/internal/bisect/bisect.go b/src/internal/bisect/bisect.go index bf67ceb9d7..3e5a6849f7 100644 --- a/src/internal/bisect/bisect.go +++ b/src/internal/bisect/bisect.go @@ -482,7 +482,6 @@ func (m *Matcher) stack(w Writer) bool { } } return m.ShouldEnable(h) - } // Writer is the same interface as io.Writer. 
diff --git a/src/internal/buildcfg/cfg.go b/src/internal/buildcfg/cfg.go index b97b9c1b53..b074a36b94 100644 --- a/src/internal/buildcfg/cfg.go +++ b/src/internal/buildcfg/cfg.go @@ -15,25 +15,26 @@ import ( "fmt" "os" "path/filepath" - "runtime" "strconv" "strings" ) var ( - GOROOT = runtime.GOROOT() // cached for efficiency - GOARCH = envOr("GOARCH", defaultGOARCH) - GOOS = envOr("GOOS", defaultGOOS) - GO386 = envOr("GO386", defaultGO386) - GOAMD64 = goamd64() - GOARM = goarm() - GOMIPS = gomips() - GOMIPS64 = gomips64() - GOPPC64 = goppc64() - GOWASM = gowasm() - ToolTags = toolTags() - GO_LDSO = defaultGO_LDSO - Version = version + GOROOT = os.Getenv("GOROOT") // cached for efficiency + GOARCH = envOr("GOARCH", defaultGOARCH) + GOOS = envOr("GOOS", defaultGOOS) + GO386 = envOr("GO386", defaultGO386) + GOAMD64 = goamd64() + GOARM = goarm() + GOARM64 = goarm64() + GOMIPS = gomips() + GOMIPS64 = gomips64() + GOPPC64 = goppc64() + GORISCV64 = goriscv64() + GOWASM = gowasm() + ToolTags = toolTags() + GO_LDSO = defaultGO_LDSO + Version = version ) // Error is one of the errors found (if any) in the build configuration. @@ -69,22 +70,161 @@ func goamd64() int { return int(defaultGOAMD64[len("v")] - '0') } -func goarm() int { +type goarmFeatures struct { + Version int + SoftFloat bool +} + +func (g goarmFeatures) String() string { + armStr := strconv.Itoa(g.Version) + if g.SoftFloat { + armStr += ",softfloat" + } else { + armStr += ",hardfloat" + } + return armStr +} + +func goarm() (g goarmFeatures) { + const ( + softFloatOpt = ",softfloat" + hardFloatOpt = ",hardfloat" + ) def := defaultGOARM if GOOS == "android" && GOARCH == "arm" { // Android arm devices always support GOARM=7. 
def = "7" } - switch v := envOr("GOARM", def); v { - case "5": - return 5 - case "6": - return 6 - case "7": - return 7 + v := envOr("GOARM", def) + + floatSpecified := false + if strings.HasSuffix(v, softFloatOpt) { + g.SoftFloat = true + floatSpecified = true + v = v[:len(v)-len(softFloatOpt)] + } + if strings.HasSuffix(v, hardFloatOpt) { + floatSpecified = true + v = v[:len(v)-len(hardFloatOpt)] + } + + switch v { + case "5": + g.Version = 5 + case "6": + g.Version = 6 + case "7": + g.Version = 7 + default: + Error = fmt.Errorf("invalid GOARM: must start with 5, 6, or 7, and may optionally end in either %q or %q", hardFloatOpt, softFloatOpt) + g.Version = int(def[0] - '0') + } + + // 5 defaults to softfloat. 6 and 7 default to hardfloat. + if !floatSpecified && g.Version == 5 { + g.SoftFloat = true + } + return +} + +type goarm64Features struct { + Version string + // Large Systems Extension + LSE bool + // ARM v8.0 Cryptographic Extension. It includes the following features: + // * FEAT_AES, which includes the AESD and AESE instructions. + // * FEAT_PMULL, which includes the PMULL, PMULL2 instructions. + // * FEAT_SHA1, which includes the SHA1* instructions. + // * FEAT_SHA256, which includes the SHA256* instructions. 
+ Crypto bool +} + +func (g goarm64Features) String() string { + arm64Str := g.Version + if g.LSE { + arm64Str += ",lse" + } + if g.Crypto { + arm64Str += ",crypto" + } + return arm64Str +} + +func parseGoarm64(v string) (g goarm64Features) { + const ( + lseOpt = ",lse" + cryptoOpt = ",crypto" + ) + + g.LSE = false + g.Crypto = false + // We allow any combination of suffixes, in any order + for { + if strings.HasSuffix(v, lseOpt) { + g.LSE = true + v = v[:len(v)-len(lseOpt)] + continue + } + + if strings.HasSuffix(v, cryptoOpt) { + g.Crypto = true + v = v[:len(v)-len(cryptoOpt)] + continue + } + + break + } + + switch v { + case "v8.0": + g.Version = v + case "v8.1", "v8.2", "v8.3", "v8.4", "v8.5", "v8.6", "v8.7", "v8.8", "v8.9", + "v9.0", "v9.1", "v9.2", "v9.3", "v9.4", "v9.5": + g.Version = v + // LSE extension is mandatory starting from 8.1 + g.LSE = true + default: + Error = fmt.Errorf("invalid GOARM64: must start with v8.{0-9} or v9.{0-5} and may optionally end in %q and/or %q", + lseOpt, cryptoOpt) + g.Version = defaultGOARM64 + } + + return +} + +func goarm64() goarm64Features { + return parseGoarm64(envOr("GOARM64", defaultGOARM64)) +} + +// Returns true if g supports giving ARM64 ISA +// Note that this function doesn't accept / test suffixes (like ",lse" or ",crypto") +func (g goarm64Features) Supports(s string) bool { + // We only accept "v{8-9}.{0-9}. Everything else is malformed. + if len(s) != 4 { + return false + } + + major := s[1] + minor := s[3] + + // We only accept "v{8-9}.{0-9}. Everything else is malformed. + if major < '8' || major > '9' || + minor < '0' || minor > '9' || + s[0] != 'v' || s[2] != '.' { + return false + } + + g_major := g.Version[1] + g_minor := g.Version[3] + + if major == g_major { + return minor <= g_minor + } else if g_major == '9' { + // v9.0 diverged from v8.5. This means we should compare with g_minor increased by five. 
+ return minor <= g_minor+5 + } else { + return false } - Error = fmt.Errorf("invalid GOARM: must be 5, 6, 7") - return int(def[0] - '0') } func gomips() string { @@ -118,6 +258,22 @@ func goppc64() int { return int(defaultGOPPC64[len("power")] - '0') } +func goriscv64() int { + switch v := envOr("GORISCV64", defaultGORISCV64); v { + case "rva20u64": + return 20 + case "rva22u64": + return 22 + } + Error = fmt.Errorf("invalid GORISCV64: must be rva20u64, rva22u64") + v := defaultGORISCV64[len("rva"):] + i := strings.IndexFunc(v, func(r rune) bool { + return r < '0' || r > '9' + }) + year, _ := strconv.Atoi(v[:i]) + return year +} + type gowasmFeatures struct { SatConv bool SignExt bool @@ -182,7 +338,9 @@ func GOGOARCH() (name, value string) { case "amd64": return "GOAMD64", fmt.Sprintf("v%d", GOAMD64) case "arm": - return "GOARM", strconv.Itoa(GOARM) + return "GOARM", GOARM.String() + case "arm64": + return "GOARM64", GOARM64.String() case "mips", "mipsle": return "GOMIPS", GOMIPS case "mips64", "mips64le": @@ -207,10 +365,24 @@ func gogoarchTags() []string { return list case "arm": var list []string - for i := 5; i <= GOARM; i++ { + for i := 5; i <= GOARM.Version; i++ { list = append(list, fmt.Sprintf("%s.%d", GOARCH, i)) } return list + case "arm64": + var list []string + major := int(GOARM64.Version[1] - '0') + minor := int(GOARM64.Version[3] - '0') + for i := 0; i <= minor; i++ { + list = append(list, fmt.Sprintf("%s.v%d.%d", GOARCH, major, i)) + } + // ARM64 v9.x also includes support of v8.x+5 (i.e. v9.1 includes v8.(1+5) = v8.6). + if major == 9 { + for i := 0; i <= minor+5 && i <= 9; i++ { + list = append(list, fmt.Sprintf("%s.v%d.%d", GOARCH, 8, i)) + } + } + return list case "mips", "mipsle": return []string{GOARCH + "." + GOMIPS} case "mips64", "mips64le": @@ -221,6 +393,12 @@ func gogoarchTags() []string { list = append(list, fmt.Sprintf("%s.power%d", GOARCH, i)) } return list + case "riscv64": + list := []string{GOARCH + "." 
+ "rva20u64"} + if GORISCV64 >= 22 { + list = append(list, GOARCH+"."+"rva22u64") + } + return list case "wasm": var list []string if GOWASM.SatConv { diff --git a/src/internal/buildcfg/cfg_test.go b/src/internal/buildcfg/cfg_test.go index 0123593317..33a9c5e1b8 100644 --- a/src/internal/buildcfg/cfg_test.go +++ b/src/internal/buildcfg/cfg_test.go @@ -23,4 +23,103 @@ func TestConfigFlags(t *testing.T) { if goamd64(); Error == nil { t.Errorf("Wrong parsing of GOAMD64=1") } + + os.Setenv("GORISCV64", "rva20u64") + if goriscv64() != 20 { + t.Errorf("Wrong parsing of RISCV64=rva20u64") + } + os.Setenv("GORISCV64", "rva22u64") + if goriscv64() != 22 { + t.Errorf("Wrong parsing of RISCV64=rva22u64") + } + Error = nil + os.Setenv("GORISCV64", "rva22") + if _ = goriscv64(); Error == nil { + t.Errorf("Wrong parsing of RISCV64=rva22") + } + Error = nil + os.Setenv("GOARM64", "v7.0") + if _ = goarm64(); Error == nil { + t.Errorf("Wrong parsing of GOARM64=7.0") + } + Error = nil + os.Setenv("GOARM64", "8.0") + if _ = goarm64(); Error == nil { + t.Errorf("Wrong parsing of GOARM64=8.0") + } + Error = nil + os.Setenv("GOARM64", "v8.0,lsb") + if _ = goarm64(); Error == nil { + t.Errorf("Wrong parsing of GOARM64=v8.0,lsb") + } + os.Setenv("GOARM64", "v8.0,lse") + if goarm64().Version != "v8.0" || goarm64().LSE != true || goarm64().Crypto != false { + t.Errorf("Wrong parsing of GOARM64=v8.0,lse") + } + os.Setenv("GOARM64", "v8.0,crypto") + if goarm64().Version != "v8.0" || goarm64().LSE != false || goarm64().Crypto != true { + t.Errorf("Wrong parsing of GOARM64=v8.0,crypto") + } + os.Setenv("GOARM64", "v8.0,crypto,lse") + if goarm64().Version != "v8.0" || goarm64().LSE != true || goarm64().Crypto != true { + t.Errorf("Wrong parsing of GOARM64=v8.0,crypto,lse") + } + os.Setenv("GOARM64", "v8.0,lse,crypto") + if goarm64().Version != "v8.0" || goarm64().LSE != true || goarm64().Crypto != true { + t.Errorf("Wrong parsing of GOARM64=v8.0,lse,crypto") + } + os.Setenv("GOARM64", "v9.0") + 
if goarm64().Version != "v9.0" || goarm64().LSE != true || goarm64().Crypto != false { + t.Errorf("Wrong parsing of GOARM64=v9.0") + } +} + +func TestGoarm64FeaturesSupports(t *testing.T) { + g := parseGoarm64("v9.3") + + if !g.Supports("v9.3") { + t.Errorf("Wrong goarm64Features.Supports for v9.3, v9.3") + } + + if g.Supports("v9.4") { + t.Errorf("Wrong goarm64Features.Supports for v9.3, v9.4") + } + + if !g.Supports("v8.8") { + t.Errorf("Wrong goarm64Features.Supports for v9.3, v8.8") + } + + if g.Supports("v8.9") { + t.Errorf("Wrong goarm64Features.Supports for v9.3, v8.9") + } + + if g.Supports(",lse") { + t.Errorf("Wrong goarm64Features.Supports for v9.3, ,lse") + } +} + +func TestGogoarchTags(t *testing.T) { + old_goarch := GOARCH + old_goarm64 := GOARM64 + + GOARCH = "arm64" + + os.Setenv("GOARM64", "v9.5") + GOARM64 = goarm64() + tags := gogoarchTags() + want := []string{"arm64.v9.0", "arm64.v9.1", "arm64.v9.2", "arm64.v9.3", "arm64.v9.4", "arm64.v9.5", + "arm64.v8.0", "arm64.v8.1", "arm64.v8.2", "arm64.v8.3", "arm64.v8.4", "arm64.v8.5", "arm64.v8.6", "arm64.v8.7", "arm64.v8.8", "arm64.v8.9"} + if len(tags) != len(want) { + t.Errorf("Wrong number of tags for GOARM64=v9.5") + } else { + for i, v := range tags { + if v != want[i] { + t.Error("Wrong tags for GOARM64=v9.5") + break + } + } + } + + GOARCH = old_goarch + GOARM64 = old_goarm64 } diff --git a/src/internal/buildcfg/exp.go b/src/internal/buildcfg/exp.go index c6a6c84f3c..a45cfaf862 100644 --- a/src/internal/buildcfg/exp.go +++ b/src/internal/buildcfg/exp.go @@ -65,6 +65,8 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { case "amd64", "arm64", "ppc64le", "ppc64", "riscv64": regabiAlwaysOn = true regabiSupported = true + case "loong64": + regabiSupported = true } baseline := goexperiment.Flags{ @@ -72,6 +74,7 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { RegabiArgs: regabiSupported, CoverageRedesign: true, AllocHeaders: true, + 
ExecTracer2: true, } // Start with the statically enabled set of experiments. @@ -130,7 +133,7 @@ func ParseGOEXPERIMENT(goos, goarch, goexp string) (*ExperimentFlags, error) { flags.RegabiWrappers = true flags.RegabiArgs = true } - // regabi is only supported on amd64, arm64, riscv64, ppc64 and ppc64le. + // regabi is only supported on amd64, arm64, loong64, riscv64, ppc64 and ppc64le. if !regabiSupported { flags.RegabiWrappers = false flags.RegabiArgs = false diff --git a/src/internal/bytealg/bytealg.go b/src/internal/bytealg/bytealg.go index 1103891eee..6b79a2e1fa 100644 --- a/src/internal/bytealg/bytealg.go +++ b/src/internal/bytealg/bytealg.go @@ -111,7 +111,8 @@ func LastIndexRabinKarp[T string | []byte](s, sep T) int { return -1 } -// MakeNoZero makes a slice of length and capacity n without zeroing the bytes. +// MakeNoZero makes a slice of length n and capacity of at least n Bytes +// without zeroing the bytes (including the bytes between len and cap). // It is the caller's responsibility to ensure uninitialized bytes // do not leak to the end user. 
func MakeNoZero(n int) []byte diff --git a/src/internal/bytealg/compare_loong64.s b/src/internal/bytealg/compare_loong64.s index c89c5a9256..311449ab18 100644 --- a/src/internal/bytealg/compare_loong64.s +++ b/src/internal/bytealg/compare_loong64.s @@ -5,83 +5,102 @@ #include "go_asm.h" #include "textflag.h" -TEXT ·Compare(SB),NOSPLIT,$0-56 - MOVV a_base+0(FP), R6 - MOVV b_base+24(FP), R7 - MOVV a_len+8(FP), R4 - MOVV b_len+32(FP), R5 +TEXT ·Compare(SB),NOSPLIT,$0-56 +#ifndef GOEXPERIMENT_regabiargs + MOVV a_base+0(FP), R4 + MOVV a_len+8(FP), R5 + MOVV b_base+24(FP), R6 + MOVV b_len+32(FP), R7 MOVV $ret+48(FP), R13 +#else + // R4 = a_base + // R5 = a_len + // R6 = a_cap (unused) + // R7 = b_base (want in R6) + // R8 = b_len (want in R7) + // R9 = b_cap (unused) + MOVV R7, R6 + MOVV R8, R7 +#endif JMP cmpbody<>(SB) -TEXT runtime·cmpstring(SB),NOSPLIT,$0-40 - MOVV a_base+0(FP), R6 - MOVV b_base+16(FP), R7 - MOVV a_len+8(FP), R4 - MOVV b_len+24(FP), R5 +TEXT runtime·cmpstring(SB),NOSPLIT,$0-40 +#ifndef GOEXPERIMENT_regabiargs + MOVV a_base+0(FP), R4 + MOVV b_base+16(FP), R6 + MOVV a_len+8(FP), R5 + MOVV b_len+24(FP), R7 MOVV $ret+32(FP), R13 +#endif + // R4 = a_base + // R5 = a_len + // R6 = b_base + // R7 = b_len JMP cmpbody<>(SB) // On entry: -// R4 length of a -// R5 length of b -// R6 points to the start of a -// R7 points to the start of b +// R5 length of a +// R7 length of b +// R4 points to the start of a +// R6 points to the start of b // R13 points to the return value (-1/0/1) TEXT cmpbody<>(SB),NOSPLIT|NOFRAME,$0 - BEQ R6, R7, samebytes // same start of a and b + BEQ R4, R6, samebytes // same start of a and b - SGTU R4, R5, R9 + SGTU R5, R7, R9 BNE R0, R9, r2_lt_r1 - MOVV R4, R14 + MOVV R5, R14 JMP entry r2_lt_r1: - MOVV R5, R14 // R14 is min(R4, R5) + MOVV R7, R14 // R14 is min(R4, R5) entry: - ADDV R6, R14, R12 // R6 start of a, R14 end of a - BEQ R6, R12, samebytes // length is 0 + ADDV R4, R14, R12 // R6 start of a, R14 end of a + BEQ R4, R12, samebytes 
// length is 0 SRLV $4, R14 // R14 is number of chunks BEQ R0, R14, byte_loop // make sure both a and b are aligned. - OR R6, R7, R15 + OR R4, R6, R15 AND $7, R15 BNE R0, R15, byte_loop PCALIGN $16 chunk16_loop: BEQ R0, R14, byte_loop - MOVV (R6), R8 - MOVV (R7), R9 + MOVV (R4), R8 + MOVV (R6), R9 BNE R8, R9, byte_loop - MOVV 8(R6), R16 - MOVV 8(R7), R17 + MOVV 8(R4), R16 + MOVV 8(R6), R17 + ADDV $16, R4 ADDV $16, R6 - ADDV $16, R7 SUBVU $1, R14 BEQ R16, R17, chunk16_loop + SUBV $8, R4 SUBV $8, R6 - SUBV $8, R7 byte_loop: - BEQ R6, R12, samebytes - MOVBU (R6), R8 + BEQ R4, R12, samebytes + MOVBU (R4), R8 + ADDVU $1, R4 + MOVBU (R6), R9 ADDVU $1, R6 - MOVBU (R7), R9 - ADDVU $1, R7 BEQ R8, R9, byte_loop byte_cmp: - SGTU R8, R9, R12 // R12 = 1 if (R8 > R9) - BNE R0, R12, ret - MOVV $-1, R12 + SGTU R8, R9, R4 // R12 = 1 if (R8 > R9) + BNE R0, R4, ret + MOVV $-1, R4 JMP ret samebytes: - SGTU R4, R5, R8 - SGTU R5, R4, R9 - SUBV R9, R8, R12 + SGTU R5, R7, R8 + SGTU R7, R5, R9 + SUBV R9, R8, R4 ret: - MOVV R12, (R13) +#ifndef GOEXPERIMENT_regabiargs + MOVV R4, (R13) +#endif RET diff --git a/src/internal/bytealg/count_riscv64.s b/src/internal/bytealg/count_riscv64.s index d123cbd7c6..3f255cd263 100644 --- a/src/internal/bytealg/count_riscv64.s +++ b/src/internal/bytealg/count_riscv64.s @@ -14,6 +14,7 @@ TEXT ·Count(SB),NOSPLIT,$0-40 MOV ZERO, X14 // count ADD X10, X11 // end + PCALIGN $16 loop: BEQ X10, X11, done MOVBU (X10), X15 @@ -34,6 +35,7 @@ TEXT ·CountString(SB),NOSPLIT,$0-32 MOV ZERO, X14 // count ADD X10, X11 // end + PCALIGN $16 loop: BEQ X10, X11, done MOVBU (X10), X15 diff --git a/src/internal/bytealg/equal_arm64.s b/src/internal/bytealg/equal_arm64.s index d3aabba587..4db9515474 100644 --- a/src/internal/bytealg/equal_arm64.s +++ b/src/internal/bytealg/equal_arm64.s @@ -9,6 +9,9 @@ TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 // short path to handle 0-byte case CBZ R2, equal + // short path to handle equal pointers + CMP R0, R1 + BEQ equal B memeqbody<>(SB) 
equal: MOVD $1, R0 diff --git a/src/internal/bytealg/equal_loong64.s b/src/internal/bytealg/equal_loong64.s index ba2a5578c3..a3ad5c1b35 100644 --- a/src/internal/bytealg/equal_loong64.s +++ b/src/internal/bytealg/equal_loong64.s @@ -8,17 +8,21 @@ #define REGCTXT R29 // memequal(a, b unsafe.Pointer, size uintptr) bool -TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 +TEXT runtime·memequal(SB),NOSPLIT|NOFRAME,$0-25 +#ifndef GOEXPERIMENT_regabiargs MOVV a+0(FP), R4 MOVV b+8(FP), R5 - BEQ R4, R5, eq MOVV size+16(FP), R6 +#endif + BEQ R4, R5, eq ADDV R4, R6, R7 PCALIGN $16 loop: BNE R4, R7, test MOVV $1, R4 +#ifndef GOEXPERIMENT_regabiargs MOVB R4, ret+24(FP) +#endif RET test: MOVBU (R4), R9 @@ -27,17 +31,24 @@ test: ADDV $1, R5 BEQ R9, R10, loop + MOVB R0, R4 +#ifndef GOEXPERIMENT_regabiargs MOVB R0, ret+24(FP) +#endif RET eq: MOVV $1, R4 +#ifndef GOEXPERIMENT_regabiargs MOVB R4, ret+24(FP) +#endif RET // memequal_varlen(a, b unsafe.Pointer) bool -TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 +TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 +#ifndef GOEXPERIMENT_regabiargs MOVV a+0(FP), R4 MOVV b+8(FP), R5 +#endif BEQ R4, R5, eq MOVV 8(REGCTXT), R6 // compiler stores size at offset 8 in the closure MOVV R4, 8(R3) @@ -45,9 +56,13 @@ TEXT runtime·memequal_varlen(SB),NOSPLIT,$40-17 MOVV R6, 24(R3) JAL runtime·memequal(SB) MOVBU 32(R3), R4 +#ifndef GOEXPERIMENT_regabiargs MOVB R4, ret+16(FP) +#endif RET eq: MOVV $1, R4 +#ifndef GOEXPERIMENT_regabiargs MOVB R4, ret+16(FP) +#endif RET diff --git a/src/internal/bytealg/indexbyte_loong64.s b/src/internal/bytealg/indexbyte_loong64.s index 604970549f..03e0660973 100644 --- a/src/internal/bytealg/indexbyte_loong64.s +++ b/src/internal/bytealg/indexbyte_loong64.s @@ -5,11 +5,18 @@ #include "go_asm.h" #include "textflag.h" -TEXT ·IndexByte(SB),NOSPLIT,$0-40 +TEXT ·IndexByte(SB),NOSPLIT,$0-40 +#ifndef GOEXPERIMENT_regabiargs MOVV b_base+0(FP), R4 MOVV b_len+8(FP), R5 - MOVBU c+24(FP), R6 // byte to find - MOVV R4, R7 // store 
base for later + MOVBU c+24(FP), R7 // byte to find +#endif + // R4 = b_base + // R5 = b_len + // R6 = b_cap (unused) + // R7 = byte to find + AND $0xff, R7 + MOVV R4, R6 // store base for later ADDV R4, R5 // end ADDV $-1, R4 @@ -18,21 +25,30 @@ loop: ADDV $1, R4 BEQ R4, R5, notfound MOVBU (R4), R8 - BNE R6, R8, loop + BNE R7, R8, loop - SUBV R7, R4 // remove base + SUBV R6, R4 // remove base +#ifndef GOEXPERIMENT_regabiargs MOVV R4, ret+32(FP) +#endif RET notfound: MOVV $-1, R4 +#ifndef GOEXPERIMENT_regabiargs MOVV R4, ret+32(FP) +#endif RET -TEXT ·IndexByteString(SB),NOSPLIT,$0-32 +TEXT ·IndexByteString(SB),NOSPLIT,$0-32 +#ifndef GOEXPERIMENT_regabiargs MOVV s_base+0(FP), R4 MOVV s_len+8(FP), R5 MOVBU c+16(FP), R6 // byte to find +#endif + // R4 = s_base + // R5 = s_len + // R6 = byte to find MOVV R4, R7 // store base for later ADDV R4, R5 // end ADDV $-1, R4 @@ -45,10 +61,14 @@ loop: BNE R6, R8, loop SUBV R7, R4 // remove base +#ifndef GOEXPERIMENT_regabiargs MOVV R4, ret+24(FP) +#endif RET notfound: MOVV $-1, R4 +#ifndef GOEXPERIMENT_regabiargs MOVV R4, ret+24(FP) +#endif RET diff --git a/src/internal/cfg/cfg.go b/src/internal/cfg/cfg.go index 2af0ec7078..08d210b797 100644 --- a/src/internal/cfg/cfg.go +++ b/src/internal/cfg/cfg.go @@ -36,6 +36,7 @@ const KnownEnv = ` GOAMD64 GOARCH GOARM + GOARM64 GOBIN GOCACHE GOCACHEPROG @@ -57,6 +58,7 @@ const KnownEnv = ` GOPPC64 GOPRIVATE GOPROXY + GORISCV64 GOROOT GOSUMDB GOTMPDIR diff --git a/src/internal/chacha8rand/chacha8.go b/src/internal/chacha8rand/chacha8.go new file mode 100644 index 0000000000..0e601c23ac --- /dev/null +++ b/src/internal/chacha8rand/chacha8.go @@ -0,0 +1,198 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package chacha8rand implements a pseudorandom generator +// based on ChaCha8. It is used by both runtime and math/rand/v2 +// and must have no dependencies. 
+package chacha8rand + +const ( + ctrInc = 4 // increment counter by 4 between block calls + ctrMax = 16 // reseed when counter reaches 16 + chunk = 32 // each chunk produced by block is 32 uint64s + reseed = 4 // reseed with 4 words +) + +// block is the chacha8rand block function. +func block(seed *[4]uint64, blocks *[32]uint64, counter uint32) + +// A State holds the state for a single random generator. +// It must be used from one goroutine at a time. +// If used by multiple goroutines at a time, the goroutines +// may see the same random values, but the code will not +// crash or cause out-of-bounds memory accesses. +type State struct { + buf [32]uint64 + seed [4]uint64 + i uint32 + n uint32 + c uint32 +} + +// Next returns the next random value, along with a boolean +// indicating whether one was available. +// If one is not available, the caller should call Refill +// and then repeat the call to Next. +// +// Next is //go:nosplit to allow its use in the runtime +// with per-m data without holding the per-m lock. +// +//go:nosplit +func (s *State) Next() (uint64, bool) { + i := s.i + if i >= s.n { + return 0, false + } + s.i = i + 1 + return s.buf[i&31], true // i&31 eliminates bounds check +} + +// Init seeds the State with the given seed value. +func (s *State) Init(seed [32]byte) { + s.Init64([4]uint64{ + leUint64(seed[0*8:]), + leUint64(seed[1*8:]), + leUint64(seed[2*8:]), + leUint64(seed[3*8:]), + }) +} + +// Init64 seeds the state with the given seed value. +func (s *State) Init64(seed [4]uint64) { + s.seed = seed + block(&s.seed, &s.buf, 0) + s.c = 0 + s.i = 0 + s.n = chunk +} + +// Refill refills the state with more random values. +// After a call to Refill, an immediate call to Next will succeed +// (unless multiple goroutines are incorrectly sharing a state). +func (s *State) Refill() { + s.c += ctrInc + if s.c == ctrMax { + // Reseed with generated uint64s for forward secrecy. 
+ // Normally this is done immediately after computing a block, + // but we do it immediately before computing the next block, + // to allow a much smaller serialized state (just the seed plus offset). + // This gives a delayed benefit for the forward secrecy + // (you can reconstruct the recent past given a memory dump), + // which we deem acceptable in exchange for the reduced size. + s.seed[0] = s.buf[len(s.buf)-reseed+0] + s.seed[1] = s.buf[len(s.buf)-reseed+1] + s.seed[2] = s.buf[len(s.buf)-reseed+2] + s.seed[3] = s.buf[len(s.buf)-reseed+3] + s.c = 0 + } + block(&s.seed, &s.buf, s.c) + s.i = 0 + s.n = uint32(len(s.buf)) + if s.c == ctrMax-ctrInc { + s.n = uint32(len(s.buf)) - reseed + } +} + +// Reseed reseeds the state with new random values. +// After a call to Reseed, any previously returned random values +// have been erased from the memory of the state and cannot be +// recovered. +func (s *State) Reseed() { + var seed [4]uint64 + for i := range seed { + for { + x, ok := s.Next() + if ok { + seed[i] = x + break + } + s.Refill() + } + } + s.Init64(seed) +} + +// Marshal marshals the state into a byte slice. +// Marshal and Unmarshal are functions, not methods, +// so that they will not be linked into the runtime +// when it uses the State struct, since the runtime +// does not need these. +func Marshal(s *State) []byte { + data := make([]byte, 6*8) + copy(data, "chacha8:") + used := (s.c/ctrInc)*chunk + s.i + bePutUint64(data[1*8:], uint64(used)) + for i, seed := range s.seed { + lePutUint64(data[(2+i)*8:], seed) + } + return data +} + +type errUnmarshalChaCha8 struct{} + +func (*errUnmarshalChaCha8) Error() string { + return "invalid ChaCha8 encoding" +} + +// Unmarshal unmarshals the state from a byte slice. 
+func Unmarshal(s *State, data []byte) error { + if len(data) != 6*8 || string(data[:8]) != "chacha8:" { + return new(errUnmarshalChaCha8) + } + used := beUint64(data[1*8:]) + if used > (ctrMax/ctrInc)*chunk-reseed { + return new(errUnmarshalChaCha8) + } + for i := range s.seed { + s.seed[i] = leUint64(data[(2+i)*8:]) + } + s.c = ctrInc * (uint32(used) / chunk) + block(&s.seed, &s.buf, s.c) + s.i = uint32(used) % chunk + s.n = chunk + if s.c == ctrMax-ctrInc { + s.n = chunk - reseed + } + return nil +} + +// binary.bigEndian.Uint64, copied to avoid dependency +func beUint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | + uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56 +} + +// binary.bigEndian.PutUint64, copied to avoid dependency +func bePutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v >> 56) + b[1] = byte(v >> 48) + b[2] = byte(v >> 40) + b[3] = byte(v >> 32) + b[4] = byte(v >> 24) + b[5] = byte(v >> 16) + b[6] = byte(v >> 8) + b[7] = byte(v) +} + +// binary.littleEndian.Uint64, copied to avoid dependency +func leUint64(b []byte) uint64 { + _ = b[7] // bounds check hint to compiler; see golang.org/issue/14808 + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// binary.littleEndian.PutUint64, copied to avoid dependency +func lePutUint64(b []byte, v uint64) { + _ = b[7] // early bounds check to guarantee safety of writes below + b[0] = byte(v) + b[1] = byte(v >> 8) + b[2] = byte(v >> 16) + b[3] = byte(v >> 24) + b[4] = byte(v >> 32) + b[5] = byte(v >> 40) + b[6] = byte(v >> 48) + b[7] = byte(v >> 56) +} diff --git a/src/internal/chacha8rand/chacha8_amd64.s b/src/internal/chacha8rand/chacha8_amd64.s new file mode 100644 index 
0000000000..b56deb3b0b --- /dev/null +++ b/src/internal/chacha8rand/chacha8_amd64.s @@ -0,0 +1,174 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// ChaCha8 is ChaCha with 8 rounds. +// See https://cr.yp.to/chacha/chacha-20080128.pdf. +// See chacha8_generic.go for additional details. + +// ROL rotates the uint32s in register R left by N bits, using temporary T. +#define ROL(N, R, T) \ + MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R + +// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL16(R, T) PSHUFB ·rol16<>(SB), R +#else +#define ROL16(R, T) ROL(16, R, T) +#endif + +// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL8(R, T) PSHUFB ·rol8<>(SB), R +#else +#define ROL8(R, T) ROL(8, R, T) +#endif + +// QR is the ChaCha quarter-round on A, B, C, and D. T is an available temporary. +#define QR(A, B, C, D, T) \ + PADDD B, A; PXOR A, D; ROL16(D, T); \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B; \ + PADDD B, A; PXOR A, D; ROL8(D, T); \ + PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B + +// REPLREG replicates the register R into 4 uint32s in XR. +#define REPLREG(R, XR) \ + MOVQ R, XR; \ + PSHUFD $0, XR, XR + +// REPL replicates the uint32 constant val into 4 uint32s in XR. It smashes DX. +#define REPL(val, XR) \ + MOVL $val, DX; \ + REPLREG(DX, XR) + +// SEED copies the off'th uint32 of the seed into the register XR, +// replicating it into all four stripes of the register. +#define SEED(off, reg, XR) \ + MOVL (4*off)(AX), reg; \ + REPLREG(reg, XR) \ + +// block runs 4 ChaCha8 block transformations in the four stripes of the X registers. 
+ +// func block(seed *[8]uint32, blocks *[16][4]uint32, counter uint32) +TEXT ·block(SB), NOSPLIT, $16 + // seed in AX + // blocks in BX + // counter in CX + + // Load initial constants into top row. + REPL(0x61707865, X0) + REPL(0x3320646e, X1) + REPL(0x79622d32, X2) + REPL(0x6b206574, X3) + + // Load counter into bottom left cell. + // Each stripe gets a different counter: 0, 1, 2, 3. + // (PINSRD is not available in GOAMD64_v1, + // so just do it in memory on all systems. + // This is not on the critical path.) + MOVL CX, 0(SP) + INCL CX + MOVL CX, 4(SP) + INCL CX + MOVL CX, 8(SP) + INCL CX + MOVL CX, 12(SP) + MOVOU 0(SP), X12 + + // Load seed words into next two rows and into DI, SI, R8..R13 + SEED(0, DI, X4) + SEED(1, SI, X5) + SEED(2, R8, X6) + SEED(3, R9, X7) + SEED(4, R10, X8) + SEED(5, R11, X9) + SEED(6, R12, X10) + SEED(7, R13, X11) + + // Zeros for remaining two matrix entries. + // We have just enough XMM registers to hold the state, + // without one for the temporary, so we flush and restore + // some values to and from memory to provide a temporary. + // The initial temporary is X15, so zero its memory instead + // of X15 itself. + MOVL $0, DX + MOVQ DX, X13 + MOVQ DX, X14 + MOVOU X14, (15*16)(BX) + + // 4 iterations. Each iteration is 8 quarter-rounds. + MOVL $4, DX +loop: + QR(X0, X4, X8, X12, X15) + MOVOU X4, (4*16)(BX) // save X4 + QR(X1, X5, X9, X13, X15) + MOVOU (15*16)(BX), X15 // reload X15; temp now X4 + QR(X2, X6, X10, X14, X4) + QR(X3, X7, X11, X15, X4) + + QR(X0, X5, X10, X15, X4) + MOVOU X15, (15*16)(BX) // save X15 + QR(X1, X6, X11, X12, X4) + MOVOU (4*16)(BX), X4 // reload X4; temp now X15 + QR(X2, X7, X8, X13, X15) + QR(X3, X4, X9, X14, X15) + + DECL DX + JNZ loop + + // Store interlaced blocks back to output buffer, + // adding original seed along the way. + + // First the top and bottom rows. 
+ MOVOU X0, (0*16)(BX) + MOVOU X1, (1*16)(BX) + MOVOU X2, (2*16)(BX) + MOVOU X3, (3*16)(BX) + MOVOU X12, (12*16)(BX) + MOVOU X13, (13*16)(BX) + MOVOU X14, (14*16)(BX) + // X15 has already been stored. + + // Now we have X0-X3, X12-X15 available for temporaries. + // Add seed rows back to output. We left seed in DI, SI, R8..R13 above. + REPLREG(DI, X0) + REPLREG(SI, X1) + REPLREG(R8, X2) + REPLREG(R9, X3) + REPLREG(R10, X12) + REPLREG(R11, X13) + REPLREG(R12, X14) + REPLREG(R13, X15) + PADDD X0, X4 + PADDD X1, X5 + PADDD X2, X6 + PADDD X3, X7 + PADDD X12, X8 + PADDD X13, X9 + PADDD X14, X10 + PADDD X15, X11 + MOVOU X4, (4*16)(BX) + MOVOU X5, (5*16)(BX) + MOVOU X6, (6*16)(BX) + MOVOU X7, (7*16)(BX) + MOVOU X8, (8*16)(BX) + MOVOU X9, (9*16)(BX) + MOVOU X10, (10*16)(BX) + MOVOU X11, (11*16)(BX) + + MOVL $0, AX + MOVQ AX, X15 // must be 0 on return + + RET + +// rotate left 16 indexes for PSHUFB +GLOBL ·rol16<>(SB), NOPTR|RODATA, $16 +DATA ·rol16<>+0(SB)/8, $0x0504070601000302 +DATA ·rol16<>+8(SB)/8, $0x0D0C0F0E09080B0A + +// rotate left 8 indexes for PSHUFB +GLOBL ·rol8<>(SB), NOPTR|RODATA, $16 +DATA ·rol8<>+0(SB)/8, $0x0605040702010003 +DATA ·rol8<>+8(SB)/8, $0x0E0D0C0F0A09080B diff --git a/src/internal/chacha8rand/chacha8_arm64.s b/src/internal/chacha8rand/chacha8_arm64.s new file mode 100644 index 0000000000..18e34dd148 --- /dev/null +++ b/src/internal/chacha8rand/chacha8_arm64.s @@ -0,0 +1,104 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// QR is the ChaCha quarter-round on A, B, C, and D. +// V30 is used as a temporary, and V31 is assumed to +// hold the index table for rotate left 8. 
+#define QR(A, B, C, D) \ + VADD A.S4, B.S4, A.S4; VEOR D.B16, A.B16, D.B16; VREV32 D.H8, D.H8; \ + VADD C.S4, D.S4, C.S4; VEOR B.B16, C.B16, V30.B16; VSHL $12, V30.S4, B.S4; VSRI $20, V30.S4, B.S4 \ + VADD A.S4, B.S4, A.S4; VEOR D.B16, A.B16, D.B16; VTBL V31.B16, [D.B16], D.B16; \ + VADD C.S4, D.S4, C.S4; VEOR B.B16, C.B16, V30.B16; VSHL $7, V30.S4, B.S4; VSRI $25, V30.S4, B.S4 + +// block runs 4 ChaCha8 block transformations in the four stripes of the V registers. + +// func block(seed *[8]uint32, blocks *[4][16]uint32, counter uint32) +TEXT ·block(SB), NOSPLIT, $16 + // seed in R0 + // blocks in R1 + // counter in R2 + + // Load initial constants into top row. + MOVD $·chachaConst(SB), R10 + VLD4R (R10), [V0.S4, V1.S4, V2.S4, V3.S4] + + // Load increment and rotate 8 constants into V30, V31. + MOVD $·chachaIncRot(SB), R11 + VLD1 (R11), [V30.S4, V31.S4] + + VLD4R.P 16(R0), [V4.S4, V5.S4, V6.S4, V7.S4] + VLD4R.P 16(R0), [V8.S4, V9.S4, V10.S4, V11.S4] + + // store counter to memory to replicate its uint32 halfs back out + MOVW R2, 0(RSP) + VLD1R 0(RSP), [V12.S4] + + // Add 0, 1, 2, 3 to counter stripes. + VADD V30.S4, V12.S4, V12.S4 + + // Zeros for remaining two matrix entries. + VEOR V13.B16, V13.B16, V13.B16 + VEOR V14.B16, V14.B16, V14.B16 + VEOR V15.B16, V15.B16, V15.B16 + + // Save seed state for adding back later. + VMOV V4.B16, V20.B16 + VMOV V5.B16, V21.B16 + VMOV V6.B16, V22.B16 + VMOV V7.B16, V23.B16 + VMOV V8.B16, V24.B16 + VMOV V9.B16, V25.B16 + VMOV V10.B16, V26.B16 + VMOV V11.B16, V27.B16 + + // 4 iterations. Each iteration is 8 quarter-rounds. + MOVD $4, R0 +loop: + QR(V0, V4, V8, V12) + QR(V1, V5, V9, V13) + QR(V2, V6, V10, V14) + QR(V3, V7, V11, V15) + + QR(V0, V5, V10, V15) + QR(V1, V6, V11, V12) + QR(V2, V7, V8, V13) + QR(V3, V4, V9, V14) + + SUB $1, R0 + CBNZ R0, loop + + // Add seed back. 
+ VADD V4.S4, V20.S4, V4.S4 + VADD V5.S4, V21.S4, V5.S4 + VADD V6.S4, V22.S4, V6.S4 + VADD V7.S4, V23.S4, V7.S4 + VADD V8.S4, V24.S4, V8.S4 + VADD V9.S4, V25.S4, V9.S4 + VADD V10.S4, V26.S4, V10.S4 + VADD V11.S4, V27.S4, V11.S4 + + // Store interlaced blocks back to output buffer. + VST1.P [ V0.B16, V1.B16, V2.B16, V3.B16], 64(R1) + VST1.P [ V4.B16, V5.B16, V6.B16, V7.B16], 64(R1) + VST1.P [ V8.B16, V9.B16, V10.B16, V11.B16], 64(R1) + VST1.P [V12.B16, V13.B16, V14.B16, V15.B16], 64(R1) + RET + +GLOBL ·chachaConst(SB), NOPTR|RODATA, $32 +DATA ·chachaConst+0x00(SB)/4, $0x61707865 +DATA ·chachaConst+0x04(SB)/4, $0x3320646e +DATA ·chachaConst+0x08(SB)/4, $0x79622d32 +DATA ·chachaConst+0x0c(SB)/4, $0x6b206574 + +GLOBL ·chachaIncRot(SB), NOPTR|RODATA, $32 +DATA ·chachaIncRot+0x00(SB)/4, $0x00000000 +DATA ·chachaIncRot+0x04(SB)/4, $0x00000001 +DATA ·chachaIncRot+0x08(SB)/4, $0x00000002 +DATA ·chachaIncRot+0x0c(SB)/4, $0x00000003 +DATA ·chachaIncRot+0x10(SB)/4, $0x02010003 +DATA ·chachaIncRot+0x14(SB)/4, $0x06050407 +DATA ·chachaIncRot+0x18(SB)/4, $0x0A09080B +DATA ·chachaIncRot+0x1c(SB)/4, $0x0E0D0C0F diff --git a/src/internal/chacha8rand/chacha8_generic.go b/src/internal/chacha8rand/chacha8_generic.go new file mode 100644 index 0000000000..2a0f5cd1d6 --- /dev/null +++ b/src/internal/chacha8rand/chacha8_generic.go @@ -0,0 +1,235 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// ChaCha8 is ChaCha with 8 rounds. +// See https://cr.yp.to/chacha/chacha-20080128.pdf. +// +// ChaCha8 operates on a 4x4 matrix of uint32 values, initially set to: +// +// const1 const2 const3 const4 +// seed seed seed seed +// seed seed seed seed +// counter64 0 0 +// +// We use the same constants as ChaCha20 does, a random seed, +// and a counter. Running ChaCha8 on this input produces +// a 4x4 matrix of pseudo-random values with as much entropy +// as the seed. 
+// +// Given SIMD registers that can hold N uint32s, it is possible +// to run N ChaCha8 block transformations in parallel by filling +// the first register with the N copies of const1, the second +// with N copies of const2, and so on, and then running the operations. +// +// Each iteration of ChaCha8Rand operates over 32 bytes of input and +// produces 992 bytes of RNG output, plus 32 bytes of input for the next +// iteration. +// +// The 32 bytes of input are used as a ChaCha8 key, with a zero nonce, to +// produce 1024 bytes of output (16 blocks, with counters 0 to 15). +// First, for each block, the values 0x61707865, 0x3320646e, 0x79622d32, +// 0x6b206574 are subtracted from the 32-bit little-endian words at +// position 0, 1, 2, and 3 respectively, and an increasing counter +// starting at zero is subtracted from each word at position 12. Then, +// this stream is permuted such that for each sequence of four blocks, +// first we output the first four bytes of each block, then the next four +// bytes of each block, and so on. Finally, the last 32 bytes of output +// are used as the input of the next iteration, and the remaining 992 +// bytes are the RNG output. +// +// See https://c2sp.org/chacha8rand for additional details. +// +// Normal ChaCha20 implementations for encryption use this same +// parallelism but then have to deinterlace the results so that +// it appears the blocks were generated separately. For the purposes +// of generating random numbers, the interlacing is fine. +// We are simply locked in to preserving the 4-way interlacing +// in any future optimizations. +package chacha8rand + +import ( + "internal/goarch" + "unsafe" +) + +// setup sets up 4 ChaCha8 blocks in b32 with the counter and seed. +// Note that b32 is [16][4]uint32 not [4][16]uint32: the blocks are interlaced +// the same way they would be in a 4-way SIMD implementations. 
+func setup(seed *[4]uint64, b32 *[16][4]uint32, counter uint32) { + // Convert to uint64 to do half as many stores to memory. + b := (*[16][2]uint64)(unsafe.Pointer(b32)) + + // Constants; same as in ChaCha20: "expand 32-byte k" + b[0][0] = 0x61707865_61707865 + b[0][1] = 0x61707865_61707865 + + b[1][0] = 0x3320646e_3320646e + b[1][1] = 0x3320646e_3320646e + + b[2][0] = 0x79622d32_79622d32 + b[2][1] = 0x79622d32_79622d32 + + b[3][0] = 0x6b206574_6b206574 + b[3][1] = 0x6b206574_6b206574 + + // Seed values. + var x64 uint64 + var x uint32 + + x = uint32(seed[0]) + x64 = uint64(x)<<32 | uint64(x) + b[4][0] = x64 + b[4][1] = x64 + + x = uint32(seed[0] >> 32) + x64 = uint64(x)<<32 | uint64(x) + b[5][0] = x64 + b[5][1] = x64 + + x = uint32(seed[1]) + x64 = uint64(x)<<32 | uint64(x) + b[6][0] = x64 + b[6][1] = x64 + + x = uint32(seed[1] >> 32) + x64 = uint64(x)<<32 | uint64(x) + b[7][0] = x64 + b[7][1] = x64 + + x = uint32(seed[2]) + x64 = uint64(x)<<32 | uint64(x) + b[8][0] = x64 + b[8][1] = x64 + + x = uint32(seed[2] >> 32) + x64 = uint64(x)<<32 | uint64(x) + b[9][0] = x64 + b[9][1] = x64 + + x = uint32(seed[3]) + x64 = uint64(x)<<32 | uint64(x) + b[10][0] = x64 + b[10][1] = x64 + + x = uint32(seed[3] >> 32) + x64 = uint64(x)<<32 | uint64(x) + b[11][0] = x64 + b[11][1] = x64 + + // Counters. + if goarch.BigEndian { + b[12][0] = uint64(counter+0)<<32 | uint64(counter+1) + b[12][1] = uint64(counter+2)<<32 | uint64(counter+3) + } else { + b[12][0] = uint64(counter+0) | uint64(counter+1)<<32 + b[12][1] = uint64(counter+2) | uint64(counter+3)<<32 + } + + // Zeros. + b[13][0] = 0 + b[13][1] = 0 + b[14][0] = 0 + b[14][1] = 0 + + b[15][0] = 0 + b[15][1] = 0 +} + +func _() { + // block and block_generic must have same type + x := block + x = block_generic + _ = x +} + +// block_generic is the non-assembly block implementation, +// for use on systems without special assembly. 
+// Even on such systems, it is quite fast: on GOOS=386, +// ChaCha8 using this code generates random values faster than PCG-DXSM. +func block_generic(seed *[4]uint64, buf *[32]uint64, counter uint32) { + b := (*[16][4]uint32)(unsafe.Pointer(buf)) + + setup(seed, b, counter) + + for i := range b[0] { + // Load block i from b[*][i] into local variables. + b0 := b[0][i] + b1 := b[1][i] + b2 := b[2][i] + b3 := b[3][i] + b4 := b[4][i] + b5 := b[5][i] + b6 := b[6][i] + b7 := b[7][i] + b8 := b[8][i] + b9 := b[9][i] + b10 := b[10][i] + b11 := b[11][i] + b12 := b[12][i] + b13 := b[13][i] + b14 := b[14][i] + b15 := b[15][i] + + // 4 iterations of eight quarter-rounds each is 8 rounds + for round := 0; round < 4; round++ { + b0, b4, b8, b12 = qr(b0, b4, b8, b12) + b1, b5, b9, b13 = qr(b1, b5, b9, b13) + b2, b6, b10, b14 = qr(b2, b6, b10, b14) + b3, b7, b11, b15 = qr(b3, b7, b11, b15) + + b0, b5, b10, b15 = qr(b0, b5, b10, b15) + b1, b6, b11, b12 = qr(b1, b6, b11, b12) + b2, b7, b8, b13 = qr(b2, b7, b8, b13) + b3, b4, b9, b14 = qr(b3, b4, b9, b14) + } + + // Store block i back into b[*][i]. + // Add b4..b11 back to the original key material, + // like in ChaCha20, to avoid trivial invertibility. + // There is no entropy in b0..b3 and b12..b15 + // so we can skip the additions and save some time. + b[0][i] = b0 + b[1][i] = b1 + b[2][i] = b2 + b[3][i] = b3 + b[4][i] += b4 + b[5][i] += b5 + b[6][i] += b6 + b[7][i] += b7 + b[8][i] += b8 + b[9][i] += b9 + b[10][i] += b10 + b[11][i] += b11 + b[12][i] = b12 + b[13][i] = b13 + b[14][i] = b14 + b[15][i] = b15 + } + + if goarch.BigEndian { + // On a big-endian system, reading the uint32 pairs as uint64s + // will word-swap them compared to little-endian, so we word-swap + // them here first to make the next swap get the right answer. + for i, x := range buf { + buf[i] = x>>32 | x<<32 + } + } +} + +// qr is the (inlinable) ChaCha8 quarter round. 
+func qr(a, b, c, d uint32) (_a, _b, _c, _d uint32) { + a += b + d ^= a + d = d<<16 | d>>16 + c += d + b ^= c + b = b<<12 | b>>20 + a += b + d ^= a + d = d<<8 | d>>24 + c += d + b ^= c + b = b<<7 | b>>25 + return a, b, c, d +} diff --git a/src/internal/chacha8rand/chacha8_stub.s b/src/internal/chacha8rand/chacha8_stub.s new file mode 100644 index 0000000000..09be558fcb --- /dev/null +++ b/src/internal/chacha8rand/chacha8_stub.s @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !amd64 && !arm64 + +#include "textflag.h" + +// func block(counter uint64, seed *[8]uint32, blocks *[16][4]uint32) +TEXT ·block(SB), NOSPLIT, $0 + JMP ·block_generic(SB) + diff --git a/src/internal/chacha8rand/export_test.go b/src/internal/chacha8rand/export_test.go new file mode 100644 index 0000000000..728aded682 --- /dev/null +++ b/src/internal/chacha8rand/export_test.go @@ -0,0 +1,12 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha8rand + +var Block = block +var Block_generic = block_generic + +func Seed(s *State) [4]uint64 { + return s.seed +} diff --git a/src/internal/chacha8rand/rand_test.go b/src/internal/chacha8rand/rand_test.go new file mode 100644 index 0000000000..2975013bfa --- /dev/null +++ b/src/internal/chacha8rand/rand_test.go @@ -0,0 +1,202 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package chacha8rand_test + +import ( + "bytes" + "encoding/binary" + "fmt" + . 
"internal/chacha8rand" + "slices" + "testing" +) + +func TestOutput(t *testing.T) { + var s State + s.Init(seed) + for i := range output { + for { + x, ok := s.Next() + if ok { + if x != output[i] { + t.Errorf("#%d: have %#x want %#x", i, x, output[i]) + } + break + } + s.Refill() + } + } +} + +func TestMarshal(t *testing.T) { + var s State + s.Init(seed) + for i := range output { + for { + b := Marshal(&s) + s = State{} + err := Unmarshal(&s, b) + if err != nil { + t.Fatalf("#%d: Unmarshal: %v", i, err) + } + x, ok := s.Next() + if ok { + if x != output[i] { + t.Fatalf("#%d: have %#x want %#x", i, x, output[i]) + } + break + } + s.Refill() + } + } +} + +func TestReseed(t *testing.T) { + var s State + s.Init(seed) + old := Seed(&s) + s.Reseed() + if Seed(&s) == old { + t.Errorf("Reseed did not change seed") + } +} + +func BenchmarkBlock(b *testing.B) { + var seed [4]uint64 + var blocks [32]uint64 + + for i := 0; i < b.N; i++ { + Block(&seed, &blocks, 0) + } + b.SetBytes(32 * 8) +} + +func TestBlockGeneric(t *testing.T) { + var b1, b2 [32]uint64 + s := seed // byte seed + seed := [4]uint64{ + binary.LittleEndian.Uint64(s[0*8:]), + binary.LittleEndian.Uint64(s[1*8:]), + binary.LittleEndian.Uint64(s[2*8:]), + binary.LittleEndian.Uint64(s[3*8:]), + } + + Block(&seed, &b1, 4) + Block_generic(&seed, &b2, 4) + if !slices.Equal(b1[:], b2[:]) { + var out bytes.Buffer + fmt.Fprintf(&out, "%-18s %-18s\n", "block", "block_generic") + for i := range b1 { + suffix := "" + if b1[i] != b2[i] { + suffix = " mismatch!" + } + fmt.Fprintf(&out, "%#016x %#016x%s\n", b1[i], b2[i], suffix) + } + t.Errorf("block and block_generic disagree:\n%s", out.String()) + } +} + +// Golden output test to make sure algorithm never changes, +// so that its use in math/rand/v2 stays stable. +// See https://c2sp.org/chacha8rand. 
+ +var seed = [32]byte([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456")) + +var output = []uint64{ + 0xb773b6063d4616a5, 0x1160af22a66abc3c, 0x8c2599d9418d287c, 0x7ee07e037edc5cd6, + 0xcfaa9ee02d1c16ad, 0x0e090eef8febea79, 0x3c82d271128b5b3e, 0x9c5addc11252a34f, + 0xdf79bb617d6ceea6, 0x36d553591f9d736a, 0xeef0d14e181ee01f, 0x089bfc760ae58436, + 0xd9e52b59cc2ad268, 0xeb2fb4444b1b8aba, 0x4f95c8a692c46661, 0xc3c6323217cae62c, + 0x91ebb4367f4e2e7e, 0x784cf2c6a0ec9bc6, 0x5c34ec5c34eabe20, 0x4f0a8f515570daa8, + 0xfc35dcb4113d6bf2, 0x5b0da44c645554bc, 0x6d963da3db21d9e1, 0xeeaefc3150e500f3, + 0x2d37923dda3750a5, 0x380d7a626d4bc8b0, 0xeeaf68ede3d7ee49, 0xf4356695883b717c, + 0x846a9021392495a4, 0x8e8510549630a61b, 0x18dc02545dbae493, 0x0f8f9ff0a65a3d43, + 0xccf065f7190ff080, 0xfd76d1aa39673330, 0x95d232936cba6433, 0x6c7456d1070cbd17, + 0x462acfdaff8c6562, 0x5bafab866d34fc6a, 0x0c862f78030a2988, 0xd39a83e407c3163d, + 0xc00a2b7b45f22ebf, 0x564307c62466b1a9, 0x257e0424b0c072d4, 0x6fb55e99496c28fe, + 0xae9873a88f5cd4e0, 0x4657362ac60d3773, 0x1c83f91ecdf23e8e, 0x6fdc0792c15387c0, + 0x36dad2a30dfd2b5c, 0xa4b593290595bdb7, 0x4de18934e4cc02c5, 0xcdc0d604f015e3a7, + 0xfba0dbf69ad80321, 0x60e8bea3d139de87, 0xd18a4d851ef48756, 0x6366447c2215f34a, + 0x05682e97d3d007ee, 0x4c0e8978c6d54ab2, 0xcf1e9f6a6712edc2, 0x061439414c80cfd3, + 0xd1a8b6e2745c0ead, 0x31a7918d45c410e8, 0xabcc61ad90216eec, 0x4040d92d2032a71a, + 0x3cd2f66ffb40cd68, 0xdcd051c07295857a, 0xeab55cbcd9ab527e, 0x18471dce781bdaac, + 0xf7f08cd144dc7252, 0x5804e0b13d7f40d1, 0x5cb1a446e4b2d35b, 0xe6d4a728d2138a06, + 0x05223e40ca60dad8, 0x2d61ec3206ac6a68, 0xab692356874c17b8, 0xc30954417676de1c, + 0x4f1ace3732225624, 0xfba9510813988338, 0x997f200f52752e11, 0x1116aaafe86221fa, + 0x07ce3b5cb2a13519, 0x2956bc72bc458314, 0x4188b7926140eb78, 0x56ca6dbfd4adea4d, + 0x7fe3c22349340ce5, 0x35c08f9c37675f8a, 0x11e1c7fbef5ed521, 0x98adc8464ec1bc75, + 0xd163b2c73d1203f8, 0x8c761ee043a2f3f3, 0x24b99d6accecd7b7, 0x793e31aa112f0370, + 
0x8e87dc2a19285139, 0x4247ae04f7096e25, 0x514f3122926fe20f, 0xdc6fb3f045d2a7e9, + 0x15cb30cecdd18eba, 0xcbc7fdecf6900274, 0x3fb5c696dc8ba021, 0xd1664417c8d274e6, + 0x05f7e445ea457278, 0xf920bbca1b9db657, 0x0c1950b4da22cb99, 0xf875baf1af09e292, + 0xbed3d7b84250f838, 0xf198e8080fd74160, 0xc9eda51d9b7ea703, 0xf709ef55439bf8f6, + 0xd20c74feebf116fc, 0x305668eb146d7546, 0x829af3ec10d89787, 0x15b8f9697b551dbc, + 0xfc823c6c8e64b8c9, 0x345585e8183b40bc, 0x674b4171d6581368, 0x1234d81cd670e9f7, + 0x0e505210d8a55e19, 0xe8258d69eeeca0dc, 0x05d4c452e8baf67e, 0xe8dbe30116a45599, + 0x1cf08ce1b1176f00, 0xccf7d0a4b81ecb49, 0x303fea136b2c430e, 0x861d6c139c06c871, + 0x5f41df72e05e0487, 0x25bd7e1e1ae26b1d, 0xbe9f4004d662a41d, 0x65bf58d483188546, + 0xd1b27cff69db13cc, 0x01a6663372c1bb36, 0x578dd7577b727f4d, 0x19c78f066c083cf6, + 0xdbe014d4f9c391bb, 0x97fbb2dd1d13ffb3, 0x31c91e0af9ef8d4f, 0x094dfc98402a43ba, + 0x069bd61bea37b752, 0x5b72d762e8d986ca, 0x72ee31865904bc85, 0xd1f5fdc5cd36c33e, + 0xba9b4980a8947cad, 0xece8f05eac49ab43, 0x65fe1184abae38e7, 0x2d7cb9dea5d31452, + 0xcc71489476e467e3, 0x4c03a258a578c68c, 0x00efdf9ecb0fd8fc, 0x9924cad471e2666d, + 0x87f8668318f765e9, 0xcb4dc57c1b55f5d8, 0xd373835a86604859, 0xe526568b5540e482, + 0x1f39040f08586fec, 0xb764f3f00293f8e6, 0x049443a2f6bd50a8, 0x76fec88697d3941a, + 0x3efb70d039bae7a2, 0xe2f4611368eca8a8, 0x7c007a96e01d2425, 0xbbcce5768e69c5bf, + 0x784fb4985c42aac3, 0xf72b5091aa223874, 0x3630333fb1e62e07, 0x8e7319ebdebbb8de, + 0x2a3982bca959fa00, 0xb2b98b9f964ba9b3, 0xf7e31014adb71951, 0xebd0fca3703acc82, + 0xec654e2a2fe6419a, 0xb326132d55a52e2c, 0x2248c57f44502978, 0x32710c2f342daf16, + 0x0517b47b5acb2bec, 0x4c7a718fca270937, 0xd69142bed0bcc541, 0xe40ebcb8ff52ce88, + 0x3e44a2dbc9f828d4, 0xc74c2f4f8f873f58, 0x3dbf648eb799e45b, 0x33f22475ee0e86f8, + 0x1eb4f9ee16d47f65, 0x40f8d2b8712744e3, 0xb886b4da3cb14572, 0x2086326fbdd6f64d, + 0xcc3de5907dd882b9, 0xa2e8b49a5ee909df, 0xdbfb8e7823964c10, 0x70dd6089ef0df8d5, + 0x30141663cdd9c99f, 
0x04b805325c240365, 0x7483d80314ac12d6, 0x2b271cb91aa7f5f9, + 0x97e2245362abddf0, 0x5a84f614232a9fab, 0xf71125fcda4b7fa2, 0x1ca5a61d74b27267, + 0x38cc6a9b3adbcb45, 0xdde1bb85dc653e39, 0xe9d0c8fa64f89fd4, 0x02c5fb1ecd2b4188, + 0xf2bd137bca5756e5, 0xadefe25d121be155, 0x56cd1c3c5d893a8e, 0x4c50d337beb65bb9, + 0x918c5151675cf567, 0xaba649ffcfb56a1e, 0x20c74ab26a2247cd, 0x71166bac853c08da, + 0xb07befe2e584fc5d, 0xda45ff2a588dbf32, 0xdb98b03c4d75095e, 0x60285ae1aaa65a4c, + 0xf93b686a263140b8, 0xde469752ee1c180e, 0xcec232dc04129aae, 0xeb916baa1835ea04, + 0xd49c21c8b64388ff, 0x72a82d9658864888, 0x003348ef7eac66a8, 0x7f6f67e655b209eb, + 0x532ffb0b7a941b25, 0xd940ade6128deede, 0xdf24f2a1af89fe23, 0x95aa3b4988195ae0, + 0x3da649404f94be4a, 0x692dad132c3f7e27, 0x40aee76ecaaa9eb8, 0x1294a01e09655024, + 0x6df797abdba4e4f5, 0xea2fb6024c1d7032, 0x5f4e0492295489fc, 0x57972914ea22e06a, + 0x9a8137d133aad473, 0xa2e6dd6ae7cdf2f3, 0x9f42644f18086647, 0x16d03301c170bd3e, + 0x908c416fa546656d, 0xe081503be22e123e, 0x077cf09116c4cc72, 0xcbd25cd264b7f229, + 0x3db2f468ec594031, 0x46c00e734c9badd5, 0xd0ec0ac72075d861, 0x3037cb3cf80b7630, + 0x574c3d7b3a2721c6, 0xae99906a0076824b, 0xb175a5418b532e70, 0xd8b3e251ee231ddd, + 0xb433eec25dca1966, 0x530f30dc5cff9a93, 0x9ff03d98b53cd335, 0xafc4225076558cdf, + 0xef81d3a28284402a, 0x110bdbf51c110a28, 0x9ae1b255d027e8f6, 0x7de3e0aa24688332, + 0xe483c3ecd2067ee2, 0xf829328b276137e6, 0xa413ccad57562cad, 0xe6118e8b496acb1f, + 0x8288dca6da5ec01f, 0xa53777dc88c17255, 0x8a00f1e0d5716eda, 0x618e6f47b7a720a8, + 0x9e3907b0c692a841, 0x978b42ca963f34f3, 0x75e4b0cd98a7d7ef, 0xde4dbd6e0b5f4752, + 0x0252e4153f34493f, 0x50f0e7d803734ef9, 0x237766a38ed167ee, 0x4124414001ee39a0, + 0xd08df643e535bb21, 0x34f575b5a9a80b74, 0x2c343af87297f755, 0xcd8b6d99d821f7cb, + 0xe376fd7256fc48ae, 0xe1b06e7334352885, 0xfa87b26f86c169eb, 0x36c1604665a971de, + 0xdba147c2239c8e80, 0x6b208e69fc7f0e24, 0x8795395b6f2b60c3, 0x05dabee9194907f4, + 0xb98175142f5ed902, 0x5e1701e2021ddc81, 
0x0875aba2755eed08, 0x778d83289251de95, + 0x3bfbe46a039ecb31, 0xb24704fce4cbd7f9, 0x6985ffe9a7c91e3d, 0xc8efb13df249dabb, + 0xb1037e64b0f4c9f6, 0x55f69fd197d6b7c3, 0x672589d71d68a90c, 0xbebdb8224f50a77e, + 0x3f589f80007374a7, 0xd307f4635954182a, 0xcff5850c10d4fd90, 0xc6da02dfb6408e15, + 0x93daeef1e2b1a485, 0x65d833208aeea625, 0xe2b13fa13ed3b5fa, 0x67053538130fb68e, + 0xc1042f6598218fa9, 0xee5badca749b8a2e, 0x6d22a3f947dae37d, 0xb62c6d1657f4dbaf, + 0x6e007de69704c20b, 0x1af2b913fc3841d8, 0xdc0e47348e2e8e22, 0x9b1ddef1cf958b22, + 0x632ed6b0233066b8, 0xddd02d3311bed8f2, 0xf147cfe1834656e9, 0x399aaa49d511597a, + 0x6b14886979ec0309, 0x64fc4ac36b5afb97, 0xb82f78e07f7cf081, 0x10925c9a323d0e1b, + 0xf451c79ee13c63f6, 0x7c2fc180317876c7, 0x35a12bd9eecb7d22, 0x335654a539621f90, + 0xcc32a3f35db581f0, 0xc60748a80b2369cb, 0x7c4dd3b08591156b, 0xac1ced4b6de22291, + 0xa32cfa2df134def5, 0x627108918dea2a53, 0x0555b1608fcb4ff4, 0x143ee7ac43aaa33c, + 0xdae90ce7cf4fc218, 0x4d68fc2582bcf4b5, 0x37094e1849135d71, 0xf7857e09f3d49fd8, + 0x007538c503768be7, 0xedf648ba2f6be601, 0xaa347664dd72513e, 0xbe63893c6ef23b86, + 0x130b85710605af97, 0xdd765c6b1ef6ab56, 0xf3249a629a97dc6b, 0x2a114f9020fab8e5, + 0x5a69e027cfc6ad08, 0x3c4ccb36f1a5e050, 0x2e9e7d596834f0a5, 0x2430be6858fce789, + 0xe90b862f2466e597, 0x895e2884f159a9ec, 0x26ab8fa4902fcb57, 0xa6efff5c54e1fa50, + 0x333ac4e5811a8255, 0xa58d515f02498611, 0xfe5a09dcb25c6ef4, 0x03898988ab5f5818, + 0x289ff6242af6c617, 0x3d9dd59fd381ea23, 0x52d7d93d8a8aae51, 0xc76a123d511f786f, + 0xf68901edaf00c46c, 0x8c630871b590de80, 0x05209c308991e091, 0x1f809f99b4788177, + 0x11170c2eb6c19fd8, 0x44433c779062ba58, 0xc0acb51af1874c45, 0x9f2e134284809fa1, + 0xedb523bd15c619fa, 0x02d97fd53ecc23c0, 0xacaf05a34462374c, 0xddd9c6d34bffa11f, +} diff --git a/src/internal/coverage/encodemeta/encode.go b/src/internal/coverage/encodemeta/encode.go index d211c7c08e..549b3f55a8 100644 --- a/src/internal/coverage/encodemeta/encode.go +++ b/src/internal/coverage/encodemeta/encode.go 
@@ -54,7 +54,7 @@ func NewCoverageMetaDataBuilder(pkgpath string, pkgname string, modulepath strin func h32(x uint32, h hash.Hash, tmp []byte) { tmp = tmp[:0] - tmp = append(tmp, []byte{0, 0, 0, 0}...) + tmp = append(tmp, 0, 0, 0, 0) binary.LittleEndian.PutUint32(tmp, x) h.Write(tmp) } @@ -117,7 +117,7 @@ func (b *CoverageMetaDataBuilder) reportWriteError(err error) { func (b *CoverageMetaDataBuilder) wrUint32(w io.WriteSeeker, v uint32) { b.tmp = b.tmp[:0] - b.tmp = append(b.tmp, []byte{0, 0, 0, 0}...) + b.tmp = append(b.tmp, 0, 0, 0, 0) binary.LittleEndian.PutUint32(b.tmp, v) if nw, err := w.Write(b.tmp); err != nil { b.reportWriteError(err) diff --git a/src/internal/coverage/pkid.go b/src/internal/coverage/pkid.go index 8ddd44d6bb..a7a89c03ee 100644 --- a/src/internal/coverage/pkid.go +++ b/src/internal/coverage/pkid.go @@ -31,7 +31,7 @@ package coverage // slot: 6 path='runtime/internal/math' hard-coded id: 6 // slot: 7 path='internal/bytealg' hard-coded id: 7 // slot: 8 path='internal/goexperiment' -// slot: 9 path='runtime/internal/syscall' hard-coded id: 8 +// slot: 9 path='internal/runtime/syscall' hard-coded id: 8 // slot: 10 path='runtime' hard-coded id: 9 // fatal error: runtime.addCovMeta // @@ -49,12 +49,13 @@ var rtPkgs = [...]string{ "internal/goarch", "runtime/internal/atomic", "internal/goos", + "internal/chacha8rand", "runtime/internal/sys", "internal/abi", "runtime/internal/math", "internal/bytealg", "internal/goexperiment", - "runtime/internal/syscall", + "internal/runtime/syscall", "runtime", } diff --git a/src/internal/cpu/cpu_arm64_hwcap.go b/src/internal/cpu/cpu_arm64_hwcap.go index 2fabbb6edc..7f0504ca16 100644 --- a/src/internal/cpu/cpu_arm64_hwcap.go +++ b/src/internal/cpu/cpu_arm64_hwcap.go @@ -48,13 +48,13 @@ func hwcapInit(os string) { if ARM64.HasCPUID { midr := getMIDR() part_num := uint16((midr >> 4) & 0xfff) - implementor := byte((midr >> 24) & 0xff) + implementer := byte((midr >> 24) & 0xff) // d0c - NeoverseN1 // d40 - NeoverseV1 
// d49 - NeoverseN2 // d4f - NeoverseV2 - if implementor == 'A' && (part_num == 0xd0c || part_num == 0xd40 || + if implementer == 'A' && (part_num == 0xd0c || part_num == 0xd40 || part_num == 0xd49 || part_num == 0xd4f) { ARM64.IsNeoverse = true } diff --git a/src/internal/cpu/cpu_s390x.s b/src/internal/cpu/cpu_s390x.s index c55a4c725d..4ffbbde38d 100644 --- a/src/internal/cpu/cpu_s390x.s +++ b/src/internal/cpu/cpu_s390x.s @@ -30,14 +30,14 @@ TEXT ·kmcQuery(SB), NOSPLIT|NOFRAME, $0-16 TEXT ·kmctrQuery(SB), NOSPLIT|NOFRAME, $0-16 MOVD $0, R0 // set function code to 0 (KMCTR-Query) MOVD $ret+0(FP), R1 // address of 16-byte return value - KMCTR R6, R2, R4 // cipher message with counter (KMCTR) + KMCTR R2, R4, R4 // cipher message with counter (KMCTR) RET // func kmaQuery() queryResult TEXT ·kmaQuery(SB), NOSPLIT|NOFRAME, $0-16 MOVD $0, R0 // set function code to 0 (KMA-Query) MOVD $ret+0(FP), R1 // address of 16-byte return value - KMA R6, R2, R4 // cipher message with authentication (KMA) + KMA R2, R6, R4 // cipher message with authentication (KMA) RET // func kimdQuery() queryResult diff --git a/src/internal/fuzz/encoding_test.go b/src/internal/fuzz/encoding_test.go index 6f6173d7e0..a46a347403 100644 --- a/src/internal/fuzz/encoding_test.go +++ b/src/internal/fuzz/encoding_test.go @@ -233,9 +233,6 @@ uint(18446744073709551615)` t.Fatalf("unmarshal unexpected error: %v", err) } newB := marshalCorpusFile(vals...) - if err != nil { - t.Fatalf("marshal unexpected error: %v", err) - } if newB[len(newB)-1] != '\n' { t.Error("didn't write final newline to corpus file") } diff --git a/src/internal/godebug/godebug_test.go b/src/internal/godebug/godebug_test.go index ed8e93d453..1ed0a365ab 100644 --- a/src/internal/godebug/godebug_test.go +++ b/src/internal/godebug/godebug_test.go @@ -7,6 +7,7 @@ package godebug_test import ( "fmt" . 
"internal/godebug" + "internal/race" "internal/testenv" "os" "os/exec" @@ -70,6 +71,36 @@ func TestMetrics(t *testing.T) { } } +// TestPanicNilRace checks for a race in the runtime caused by use of runtime +// atomics (not visible to usual race detection) to install the counter for +// non-default panic(nil) semantics. For #64649. +func TestPanicNilRace(t *testing.T) { + if !race.Enabled { + t.Skip("Skipping test intended for use with -race.") + } + if os.Getenv("GODEBUG") != "panicnil=1" { + cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestPanicNilRace$", "-test.v", "-test.parallel=2", "-test.count=1")) + cmd.Env = append(cmd.Env, "GODEBUG=panicnil=1") + out, err := cmd.CombinedOutput() + t.Logf("output:\n%s", out) + + if err != nil { + t.Errorf("Was not expecting a crash") + } + return + } + + test := func(t *testing.T) { + t.Parallel() + defer func() { + recover() + }() + panic(nil) + } + t.Run("One", test) + t.Run("Two", test) +} + func TestCmdBisect(t *testing.T) { testenv.MustHaveGoBuild(t) out, err := exec.Command("go", "run", "cmd/vendor/golang.org/x/tools/cmd/bisect", "GODEBUG=buggy=1#PATTERN", os.Args[0], "-test.run=^TestBisectTestCase$").CombinedOutput() diff --git a/src/internal/godebugs/table.go b/src/internal/godebugs/table.go index 92a0d089ca..d5ac707a18 100644 --- a/src/internal/godebugs/table.go +++ b/src/internal/godebugs/table.go @@ -48,8 +48,12 @@ var All = []Info{ {Name: "tls10server", Package: "crypto/tls", Changed: 22, Old: "1"}, {Name: "tlsmaxrsasize", Package: "crypto/tls"}, {Name: "tlsrsakex", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "tlsunsafeekm", Package: "crypto/tls", Changed: 22, Old: "1"}, + {Name: "winreadlinkvolume", Package: "os", Changed: 22, Old: "0"}, + {Name: "winsymlink", Package: "os", Changed: 22, Old: "0"}, {Name: "x509sha1", Package: "crypto/x509"}, {Name: "x509usefallbackroots", Package: "crypto/x509"}, + {Name: "x509usepolicies", Package: "crypto/x509"}, {Name: "zipinsecurepath", 
Package: "archive/zip"}, } diff --git a/src/internal/goroot/gc.go b/src/internal/goroot/gc.go index c0216f4ea5..6b37dfa4c7 100644 --- a/src/internal/goroot/gc.go +++ b/src/internal/goroot/gc.go @@ -20,8 +20,16 @@ func IsStandardPackage(goroot, compiler, path string) bool { switch compiler { case "gc": dir := filepath.Join(goroot, "src", path) - info, err := os.Stat(dir) - return err == nil && info.IsDir() + dirents, err := os.ReadDir(dir) + if err != nil { + return false + } + for _, dirent := range dirents { + if strings.HasSuffix(dirent.Name(), ".go") { + return true + } + } + return false case "gccgo": return gccgoSearch.isStandard(path) default: diff --git a/src/internal/goroot/gccgo.go b/src/internal/goroot/gccgo.go index 62841222a7..2bbf4cda2b 100644 --- a/src/internal/goroot/gccgo.go +++ b/src/internal/goroot/gccgo.go @@ -9,6 +9,7 @@ package goroot import ( "os" "path/filepath" + "strings" ) // IsStandardPackage reports whether path is a standard package, @@ -17,8 +18,16 @@ func IsStandardPackage(goroot, compiler, path string) bool { switch compiler { case "gc": dir := filepath.Join(goroot, "src", path) - _, err := os.Stat(dir) - return err == nil + dirents, err := os.ReadDir(dir) + if err != nil { + return false + } + for _, dirent := range dirents { + if strings.HasSuffix(dirent.Name(), ".go") { + return true + } + } + return false case "gccgo": return stdpkg[path] default: diff --git a/src/internal/goversion/goversion.go b/src/internal/goversion/goversion.go index 770ef11356..a9d6f12ee1 100644 --- a/src/internal/goversion/goversion.go +++ b/src/internal/goversion/goversion.go @@ -9,4 +9,4 @@ package goversion // // It should be updated at the start of each development cycle to be // the version of the next Go 1.x release. See golang.org/issue/40705. 
-const Version = 22 +const Version = 23 diff --git a/src/internal/msan/doc.go b/src/internal/msan/doc.go new file mode 100644 index 0000000000..e68d341e7a --- /dev/null +++ b/src/internal/msan/doc.go @@ -0,0 +1,9 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package msan contains helper functions for manually instrumenting code +// for the memory sanitizer. +// This package exports the private msan routines in runtime unconditionally +// but without the "msan" build tag they are no-ops. +package msan diff --git a/src/internal/msan/msan.go b/src/internal/msan/msan.go new file mode 100644 index 0000000000..518153ee5a --- /dev/null +++ b/src/internal/msan/msan.go @@ -0,0 +1,28 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build msan + +package msan + +import ( + "unsafe" +) + +const Enabled = true + +//go:linkname Read runtime.msanread +func Read(addr unsafe.Pointer, sz uintptr) + +//go:linkname Write runtime.msanwrite +func Write(addr unsafe.Pointer, sz uintptr) + +//go:linkname Malloc runtime.msanmalloc +func Malloc(addr unsafe.Pointer, sz uintptr) + +//go:linkname Free runtime.msanfree +func Free(addr unsafe.Pointer, sz uintptr) + +//go:linkname Move runtime.msanmove +func Move(dst, src unsafe.Pointer, sz uintptr) diff --git a/src/internal/msan/nomsan.go b/src/internal/msan/nomsan.go new file mode 100644 index 0000000000..3dccda3ffd --- /dev/null +++ b/src/internal/msan/nomsan.go @@ -0,0 +1,28 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !msan + +package msan + +import ( + "unsafe" +) + +const Enabled = false + +func Read(addr unsafe.Pointer, sz uintptr) { +} + +func Write(addr unsafe.Pointer, sz uintptr) { +} + +func Malloc(addr unsafe.Pointer, sz uintptr) { +} + +func Free(addr unsafe.Pointer, sz uintptr) { +} + +func Move(dst, src unsafe.Pointer, sz uintptr) { +} diff --git a/src/internal/platform/supported.go b/src/internal/platform/supported.go index 4589903550..82c66e2195 100644 --- a/src/internal/platform/supported.go +++ b/src/internal/platform/supported.go @@ -206,7 +206,7 @@ func BuildModeSupported(compiler, buildmode, goos, goarch string) bool { case "plugin": switch platform { - case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/s390x", "linux/ppc64le", + case "linux/amd64", "linux/arm", "linux/arm64", "linux/386", "linux/loong64", "linux/s390x", "linux/ppc64le", "android/amd64", "android/386", "darwin/amd64", "darwin/arm64", "freebsd/amd64": diff --git a/src/internal/platform/zosarch.go b/src/internal/platform/zosarch.go index 7c3db537e8..1df348518c 100644 --- a/src/internal/platform/zosarch.go +++ b/src/internal/platform/zosarch.go @@ -103,7 +103,7 @@ var distInfo = map[OSArch]osArchInfo{ {"openbsd", "arm64"}: {CgoSupported: true}, {"openbsd", "mips64"}: {CgoSupported: true, Broken: true}, {"openbsd", "ppc64"}: {}, - {"openbsd", "riscv64"}: {Broken: true}, + {"openbsd", "riscv64"}: {CgoSupported: true}, {"plan9", "386"}: {}, {"plan9", "amd64"}: {}, {"plan9", "arm"}: {}, diff --git a/src/internal/poll/fd.go b/src/internal/poll/fd.go index ef61d0cb3f..4e038d00dd 100644 --- a/src/internal/poll/fd.go +++ b/src/internal/poll/fd.go @@ -81,3 +81,14 @@ func consume(v *[][]byte, n int64) { // TestHookDidWritev is a hook for testing writev. var TestHookDidWritev = func(wrote int) {} + +// String is an internal string definition for methods/functions +// that is not intended for use outside the standard libraries. 
+// +// Other packages in std that import internal/poll and have some +// exported APIs (now we've got some in net.rawConn) which are only used +// internally and are not intended to be used outside the standard libraries, +// Therefore, we make those APIs use internal types like poll.FD or poll.String +// in their function signatures to disable the usability of these APIs from +// external codebase. +type String string diff --git a/src/internal/poll/fd_plan9.go b/src/internal/poll/fd_plan9.go index 7cc178a9d5..6659e9dc9b 100644 --- a/src/internal/poll/fd_plan9.go +++ b/src/internal/poll/fd_plan9.go @@ -8,6 +8,7 @@ import ( "errors" "io" "sync" + "syscall" "time" ) @@ -230,3 +231,14 @@ func (fd *FD) RawRead(f func(uintptr) bool) error { func (fd *FD) RawWrite(f func(uintptr) bool) error { return errors.New("not implemented") } + +func DupCloseOnExec(fd int) (int, string, error) { + nfd, err := syscall.Dup(int(fd), -1) + if err != nil { + return 0, "dup", err + } + // Plan9 has no syscall.CloseOnExec but + // its forkAndExecInChild closes all fds + // not related to the fork+exec. + return nfd, "", nil +} diff --git a/src/internal/poll/fd_windows.go b/src/internal/poll/fd_windows.go index 9df39edced..5eefeb90f1 100644 --- a/src/internal/poll/fd_windows.go +++ b/src/internal/poll/fd_windows.go @@ -53,14 +53,17 @@ func checkSetFileCompletionNotificationModes() { useSetFileCompletionNotificationModes = true } -func init() { +// InitWSA initiates the use of the Winsock DLL by the current process. +// It is called from the net package at init time to avoid +// loading ws2_32.dll when net is not used. +var InitWSA = sync.OnceFunc(func() { var d syscall.WSAData e := syscall.WSAStartup(uint32(0x202), &d) if e != nil { initErr = e } checkSetFileCompletionNotificationModes() -} +}) // operation contains superset of data necessary to perform all async IO. 
type operation struct { @@ -71,8 +74,6 @@ type operation struct { // fields used by runtime.netpoll runtimeCtx uintptr mode int32 - errno int32 - qty uint32 // fields used only by net package fd *FD @@ -83,6 +84,7 @@ type operation struct { rsan int32 handle syscall.Handle flags uint32 + qty uint32 bufs []syscall.WSABuf } @@ -174,9 +176,9 @@ func execIO(o *operation, submit func(o *operation) error) (int, error) { // Wait for our request to complete. err = fd.pd.wait(int(o.mode), fd.isFile) if err == nil { + err = windows.WSAGetOverlappedResult(fd.Sysfd, &o.o, &o.qty, false, &o.flags) // All is good. Extract our IO results and return. - if o.errno != 0 { - err = syscall.Errno(o.errno) + if err != nil { // More data available. Return back the size of received data. if err == syscall.ERROR_MORE_DATA || err == windows.WSAEMSGSIZE { return int(o.qty), err @@ -202,8 +204,8 @@ func execIO(o *operation, submit func(o *operation) error) (int, error) { } // Wait for cancellation to complete. fd.pd.waitCanceled(int(o.mode)) - if o.errno != 0 { - err = syscall.Errno(o.errno) + err = windows.WSAGetOverlappedResult(fd.Sysfd, &o.o, &o.qty, false, &o.flags) + if err != nil { if err == syscall.ERROR_OPERATION_ABORTED { // IO Canceled err = netpollErr } @@ -1037,8 +1039,7 @@ func (fd *FD) Fchmod(mode uint32) error { var du windows.FILE_BASIC_INFO du.FileAttributes = attrs - l := uint32(unsafe.Sizeof(d)) - return windows.SetFileInformationByHandle(fd.Sysfd, windows.FileBasicInfo, uintptr(unsafe.Pointer(&du)), l) + return windows.SetFileInformationByHandle(fd.Sysfd, windows.FileBasicInfo, unsafe.Pointer(&du), uint32(unsafe.Sizeof(du))) } // Fchdir wraps syscall.Fchdir. 
@@ -1330,3 +1331,17 @@ func (fd *FD) WriteMsgInet6(p []byte, oob []byte, sa *syscall.SockaddrInet6) (in }) return n, int(o.msg.Control.Len), err } + +func DupCloseOnExec(fd int) (int, string, error) { + proc, err := syscall.GetCurrentProcess() + if err != nil { + return 0, "GetCurrentProcess", err + } + + var nfd syscall.Handle + const inherit = false // analogous to CLOEXEC + if err := syscall.DuplicateHandle(proc, syscall.Handle(fd), proc, &nfd, 0, inherit, syscall.DUPLICATE_SAME_ACCESS); err != nil { + return 0, "DuplicateHandle", err + } + return int(nfd), "", nil +} diff --git a/src/internal/poll/fd_windows_test.go b/src/internal/poll/fd_windows_test.go index f0697a0d7b..8bf92be7c3 100644 --- a/src/internal/poll/fd_windows_test.go +++ b/src/internal/poll/fd_windows_test.go @@ -41,6 +41,8 @@ func logFD(net string, fd *poll.FD, err error) { func init() { loggedFDs = make(map[syscall.Handle]*loggedFD) *poll.LogInitFD = logFD + + poll.InitWSA() } func findLoggedFD(h syscall.Handle) (lfd *loggedFD, found bool) { @@ -131,23 +133,14 @@ func TestWSASocketConflict(t *testing.T) { var outbuf _TCP_INFO_v0 cbbr := uint32(0) - var ovs []syscall.Overlapped = make([]syscall.Overlapped, 2) - // Attempt to exercise behavior where a user-owned syscall.Overlapped - // induces an invalid pointer dereference in the Windows-specific version - // of runtime.netpoll. - ovs[1].Internal -= 1 - + var ov syscall.Overlapped // Create an event so that we can efficiently wait for completion // of a requested overlapped I/O operation. - ovs[0].HEvent, _ = windows.CreateEvent(nil, 0, 0, nil) - if ovs[0].HEvent == 0 { + ov.HEvent, _ = windows.CreateEvent(nil, 0, 0, nil) + if ov.HEvent == 0 { t.Fatalf("could not create the event!") } - - // Set the low bit of the Event Handle so that the completion - // of the overlapped I/O event will not trigger a completion event - // on any I/O completion port associated with the handle. 
- ovs[0].HEvent |= 0x1 + defer syscall.CloseHandle(ov.HEvent) if err = fd.WSAIoctl( SIO_TCP_INFO, @@ -156,7 +149,7 @@ func TestWSASocketConflict(t *testing.T) { (*byte)(unsafe.Pointer(&outbuf)), uint32(unsafe.Sizeof(outbuf)), &cbbr, - &ovs[0], + &ov, 0, ); err != nil && !errors.Is(err, syscall.ERROR_IO_PENDING) { t.Fatalf("could not perform the WSAIoctl: %v", err) @@ -165,14 +158,10 @@ func TestWSASocketConflict(t *testing.T) { if err != nil && errors.Is(err, syscall.ERROR_IO_PENDING) { // It is possible that the overlapped I/O operation completed // immediately so there is no need to wait for it to complete. - if res, err := syscall.WaitForSingleObject(ovs[0].HEvent, syscall.INFINITE); res != 0 { + if res, err := syscall.WaitForSingleObject(ov.HEvent, syscall.INFINITE); res != 0 { t.Fatalf("waiting for the completion of the overlapped IO failed: %v", err) } } - - if err = syscall.CloseHandle(ovs[0].HEvent); err != nil { - t.Fatalf("could not close the event handle: %v", err) - } } type _TCP_INFO_v0 struct { diff --git a/src/internal/poll/sendfile_bsd.go b/src/internal/poll/sendfile_bsd.go index 0f55cad73d..8fcdb1c22e 100644 --- a/src/internal/poll/sendfile_bsd.go +++ b/src/internal/poll/sendfile_bsd.go @@ -13,51 +13,44 @@ import "syscall" const maxSendfileSize int = 4 << 20 // SendFile wraps the sendfile system call. 
-func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error, bool) { +func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, handled bool) { if err := dstFD.writeLock(); err != nil { return 0, err, false } defer dstFD.writeUnlock() + if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil { return 0, err, false } dst := dstFD.Sysfd - var ( - written int64 - err error - handled = true - ) for remain > 0 { n := maxSendfileSize if int64(n) > remain { n = int(remain) } pos1 := pos - n, err1 := syscall.Sendfile(dst, src, &pos1, n) + n, err = syscall.Sendfile(dst, src, &pos1, n) if n > 0 { pos += int64(n) written += int64(n) remain -= int64(n) - } else if n == 0 && err1 == nil { - break } - if err1 == syscall.EINTR { + if err == syscall.EINTR { continue } - if err1 == syscall.EAGAIN { - if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil { - continue - } + // This includes syscall.ENOSYS (no kernel + // support) and syscall.EINVAL (fd types which + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + if err != syscall.EAGAIN { + break } - if err1 != nil { - // This includes syscall.ENOSYS (no kernel - // support) and syscall.EINVAL (fd types which - // don't implement sendfile) - err = err1 - handled = false + if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } - return written, err, handled + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + return } diff --git a/src/internal/poll/sendfile_linux.go b/src/internal/poll/sendfile_linux.go index cc31969a43..c2a0653294 100644 --- a/src/internal/poll/sendfile_linux.go +++ b/src/internal/poll/sendfile_linux.go @@ -11,49 +11,42 @@ import "syscall" const maxSendfileSize int = 4 << 20 // SendFile wraps the sendfile system call. 
-func SendFile(dstFD *FD, src int, remain int64) (int64, error, bool) { +func SendFile(dstFD *FD, src int, remain int64) (written int64, err error, handled bool) { if err := dstFD.writeLock(); err != nil { return 0, err, false } defer dstFD.writeUnlock() + if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil { return 0, err, false } dst := dstFD.Sysfd - var ( - written int64 - err error - handled = true - ) for remain > 0 { n := maxSendfileSize if int64(n) > remain { n = int(remain) } - n, err1 := syscall.Sendfile(dst, src, nil, n) + n, err = syscall.Sendfile(dst, src, nil, n) if n > 0 { written += int64(n) remain -= int64(n) - } else if n == 0 && err1 == nil { - break - } - if err1 == syscall.EINTR { continue - } - if err1 == syscall.EAGAIN { - if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil { - continue - } - } - if err1 != nil { + } else if err != syscall.EAGAIN && err != syscall.EINTR { // This includes syscall.ENOSYS (no kernel // support) and syscall.EINVAL (fd types which - // don't implement sendfile) - err = err1 - handled = false + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. + break + } + if err == syscall.EINTR { + continue + } + if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } - return written, err, handled + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + return } diff --git a/src/internal/poll/sendfile_solaris.go b/src/internal/poll/sendfile_solaris.go index f9f685c64a..1ba0c8d064 100644 --- a/src/internal/poll/sendfile_solaris.go +++ b/src/internal/poll/sendfile_solaris.go @@ -16,29 +16,25 @@ import "syscall" const maxSendfileSize int = 4 << 20 // SendFile wraps the sendfile system call. 
-func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error, bool) { +func SendFile(dstFD *FD, src int, pos, remain int64) (written int64, err error, handled bool) { if err := dstFD.writeLock(); err != nil { return 0, err, false } defer dstFD.writeUnlock() + if err := dstFD.pd.prepareWrite(dstFD.isFile); err != nil { return 0, err, false } dst := dstFD.Sysfd - var ( - written int64 - err error - handled = true - ) for remain > 0 { n := maxSendfileSize if int64(n) > remain { n = int(remain) } pos1 := pos - n, err1 := syscall.Sendfile(dst, src, &pos1, n) - if err1 == syscall.EAGAIN || err1 == syscall.EINTR { + n, err = syscall.Sendfile(dst, src, &pos1, n) + if err == syscall.EAGAIN || err == syscall.EINTR { // partial write may have occurred n = int(pos1 - pos) } @@ -46,25 +42,22 @@ func SendFile(dstFD *FD, src int, pos, remain int64) (int64, error, bool) { pos += int64(n) written += int64(n) remain -= int64(n) - } else if n == 0 && err1 == nil { - break - } - if err1 == syscall.EAGAIN { - if err1 = dstFD.pd.waitWrite(dstFD.isFile); err1 == nil { - continue - } - } - if err1 == syscall.EINTR { continue - } - if err1 != nil { + } else if err != syscall.EAGAIN && err != syscall.EINTR { // This includes syscall.ENOSYS (no kernel // support) and syscall.EINVAL (fd types which - // don't implement sendfile) - err = err1 - handled = false + // don't implement sendfile), and other errors. + // We should end the loop when there is no error + // returned from sendfile(2) or it is not a retryable error. 
+ break + } + if err == syscall.EINTR { + continue + } + if err = dstFD.pd.waitWrite(dstFD.isFile); err != nil { break } } - return written, err, handled + handled = written != 0 || (err != syscall.ENOSYS && err != syscall.EINVAL) + return } diff --git a/src/internal/profile/encode.go b/src/internal/profile/encode.go index 72d6fe2fa7..94d04bf094 100644 --- a/src/internal/profile/encode.go +++ b/src/internal/profile/encode.go @@ -207,9 +207,6 @@ var profileDecoder = []decoder{ // suffix X) and populates the corresponding exported fields. // The unexported fields are cleared up to facilitate testing. func (p *Profile) postDecode() error { - if p.Empty() { - return nil - } var err error mappings := make(map[uint64]*Mapping) diff --git a/src/internal/profile/filter.go b/src/internal/profile/filter.go index 141dd1f405..1da580aea8 100644 --- a/src/internal/profile/filter.go +++ b/src/internal/profile/filter.go @@ -6,110 +6,6 @@ package profile -import "regexp" - -// FilterSamplesByName filters the samples in a profile and only keeps -// samples where at least one frame matches focus but none match ignore. -// Returns true is the corresponding regexp matched at least one sample. 
-func (p *Profile) FilterSamplesByName(focus, ignore, hide *regexp.Regexp) (fm, im, hm bool) { - focusOrIgnore := make(map[uint64]bool) - hidden := make(map[uint64]bool) - for _, l := range p.Location { - if ignore != nil && l.matchesName(ignore) { - im = true - focusOrIgnore[l.ID] = false - } else if focus == nil || l.matchesName(focus) { - fm = true - focusOrIgnore[l.ID] = true - } - if hide != nil && l.matchesName(hide) { - hm = true - l.Line = l.unmatchedLines(hide) - if len(l.Line) == 0 { - hidden[l.ID] = true - } - } - } - - s := make([]*Sample, 0, len(p.Sample)) - for _, sample := range p.Sample { - if focusedAndNotIgnored(sample.Location, focusOrIgnore) { - if len(hidden) > 0 { - var locs []*Location - for _, loc := range sample.Location { - if !hidden[loc.ID] { - locs = append(locs, loc) - } - } - if len(locs) == 0 { - // Remove sample with no locations (by not adding it to s). - continue - } - sample.Location = locs - } - s = append(s, sample) - } - } - p.Sample = s - - return -} - -// matchesName reports whether the function name or file in the -// location matches the regular expression. -func (loc *Location) matchesName(re *regexp.Regexp) bool { - for _, ln := range loc.Line { - if fn := ln.Function; fn != nil { - if re.MatchString(fn.Name) { - return true - } - if re.MatchString(fn.Filename) { - return true - } - } - } - return false -} - -// unmatchedLines returns the lines in the location that do not match -// the regular expression. -func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { - var lines []Line - for _, ln := range loc.Line { - if fn := ln.Function; fn != nil { - if re.MatchString(fn.Name) { - continue - } - if re.MatchString(fn.Filename) { - continue - } - } - lines = append(lines, ln) - } - return lines -} - -// focusedAndNotIgnored looks up a slice of ids against a map of -// focused/ignored locations. The map only contains locations that are -// explicitly focused or ignored. 
Returns whether there is at least -// one focused location but no ignored locations. -func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { - var f bool - for _, loc := range locs { - if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { - if focus { - // Found focused location. Must keep searching in case there - // is an ignored one as well. - f = true - } else { - // Found ignored location. Can return false right away. - return false - } - } - } - return f -} - // TagMatch selects tags for filtering type TagMatch func(key, val string, nval int64) bool diff --git a/src/cmd/compile/internal/pgo/internal/graph/graph.go b/src/internal/profile/graph.go similarity index 96% rename from src/cmd/compile/internal/pgo/internal/graph/graph.go rename to src/internal/profile/graph.go index 4d89b1ba63..0e8e33c1ac 100644 --- a/src/cmd/compile/internal/pgo/internal/graph/graph.go +++ b/src/internal/profile/graph.go @@ -12,14 +12,13 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package graph represents a pprof profile as a directed graph. +// Package profile represents a pprof profile as a directed graph. // // This package is a simplified fork of github.com/google/pprof/internal/graph. -package graph +package profile import ( "fmt" - "internal/profile" "sort" "strings" ) @@ -241,7 +240,7 @@ func (e *Edge) WeightValue() int64 { } // NewGraph computes a graph from a profile. -func NewGraph(prof *profile.Profile, o *Options) *Graph { +func NewGraph(prof *Profile, o *Options) *Graph { nodes, locationMap := CreateNodes(prof, o) seenNode := make(map[*Node]bool) seenEdge := make(map[nodePair]bool) @@ -368,13 +367,13 @@ func (l locationMap) get(id uint64) Nodes { // CreateNodes creates graph nodes for all locations in a profile. It // returns set of all nodes, plus a mapping of each location to the // set of corresponding nodes (one per location.Line). 
-func CreateNodes(prof *profile.Profile, o *Options) (Nodes, locationMap) { +func CreateNodes(prof *Profile, o *Options) (Nodes, locationMap) { locations := locationMap{make([]Nodes, len(prof.Location)+1), make(map[uint64]Nodes)} nm := make(NodeMap, len(prof.Location)) for _, l := range prof.Location { lines := l.Line if len(lines) == 0 { - lines = []profile.Line{{}} // Create empty line to include location info. + lines = []Line{{}} // Create empty line to include location info. } nodes := make(Nodes, len(lines)) for ln := range lines { @@ -393,7 +392,7 @@ func (nm NodeMap) nodes() Nodes { return nodes } -func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Options) *Node { +func (nm NodeMap) findOrInsertLine(l *Location, li Line, o *Options) *Node { var objfile string if m := l.Mapping; m != nil && m.File != "" { objfile = m.File @@ -405,7 +404,7 @@ func (nm NodeMap) findOrInsertLine(l *profile.Location, li profile.Line, o *Opti return nil } -func nodeInfo(l *profile.Location, line profile.Line, objfile string, o *Options) *NodeInfo { +func nodeInfo(l *Location, line Line, objfile string, o *Options) *NodeInfo { if line.Function == nil { return &NodeInfo{Address: l.Address} } diff --git a/src/internal/profile/legacy_profile.go b/src/internal/profile/legacy_profile.go deleted file mode 100644 index 373a6c04ca..0000000000 --- a/src/internal/profile/legacy_profile.go +++ /dev/null @@ -1,1268 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// This file implements parsers to convert legacy profiles into the -// profile.proto format. 
- -package profile - -import ( - "bufio" - "bytes" - "fmt" - "internal/lazyregexp" - "io" - "math" - "strconv" - "strings" -) - -var ( - countStartRE = lazyregexp.New(`\A(\w+) profile: total \d+\n\z`) - countRE = lazyregexp.New(`\A(\d+) @(( 0x[0-9a-f]+)+)\n\z`) - - heapHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) - heapSampleRE = lazyregexp.New(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) - - contentionSampleRE = lazyregexp.New(`(\d+) *(\d+) @([ x0-9a-f]*)`) - - hexNumberRE = lazyregexp.New(`0x[0-9a-f]+`) - - growthHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz`) - - fragmentationHeaderRE = lazyregexp.New(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz`) - - threadzStartRE = lazyregexp.New(`--- threadz \d+ ---`) - threadStartRE = lazyregexp.New(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) - - procMapsRE = lazyregexp.New(`([[:xdigit:]]+)-([[:xdigit:]]+)\s+([-rwxp]+)\s+([[:xdigit:]]+)\s+([[:xdigit:]]+):([[:xdigit:]]+)\s+([[:digit:]]+)\s*(\S+)?`) - - briefMapsRE = lazyregexp.New(`\s*([[:xdigit:]]+)-([[:xdigit:]]+):\s*(\S+)(\s.*@)?([[:xdigit:]]+)?`) - - // LegacyHeapAllocated instructs the heapz parsers to use the - // allocated memory stats instead of the default in-use memory. Note - // that tcmalloc doesn't provide all allocated memory, only in-use - // stats. - LegacyHeapAllocated bool -) - -func isSpaceOrComment(line string) bool { - trimmed := strings.TrimSpace(line) - return len(trimmed) == 0 || trimmed[0] == '#' -} - -// parseGoCount parses a Go count profile (e.g., threadcreate or -// goroutine) and returns a new Profile. -func parseGoCount(b []byte) (*Profile, error) { - r := bytes.NewBuffer(b) - - var line string - var err error - for { - // Skip past comments and empty lines seeking a real header. 
- line, err = r.ReadString('\n') - if err != nil { - return nil, err - } - if !isSpaceOrComment(line) { - break - } - } - - m := countStartRE.FindStringSubmatch(line) - if m == nil { - return nil, errUnrecognized - } - profileType := m[1] - p := &Profile{ - PeriodType: &ValueType{Type: profileType, Unit: "count"}, - Period: 1, - SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, - } - locations := make(map[uint64]*Location) - for { - line, err = r.ReadString('\n') - if err != nil { - if err == io.EOF { - break - } - return nil, err - } - if isSpaceOrComment(line) { - continue - } - if strings.HasPrefix(line, "---") { - break - } - m := countRE.FindStringSubmatch(line) - if m == nil { - return nil, errMalformed - } - n, err := strconv.ParseInt(m[1], 0, 64) - if err != nil { - return nil, errMalformed - } - fields := strings.Fields(m[2]) - locs := make([]*Location, 0, len(fields)) - for _, stk := range fields { - addr, err := strconv.ParseUint(stk, 0, 64) - if err != nil { - return nil, errMalformed - } - // Adjust all frames by -1 to land on the call instruction. - addr-- - loc := locations[addr] - if loc == nil { - loc = &Location{ - Address: addr, - } - locations[addr] = loc - p.Location = append(p.Location, loc) - } - locs = append(locs, loc) - } - p.Sample = append(p.Sample, &Sample{ - Location: locs, - Value: []int64{n}, - }) - } - - if err = parseAdditionalSections(strings.TrimSpace(line), r, p); err != nil { - return nil, err - } - return p, nil -} - -// remapLocationIDs ensures there is a location for each address -// referenced by a sample, and remaps the samples to point to the new -// location ids. 
-func (p *Profile) remapLocationIDs() { - seen := make(map[*Location]bool, len(p.Location)) - var locs []*Location - - for _, s := range p.Sample { - for _, l := range s.Location { - if seen[l] { - continue - } - l.ID = uint64(len(locs) + 1) - locs = append(locs, l) - seen[l] = true - } - } - p.Location = locs -} - -func (p *Profile) remapFunctionIDs() { - seen := make(map[*Function]bool, len(p.Function)) - var fns []*Function - - for _, l := range p.Location { - for _, ln := range l.Line { - fn := ln.Function - if fn == nil || seen[fn] { - continue - } - fn.ID = uint64(len(fns) + 1) - fns = append(fns, fn) - seen[fn] = true - } - } - p.Function = fns -} - -// remapMappingIDs matches location addresses with existing mappings -// and updates them appropriately. This is O(N*M), if this ever shows -// up as a bottleneck, evaluate sorting the mappings and doing a -// binary search, which would make it O(N*log(M)). -func (p *Profile) remapMappingIDs() { - if len(p.Mapping) == 0 { - return - } - - // Some profile handlers will incorrectly set regions for the main - // executable if its section is remapped. Fix them through heuristics. - - // Remove the initial mapping if named '/anon_hugepage' and has a - // consecutive adjacent mapping. - if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { - if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { - p.Mapping = p.Mapping[1:] - } - } - - for _, l := range p.Location { - if a := l.Address; a != 0 { - for _, m := range p.Mapping { - if m.Start <= a && a < m.Limit { - l.Mapping = m - break - } - } - } - } - - // Reset all mapping IDs. 
- for i, m := range p.Mapping { - m.ID = uint64(i + 1) - } -} - -var cpuInts = []func([]byte) (uint64, []byte){ - get32l, - get32b, - get64l, - get64b, -} - -func get32l(b []byte) (uint64, []byte) { - if len(b) < 4 { - return 0, nil - } - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] -} - -func get32b(b []byte) (uint64, []byte) { - if len(b) < 4 { - return 0, nil - } - return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] -} - -func get64l(b []byte) (uint64, []byte) { - if len(b) < 8 { - return 0, nil - } - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] -} - -func get64b(b []byte) (uint64, []byte) { - if len(b) < 8 { - return 0, nil - } - return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] -} - -// ParseTracebacks parses a set of tracebacks and returns a newly -// populated profile. It will accept any text file and generate a -// Profile out of it with any hex addresses it can identify, including -// a process map if it can recognize one. Each sample will include a -// tag "source" with the addresses recognized in string format. 
-func ParseTracebacks(b []byte) (*Profile, error) { - r := bytes.NewBuffer(b) - - p := &Profile{ - PeriodType: &ValueType{Type: "trace", Unit: "count"}, - Period: 1, - SampleType: []*ValueType{ - {Type: "trace", Unit: "count"}, - }, - } - - var sources []string - var sloc []*Location - - locs := make(map[uint64]*Location) - for { - l, err := r.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, err - } - if l == "" { - break - } - } - if sectionTrigger(l) == memoryMapSection { - break - } - if s, addrs := extractHexAddresses(l); len(s) > 0 { - for _, addr := range addrs { - // Addresses from stack traces point to the next instruction after - // each call. Adjust by -1 to land somewhere on the actual call. - addr-- - loc := locs[addr] - if locs[addr] == nil { - loc = &Location{ - Address: addr, - } - p.Location = append(p.Location, loc) - locs[addr] = loc - } - sloc = append(sloc, loc) - } - - sources = append(sources, s...) - } else { - if len(sources) > 0 || len(sloc) > 0 { - addTracebackSample(sloc, sources, p) - sloc, sources = nil, nil - } - } - } - - // Add final sample to save any leftover data. - if len(sources) > 0 || len(sloc) > 0 { - addTracebackSample(sloc, sources, p) - } - - if err := p.ParseMemoryMap(r); err != nil { - return nil, err - } - return p, nil -} - -func addTracebackSample(l []*Location, s []string, p *Profile) { - p.Sample = append(p.Sample, - &Sample{ - Value: []int64{1}, - Location: l, - Label: map[string][]string{"source": s}, - }) -} - -// parseCPU parses a profilez legacy profile and returns a newly -// populated Profile. -// -// The general format for profilez samples is a sequence of words in -// binary format. The first words are a header with the following data: -// -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. 
-func parseCPU(b []byte) (*Profile, error) { - var parse func([]byte) (uint64, []byte) - var n1, n2, n3, n4, n5 uint64 - for _, parse = range cpuInts { - var tmp []byte - n1, tmp = parse(b) - n2, tmp = parse(tmp) - n3, tmp = parse(tmp) - n4, tmp = parse(tmp) - n5, tmp = parse(tmp) - - if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { - b = tmp - return cpuProfile(b, int64(n4), parse) - } - } - return nil, errUnrecognized -} - -// cpuProfile returns a new Profile from C++ profilez data. -// b is the profile bytes after the header, period is the profiling -// period, and parse is a function to parse 8-byte chunks from the -// profile in its native endianness. -func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { - p := &Profile{ - Period: period * 1000, - PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, - SampleType: []*ValueType{ - {Type: "samples", Unit: "count"}, - {Type: "cpu", Unit: "nanoseconds"}, - }, - } - var err error - if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { - return nil, err - } - - // If all samples have the same second-to-the-bottom frame, it - // strongly suggests that it is an uninteresting artifact of - // measurement -- a stack frame pushed by the signal handler. The - // bottom frame is always correct as it is picked up from the signal - // structure, not the stack. Check if this is the case and if so, - // remove. - if len(p.Sample) > 1 && len(p.Sample[0].Location) > 1 { - allSame := true - id1 := p.Sample[0].Location[1].Address - for _, s := range p.Sample { - if len(s.Location) < 2 || id1 != s.Location[1].Address { - allSame = false - break - } - } - if allSame { - for _, s := range p.Sample { - s.Location = append(s.Location[:1], s.Location[2:]...) - } - } - } - - if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { - return nil, err - } - return p, nil -} - -// parseCPUSamples parses a collection of profilez samples from a -// profile. 
-// -// profilez samples are a repeated sequence of stack frames of the -// form: -// -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack -// -// The last stack trace is of the form: -// -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 -// -// Addresses from stack traces may point to the next instruction after -// each call. Optionally adjust by -1 to land somewhere on the actual -// call (except for the leaf, which is not a call). -func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { - locs := make(map[uint64]*Location) - for len(b) > 0 { - var count, nstk uint64 - count, b = parse(b) - nstk, b = parse(b) - if b == nil || nstk > uint64(len(b)/4) { - return nil, nil, errUnrecognized - } - var sloc []*Location - addrs := make([]uint64, nstk) - for i := 0; i < int(nstk); i++ { - addrs[i], b = parse(b) - } - - if count == 0 && nstk == 1 && addrs[0] == 0 { - // End of data marker - break - } - for i, addr := range addrs { - if adjust && i > 0 { - addr-- - } - loc := locs[addr] - if loc == nil { - loc = &Location{ - Address: addr, - } - locs[addr] = loc - p.Location = append(p.Location, loc) - } - sloc = append(sloc, loc) - } - p.Sample = append(p.Sample, - &Sample{ - Value: []int64{int64(count), int64(count) * p.Period}, - Location: sloc, - }) - } - // Reached the end without finding the EOD marker. - return b, locs, nil -} - -// parseHeap parses a heapz legacy or a growthz profile and -// returns a newly populated Profile. 
-func parseHeap(b []byte) (p *Profile, err error) { - r := bytes.NewBuffer(b) - l, err := r.ReadString('\n') - if err != nil { - return nil, errUnrecognized - } - - sampling := "" - - if header := heapHeaderRE.FindStringSubmatch(l); header != nil { - p = &Profile{ - SampleType: []*ValueType{ - {Type: "objects", Unit: "count"}, - {Type: "space", Unit: "bytes"}, - }, - PeriodType: &ValueType{Type: "objects", Unit: "bytes"}, - } - - var period int64 - if len(header[6]) > 0 { - if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { - return nil, errUnrecognized - } - } - - switch header[5] { - case "heapz_v2", "heap_v2": - sampling, p.Period = "v2", period - case "heapprofile": - sampling, p.Period = "", 1 - case "heap": - sampling, p.Period = "v2", period/2 - default: - return nil, errUnrecognized - } - } else if header = growthHeaderRE.FindStringSubmatch(l); header != nil { - p = &Profile{ - SampleType: []*ValueType{ - {Type: "objects", Unit: "count"}, - {Type: "space", Unit: "bytes"}, - }, - PeriodType: &ValueType{Type: "heapgrowth", Unit: "count"}, - Period: 1, - } - } else if header = fragmentationHeaderRE.FindStringSubmatch(l); header != nil { - p = &Profile{ - SampleType: []*ValueType{ - {Type: "objects", Unit: "count"}, - {Type: "space", Unit: "bytes"}, - }, - PeriodType: &ValueType{Type: "allocations", Unit: "count"}, - Period: 1, - } - } else { - return nil, errUnrecognized - } - - if LegacyHeapAllocated { - for _, st := range p.SampleType { - st.Type = "alloc_" + st.Type - } - } else { - for _, st := range p.SampleType { - st.Type = "inuse_" + st.Type - } - } - - locs := make(map[uint64]*Location) - for { - l, err = r.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, err - } - - if l == "" { - break - } - } - - if isSpaceOrComment(l) { - continue - } - l = strings.TrimSpace(l) - - if sectionTrigger(l) != unrecognizedSection { - break - } - - value, blocksize, addrs, err := parseHeapSample(l, p.Period, sampling) - if err != nil 
{ - return nil, err - } - var sloc []*Location - for _, addr := range addrs { - // Addresses from stack traces point to the next instruction after - // each call. Adjust by -1 to land somewhere on the actual call. - addr-- - loc := locs[addr] - if locs[addr] == nil { - loc = &Location{ - Address: addr, - } - p.Location = append(p.Location, loc) - locs[addr] = loc - } - sloc = append(sloc, loc) - } - - p.Sample = append(p.Sample, &Sample{ - Value: value, - Location: sloc, - NumLabel: map[string][]int64{"bytes": {blocksize}}, - }) - } - - if err = parseAdditionalSections(l, r, p); err != nil { - return nil, err - } - return p, nil -} - -// parseHeapSample parses a single row from a heap profile into a new Sample. -func parseHeapSample(line string, rate int64, sampling string) (value []int64, blocksize int64, addrs []uint64, err error) { - sampleData := heapSampleRE.FindStringSubmatch(line) - if len(sampleData) != 6 { - return value, blocksize, addrs, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) - } - - // Use first two values by default; tcmalloc sampling generates the - // same value for both, only the older heap-profile collect separate - // stats for in-use and allocated objects. 
- valueIndex := 1 - if LegacyHeapAllocated { - valueIndex = 3 - } - - var v1, v2 int64 - if v1, err = strconv.ParseInt(sampleData[valueIndex], 10, 64); err != nil { - return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err) - } - if v2, err = strconv.ParseInt(sampleData[valueIndex+1], 10, 64); err != nil { - return value, blocksize, addrs, fmt.Errorf("malformed sample: %s: %v", line, err) - } - - if v1 == 0 { - if v2 != 0 { - return value, blocksize, addrs, fmt.Errorf("allocation count was 0 but allocation bytes was %d", v2) - } - } else { - blocksize = v2 / v1 - if sampling == "v2" { - v1, v2 = scaleHeapSample(v1, v2, rate) - } - } - - value = []int64{v1, v2} - addrs = parseHexAddresses(sampleData[5]) - - return value, blocksize, addrs, nil -} - -// extractHexAddresses extracts hex numbers from a string and returns -// them, together with their numeric value, in a slice. -func extractHexAddresses(s string) ([]string, []uint64) { - hexStrings := hexNumberRE.FindAllString(s, -1) - var ids []uint64 - for _, s := range hexStrings { - if id, err := strconv.ParseUint(s, 0, 64); err == nil { - ids = append(ids, id) - } else { - // Do not expect any parsing failures due to the regexp matching. - panic("failed to parse hex value:" + s) - } - } - return hexStrings, ids -} - -// parseHexAddresses parses hex numbers from a string and returns them -// in a slice. -func parseHexAddresses(s string) []uint64 { - _, ids := extractHexAddresses(s) - return ids -} - -// scaleHeapSample adjusts the data from a heapz Sample to -// account for its probability of appearing in the collected -// data. heapz profiles are a sampling of the memory allocations -// requests in a program. We estimate the unsampled value by dividing -// each collected sample by its probability of appearing in the -// profile. heapz v2 profiles rely on a poisson process to determine -// which samples to collect, based on the desired average collection -// rate R. 
The probability of a sample of size S to appear in that -// profile is 1-exp(-S/R). -func scaleHeapSample(count, size, rate int64) (int64, int64) { - if count == 0 || size == 0 { - return 0, 0 - } - - if rate <= 1 { - // if rate==1 all samples were collected so no adjustment is needed. - // if rate<1 treat as unknown and skip scaling. - return count, size - } - - avgSize := float64(size) / float64(count) - scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) - - return int64(float64(count) * scale), int64(float64(size) * scale) -} - -// parseContention parses a mutex or contention profile. There are 2 cases: -// "--- contentionz " for legacy C++ profiles (and backwards compatibility) -// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. -// This code converts the text output from runtime into a *Profile. (In the future -// the runtime might write a serialized Profile directly making this unnecessary.) -func parseContention(b []byte) (*Profile, error) { - r := bytes.NewBuffer(b) - var l string - var err error - for { - // Skip past comments and empty lines seeking a real header. - l, err = r.ReadString('\n') - if err != nil { - return nil, err - } - if !isSpaceOrComment(l) { - break - } - } - - if strings.HasPrefix(l, "--- contentionz ") { - return parseCppContention(r) - } else if strings.HasPrefix(l, "--- mutex:") { - return parseCppContention(r) - } else if strings.HasPrefix(l, "--- contention:") { - return parseCppContention(r) - } - return nil, errUnrecognized -} - -// parseCppContention parses the output from synchronization_profiling.cc -// for backward compatibility, and the compatible (non-debug) block profile -// output from the Go runtime. 
-func parseCppContention(r *bytes.Buffer) (*Profile, error) { - p := &Profile{ - PeriodType: &ValueType{Type: "contentions", Unit: "count"}, - Period: 1, - SampleType: []*ValueType{ - {Type: "contentions", Unit: "count"}, - {Type: "delay", Unit: "nanoseconds"}, - }, - } - - var cpuHz int64 - var l string - var err error - // Parse text of the form "attribute = value" before the samples. - const delimiter = '=' - for { - l, err = r.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, err - } - - if l == "" { - break - } - } - if isSpaceOrComment(l) { - continue - } - - if l = strings.TrimSpace(l); l == "" { - continue - } - - if strings.HasPrefix(l, "---") { - break - } - - index := strings.IndexByte(l, delimiter) - if index < 0 { - break - } - key := l[:index] - val := l[index+1:] - - key, val = strings.TrimSpace(key), strings.TrimSpace(val) - var err error - switch key { - case "cycles/second": - if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { - return nil, errUnrecognized - } - case "sampling period": - if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { - return nil, errUnrecognized - } - case "ms since reset": - ms, err := strconv.ParseInt(val, 0, 64) - if err != nil { - return nil, errUnrecognized - } - p.DurationNanos = ms * 1000 * 1000 - case "format": - // CPP contentionz profiles don't have format. - return nil, errUnrecognized - case "resolution": - // CPP contentionz profiles don't have resolution. - return nil, errUnrecognized - case "discarded samples": - default: - return nil, errUnrecognized - } - } - - locs := make(map[uint64]*Location) - for { - if !isSpaceOrComment(l) { - if l = strings.TrimSpace(l); strings.HasPrefix(l, "---") { - break - } - value, addrs, err := parseContentionSample(l, p.Period, cpuHz) - if err != nil { - return nil, err - } - var sloc []*Location - for _, addr := range addrs { - // Addresses from stack traces point to the next instruction after - // each call. 
Adjust by -1 to land somewhere on the actual call. - addr-- - loc := locs[addr] - if locs[addr] == nil { - loc = &Location{ - Address: addr, - } - p.Location = append(p.Location, loc) - locs[addr] = loc - } - sloc = append(sloc, loc) - } - p.Sample = append(p.Sample, &Sample{ - Value: value, - Location: sloc, - }) - } - - if l, err = r.ReadString('\n'); err != nil { - if err != io.EOF { - return nil, err - } - if l == "" { - break - } - } - } - - if err = parseAdditionalSections(l, r, p); err != nil { - return nil, err - } - - return p, nil -} - -// parseContentionSample parses a single row from a contention profile -// into a new Sample. -func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { - sampleData := contentionSampleRE.FindStringSubmatch(line) - if sampleData == nil { - return value, addrs, errUnrecognized - } - - v1, err := strconv.ParseInt(sampleData[1], 10, 64) - if err != nil { - return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err) - } - v2, err := strconv.ParseInt(sampleData[2], 10, 64) - if err != nil { - return value, addrs, fmt.Errorf("malformed sample: %s: %v", line, err) - } - - // Unsample values if period and cpuHz are available. - // - Delays are scaled to cycles and then to nanoseconds. - // - Contentions are scaled to cycles. - if period > 0 { - if cpuHz > 0 { - cpuGHz := float64(cpuHz) / 1e9 - v1 = int64(float64(v1) * float64(period) / cpuGHz) - } - v2 = v2 * period - } - - value = []int64{v2, v1} - addrs = parseHexAddresses(sampleData[3]) - - return value, addrs, nil -} - -// parseThread parses a Threadz profile and returns a new Profile. -func parseThread(b []byte) (*Profile, error) { - r := bytes.NewBuffer(b) - - var line string - var err error - for { - // Skip past comments and empty lines seeking a real header. 
- line, err = r.ReadString('\n') - if err != nil { - return nil, err - } - if !isSpaceOrComment(line) { - break - } - } - - if m := threadzStartRE.FindStringSubmatch(line); m != nil { - // Advance over initial comments until first stack trace. - for { - line, err = r.ReadString('\n') - if err != nil { - if err != io.EOF { - return nil, err - } - - if line == "" { - break - } - } - if sectionTrigger(line) != unrecognizedSection || line[0] == '-' { - break - } - } - } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { - return nil, errUnrecognized - } - - p := &Profile{ - SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, - PeriodType: &ValueType{Type: "thread", Unit: "count"}, - Period: 1, - } - - locs := make(map[uint64]*Location) - // Recognize each thread and populate profile samples. - for sectionTrigger(line) == unrecognizedSection { - if strings.HasPrefix(line, "---- no stack trace for") { - line = "" - break - } - if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { - return nil, errUnrecognized - } - - var addrs []uint64 - line, addrs, err = parseThreadSample(r) - if err != nil { - return nil, errUnrecognized - } - if len(addrs) == 0 { - // We got a --same as previous threads--. Bump counters. - if len(p.Sample) > 0 { - s := p.Sample[len(p.Sample)-1] - s.Value[0]++ - } - continue - } - - var sloc []*Location - for _, addr := range addrs { - // Addresses from stack traces point to the next instruction after - // each call. Adjust by -1 to land somewhere on the actual call. - addr-- - loc := locs[addr] - if locs[addr] == nil { - loc = &Location{ - Address: addr, - } - p.Location = append(p.Location, loc) - locs[addr] = loc - } - sloc = append(sloc, loc) - } - - p.Sample = append(p.Sample, &Sample{ - Value: []int64{1}, - Location: sloc, - }) - } - - if err = parseAdditionalSections(line, r, p); err != nil { - return nil, err - } - - return p, nil -} - -// parseThreadSample parses a symbolized or unsymbolized stack trace. 
-// Returns the first line after the traceback, the sample (or nil if -// it hits a 'same-as-previous' marker) and an error. -func parseThreadSample(b *bytes.Buffer) (nextl string, addrs []uint64, err error) { - var l string - sameAsPrevious := false - for { - if l, err = b.ReadString('\n'); err != nil { - if err != io.EOF { - return "", nil, err - } - if l == "" { - break - } - } - if l = strings.TrimSpace(l); l == "" { - continue - } - - if strings.HasPrefix(l, "---") { - break - } - if strings.Contains(l, "same as previous thread") { - sameAsPrevious = true - continue - } - - addrs = append(addrs, parseHexAddresses(l)...) - } - - if sameAsPrevious { - return l, nil, nil - } - return l, addrs, nil -} - -// parseAdditionalSections parses any additional sections in the -// profile, ignoring any unrecognized sections. -func parseAdditionalSections(l string, b *bytes.Buffer, p *Profile) (err error) { - for { - if sectionTrigger(l) == memoryMapSection { - break - } - // Ignore any unrecognized sections. - if l, err := b.ReadString('\n'); err != nil { - if err != io.EOF { - return err - } - if l == "" { - break - } - } - } - return p.ParseMemoryMap(b) -} - -// ParseMemoryMap parses a memory map in the format of -// /proc/self/maps, and overrides the mappings in the current profile. -// It renumbers the samples and locations in the profile correspondingly. -func (p *Profile) ParseMemoryMap(rd io.Reader) error { - b := bufio.NewReader(rd) - - var attrs []string - var r *strings.Replacer - const delimiter = '=' - for { - l, err := b.ReadString('\n') - if err != nil { - if err != io.EOF { - return err - } - if l == "" { - break - } - } - if l = strings.TrimSpace(l); l == "" { - continue - } - - if r != nil { - l = r.Replace(l) - } - m, err := parseMappingEntry(l) - if err != nil { - if err == errUnrecognized { - // Recognize assignments of the form: attr=value, and replace - // $attr with value on subsequent mappings. 
- idx := strings.IndexByte(l, delimiter) - if idx >= 0 { - attr := l[:idx] - value := l[idx+1:] - attrs = append(attrs, "$"+strings.TrimSpace(attr), strings.TrimSpace(value)) - r = strings.NewReplacer(attrs...) - } - // Ignore any unrecognized entries - continue - } - return err - } - if m == nil || (m.File == "" && len(p.Mapping) != 0) { - // In some cases the first entry may include the address range - // but not the name of the file. It should be followed by - // another entry with the name. - continue - } - if len(p.Mapping) == 1 && p.Mapping[0].File == "" { - // Update the name if this is the entry following that empty one. - p.Mapping[0].File = m.File - continue - } - p.Mapping = append(p.Mapping, m) - } - p.remapLocationIDs() - p.remapFunctionIDs() - p.remapMappingIDs() - return nil -} - -func parseMappingEntry(l string) (*Mapping, error) { - mapping := &Mapping{} - var err error - if me := procMapsRE.FindStringSubmatch(l); len(me) == 9 { - if !strings.Contains(me[3], "x") { - // Skip non-executable entries. 
- return nil, nil - } - if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil { - return nil, errUnrecognized - } - if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil { - return nil, errUnrecognized - } - if me[4] != "" { - if mapping.Offset, err = strconv.ParseUint(me[4], 16, 64); err != nil { - return nil, errUnrecognized - } - } - mapping.File = me[8] - return mapping, nil - } - - if me := briefMapsRE.FindStringSubmatch(l); len(me) == 6 { - if mapping.Start, err = strconv.ParseUint(me[1], 16, 64); err != nil { - return nil, errUnrecognized - } - if mapping.Limit, err = strconv.ParseUint(me[2], 16, 64); err != nil { - return nil, errUnrecognized - } - mapping.File = me[3] - if me[5] != "" { - if mapping.Offset, err = strconv.ParseUint(me[5], 16, 64); err != nil { - return nil, errUnrecognized - } - } - return mapping, nil - } - - return nil, errUnrecognized -} - -type sectionType int - -const ( - unrecognizedSection sectionType = iota - memoryMapSection -) - -var memoryMapTriggers = []string{ - "--- Memory map: ---", - "MAPPED_LIBRARIES:", -} - -func sectionTrigger(line string) sectionType { - for _, trigger := range memoryMapTriggers { - if strings.Contains(line, trigger) { - return memoryMapSection - } - } - return unrecognizedSection -} - -func (p *Profile) addLegacyFrameInfo() { - switch { - case isProfileType(p, heapzSampleTypes) || - isProfileType(p, heapzInUseSampleTypes) || - isProfileType(p, heapzAllocSampleTypes): - p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr - case isProfileType(p, contentionzSampleTypes): - p.DropFrames, p.KeepFrames = lockRxStr, "" - default: - p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" - } -} - -var heapzSampleTypes = []string{"allocations", "size"} // early Go pprof profiles -var heapzInUseSampleTypes = []string{"inuse_objects", "inuse_space"} -var heapzAllocSampleTypes = []string{"alloc_objects", "alloc_space"} -var contentionzSampleTypes = []string{"contentions", "delay"} - 
-func isProfileType(p *Profile, t []string) bool { - st := p.SampleType - if len(st) != len(t) { - return false - } - - for i := range st { - if st[i].Type != t[i] { - return false - } - } - return true -} - -var allocRxStr = strings.Join([]string{ - // POSIX entry points. - `calloc`, - `cfree`, - `malloc`, - `free`, - `memalign`, - `do_memalign`, - `(__)?posix_memalign`, - `pvalloc`, - `valloc`, - `realloc`, - - // TC malloc. - `tcmalloc::.*`, - `tc_calloc`, - `tc_cfree`, - `tc_malloc`, - `tc_free`, - `tc_memalign`, - `tc_posix_memalign`, - `tc_pvalloc`, - `tc_valloc`, - `tc_realloc`, - `tc_new`, - `tc_delete`, - `tc_newarray`, - `tc_deletearray`, - `tc_new_nothrow`, - `tc_newarray_nothrow`, - - // Memory-allocation routines on OS X. - `malloc_zone_malloc`, - `malloc_zone_calloc`, - `malloc_zone_valloc`, - `malloc_zone_realloc`, - `malloc_zone_memalign`, - `malloc_zone_free`, - - // Go runtime - `runtime\..*`, - - // Other misc. memory allocation routines - `BaseArena::.*`, - `(::)?do_malloc_no_errno`, - `(::)?do_malloc_pages`, - `(::)?do_malloc`, - `DoSampledAllocation`, - `MallocedMemBlock::MallocedMemBlock`, - `_M_allocate`, - `__builtin_(vec_)?delete`, - `__builtin_(vec_)?new`, - `__gnu_cxx::new_allocator::allocate`, - `__libc_malloc`, - `__malloc_alloc_template::allocate`, - `allocate`, - `cpp_alloc`, - `operator new(\[\])?`, - `simple_alloc::allocate`, -}, `|`) - -var allocSkipRxStr = strings.Join([]string{ - // Preserve Go runtime frames that appear in the middle/bottom of - // the stack. 
- `runtime\.panic`, - `runtime\.reflectcall`, - `runtime\.call[0-9]*`, -}, `|`) - -var cpuProfilerRxStr = strings.Join([]string{ - `ProfileData::Add`, - `ProfileData::prof_handler`, - `CpuProfiler::prof_handler`, - `__pthread_sighandler`, - `__restore`, -}, `|`) - -var lockRxStr = strings.Join([]string{ - `RecordLockProfileData`, - `(base::)?RecordLockProfileData.*`, - `(base::)?SubmitMutexProfileData.*`, - `(base::)?SubmitSpinLockProfileData.*`, - `(Mutex::)?AwaitCommon.*`, - `(Mutex::)?Unlock.*`, - `(Mutex::)?UnlockSlow.*`, - `(Mutex::)?ReaderUnlock.*`, - `(MutexLock::)?~MutexLock.*`, - `(SpinLock::)?Unlock.*`, - `(SpinLock::)?SlowUnlock.*`, - `(SpinLockHolder::)?~SpinLockHolder.*`, -}, `|`) diff --git a/src/internal/profile/profile.go b/src/internal/profile/profile.go index c779bb2b11..afd1dd72ee 100644 --- a/src/internal/profile/profile.go +++ b/src/internal/profile/profile.go @@ -11,7 +11,6 @@ import ( "bytes" "compress/gzip" "fmt" - "internal/lazyregexp" "io" "strings" "time" @@ -120,16 +119,14 @@ type Function struct { filenameX int64 } -// Parse parses a profile and checks for its validity. The input -// may be a gzip-compressed encoded protobuf or one of many legacy -// profile formats which may be unsupported in the future. +// Parse parses a profile and checks for its validity. The input must be an +// encoded pprof protobuf, which may optionally be gzip-compressed. 
func Parse(r io.Reader) (*Profile, error) { orig, err := io.ReadAll(r) if err != nil { return nil, err } - var p *Profile if len(orig) >= 2 && orig[0] == 0x1f && orig[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(orig)) if err != nil { @@ -141,10 +138,10 @@ func Parse(r io.Reader) (*Profile, error) { } orig = data } - if p, err = parseUncompressed(orig); err != nil { - if p, err = parseLegacy(orig); err != nil { - return nil, fmt.Errorf("parsing profile: %v", err) - } + + p, err := parseUncompressed(orig) + if err != nil { + return nil, fmt.Errorf("parsing profile: %w", err) } if err := p.CheckValid(); err != nil { @@ -153,33 +150,14 @@ func Parse(r io.Reader) (*Profile, error) { return p, nil } -var errUnrecognized = fmt.Errorf("unrecognized profile format") var errMalformed = fmt.Errorf("malformed profile format") - -func parseLegacy(data []byte) (*Profile, error) { - parsers := []func([]byte) (*Profile, error){ - parseCPU, - parseHeap, - parseGoCount, // goroutine, threadcreate - parseThread, - parseContention, - } - - for _, parser := range parsers { - p, err := parser(data) - if err == nil { - p.setMain() - p.addLegacyFrameInfo() - return p, nil - } - if err != errUnrecognized { - return nil, err - } - } - return nil, errUnrecognized -} +var ErrNoData = fmt.Errorf("empty input file") func parseUncompressed(data []byte) (*Profile, error) { + if len(data) == 0 { + return nil, ErrNoData + } + p := &Profile{} if err := unmarshal(data, p); err != nil { return nil, err @@ -192,29 +170,6 @@ func parseUncompressed(data []byte) (*Profile, error) { return p, nil } -var libRx = lazyregexp.New(`([.]so$|[.]so[._][0-9]+)`) - -// setMain scans Mapping entries and guesses which entry is main -// because legacy profiles don't obey the convention of putting main -// first. 
-func (p *Profile) setMain() { - for i := 0; i < len(p.Mapping); i++ { - file := strings.TrimSpace(strings.ReplaceAll(p.Mapping[i].File, "(deleted)", "")) - if len(file) == 0 { - continue - } - if len(libRx.FindStringSubmatch(file)) > 0 { - continue - } - if strings.HasPrefix(file, "[") { - continue - } - // Swap what we guess is main to position 0. - p.Mapping[i], p.Mapping[0] = p.Mapping[0], p.Mapping[i] - break - } -} - // Write writes the profile as a gzip-compressed marshaled protobuf. func (p *Profile) Write(w io.Writer) error { p.preEncode() diff --git a/src/internal/profile/profile_test.go b/src/internal/profile/profile_test.go deleted file mode 100644 index e1963f3351..0000000000 --- a/src/internal/profile/profile_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package profile - -import ( - "bytes" - "testing" -) - -func TestEmptyProfile(t *testing.T) { - var buf bytes.Buffer - p, err := Parse(&buf) - if err != nil { - t.Error("Want no error, got", err) - } - if p == nil { - t.Fatal("Want a valid profile, got ") - } - if !p.Empty() { - t.Errorf("Profile should be empty, got %#v", p) - } -} - -func TestParseContention(t *testing.T) { - tests := []struct { - name string - in string - wantErr bool - }{ - { - name: "valid", - in: `--- mutex: -cycles/second=3491920901 -sampling period=1 -43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31 -34035731690 15760 @ 0x45e851 0x45f764 0x4a2b17 0x44ea31 -`, - }, - { - name: "valid with comment", - in: `--- mutex: -cycles/second=3491920901 -sampling period=1 -43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31 -# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126 -# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125 -# 0x4a2be0 main.main.func3+0x70 /go/src/internal/pprof/profile/a_binary.go:58 - -34035731690 15760 @ 0x45e851 
0x45f764 0x4a2b17 0x44ea31 -# 0x45e850 sync.(*Mutex).Unlock+0x80 /go/src/sync/mutex.go:126 -# 0x45f763 sync.(*RWMutex).Unlock+0x83 /go/src/sync/rwmutex.go:125 -# 0x4a2b16 main.main.func2+0xd6 /go/src/internal/pprof/profile/a_binary.go:48 -`, - }, - { - name: "empty", - in: `--- mutex:`, - wantErr: true, - }, - { - name: "invalid header", - in: `--- channel: -43227965305 1659640 @ 0x45e851 0x45f764 0x4a2be1 0x44ea31`, - wantErr: true, - }, - } - for _, tc := range tests { - _, err := parseContention([]byte(tc.in)) - if tc.wantErr && err == nil { - t.Errorf("parseContention(%q) succeeded unexpectedly", tc.name) - } - if !tc.wantErr && err != nil { - t.Errorf("parseContention(%q) failed unexpectedly: %v", tc.name, err) - } - } - -} diff --git a/src/internal/reflectlite/swapper.go b/src/internal/reflectlite/swapper.go index ac17d9bbc4..e5ea535d5f 100644 --- a/src/internal/reflectlite/swapper.go +++ b/src/internal/reflectlite/swapper.go @@ -33,7 +33,7 @@ func Swapper(slice any) func(i, j int) { typ := v.Type().Elem().common() size := typ.Size() - hasPtr := typ.PtrBytes != 0 + hasPtr := typ.Pointers() // Some common & small cases, without using memmove: if hasPtr { diff --git a/src/internal/reflectlite/value.go b/src/internal/reflectlite/value.go index c47e5ea12b..f4f15d8e5f 100644 --- a/src/internal/reflectlite/value.go +++ b/src/internal/reflectlite/value.go @@ -123,8 +123,6 @@ func packEface(v Value) any { // Value is indirect, and so is the interface we're making. ptr := v.ptr if v.flag&flagAddr != 0 { - // TODO: pass safe boolean from valueInterface so - // we don't need to copy if safe==true? c := unsafe_New(t) typedmemmove(t, c, ptr) ptr = c @@ -285,7 +283,6 @@ func valueInterface(v Value) any { })(v.ptr) } - // TODO: pass safe to packEface so we don't need to copy if safe==true? 
return packEface(v) } diff --git a/src/runtime/internal/syscall/asm_linux_386.s b/src/internal/runtime/syscall/asm_linux_386.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_386.s rename to src/internal/runtime/syscall/asm_linux_386.s diff --git a/src/runtime/internal/syscall/asm_linux_amd64.s b/src/internal/runtime/syscall/asm_linux_amd64.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_amd64.s rename to src/internal/runtime/syscall/asm_linux_amd64.s diff --git a/src/runtime/internal/syscall/asm_linux_arm.s b/src/internal/runtime/syscall/asm_linux_arm.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_arm.s rename to src/internal/runtime/syscall/asm_linux_arm.s diff --git a/src/runtime/internal/syscall/asm_linux_arm64.s b/src/internal/runtime/syscall/asm_linux_arm64.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_arm64.s rename to src/internal/runtime/syscall/asm_linux_arm64.s diff --git a/src/internal/runtime/syscall/asm_linux_loong64.s b/src/internal/runtime/syscall/asm_linux_loong64.s new file mode 100644 index 0000000000..11c5bc2468 --- /dev/null +++ b/src/internal/runtime/syscall/asm_linux_loong64.s @@ -0,0 +1,68 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) +// +// We need to convert to the syscall ABI. 
+// +// arg | ABIInternal | Syscall +// --------------------------- +// num | R4 | R11 +// a1 | R5 | R4 +// a2 | R6 | R5 +// a3 | R7 | R6 +// a4 | R8 | R7 +// a5 | R9 | R8 +// a6 | R10 | R9 +// +// r1 | R4 | R4 +// r2 | R5 | R5 +// err | R6 | part of R4 +TEXT ·Syscall6(SB),NOSPLIT,$0-80 +#ifdef GOEXPERIMENT_regabiargs + MOVV R4, R11 // syscall entry + MOVV R5, R4 + MOVV R6, R5 + MOVV R7, R6 + MOVV R8, R7 + MOVV R9, R8 + MOVV R10, R9 +#else + MOVV num+0(FP), R11 // syscall entry + MOVV a1+8(FP), R4 + MOVV a2+16(FP), R5 + MOVV a3+24(FP), R6 + MOVV a4+32(FP), R7 + MOVV a5+40(FP), R8 + MOVV a6+48(FP), R9 +#endif + SYSCALL +#ifdef GOEXPERIMENT_regabiargs + MOVV R0, R5 // r2 is not used. Always set to 0. + MOVW $-4096, R12 + BGEU R12, R4, ok + SUBVU R4, R0, R6 // errno + MOVV $-1, R4 // r1 +#else + MOVW $-4096, R12 + BGEU R12, R4, ok + MOVV $-1, R12 + MOVV R12, r1+56(FP) + MOVV R0, r2+64(FP) + SUBVU R4, R0, R4 + MOVV R4, errno+72(FP) +#endif + RET +ok: +#ifdef GOEXPERIMENT_regabiargs + // r1 already in R4 + MOVV R0, R6 // errno +#else + MOVV R4, r1+56(FP) + MOVV R0, r2+64(FP) // r2 is not used. Always set to 0. 
+ MOVV R0, errno+72(FP) +#endif + RET diff --git a/src/runtime/internal/syscall/asm_linux_mips64x.s b/src/internal/runtime/syscall/asm_linux_mips64x.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_mips64x.s rename to src/internal/runtime/syscall/asm_linux_mips64x.s diff --git a/src/runtime/internal/syscall/asm_linux_mipsx.s b/src/internal/runtime/syscall/asm_linux_mipsx.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_mipsx.s rename to src/internal/runtime/syscall/asm_linux_mipsx.s diff --git a/src/runtime/internal/syscall/asm_linux_ppc64x.s b/src/internal/runtime/syscall/asm_linux_ppc64x.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_ppc64x.s rename to src/internal/runtime/syscall/asm_linux_ppc64x.s diff --git a/src/runtime/internal/syscall/asm_linux_riscv64.s b/src/internal/runtime/syscall/asm_linux_riscv64.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_riscv64.s rename to src/internal/runtime/syscall/asm_linux_riscv64.s diff --git a/src/runtime/internal/syscall/asm_linux_s390x.s b/src/internal/runtime/syscall/asm_linux_s390x.s similarity index 100% rename from src/runtime/internal/syscall/asm_linux_s390x.s rename to src/internal/runtime/syscall/asm_linux_s390x.s diff --git a/src/internal/runtime/syscall/defs_linux.go b/src/internal/runtime/syscall/defs_linux.go new file mode 100644 index 0000000000..b2e36a244f --- /dev/null +++ b/src/internal/runtime/syscall/defs_linux.go @@ -0,0 +1,19 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package syscall + +const ( + EPOLLIN = 0x1 + EPOLLOUT = 0x4 + EPOLLERR = 0x8 + EPOLLHUP = 0x10 + EPOLLRDHUP = 0x2000 + EPOLLET = 0x80000000 + EPOLL_CLOEXEC = 0x80000 + EPOLL_CTL_ADD = 0x1 + EPOLL_CTL_DEL = 0x2 + EPOLL_CTL_MOD = 0x3 + EFD_CLOEXEC = 0x80000 +) diff --git a/src/runtime/internal/syscall/defs_linux_386.go b/src/internal/runtime/syscall/defs_linux_386.go similarity index 63% rename from src/runtime/internal/syscall/defs_linux_386.go rename to src/internal/runtime/syscall/defs_linux_386.go index dc723a60b2..613dc77d59 100644 --- a/src/runtime/internal/syscall/defs_linux_386.go +++ b/src/internal/runtime/syscall/defs_linux_386.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 319 SYS_EPOLL_CREATE1 = 329 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 328 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_amd64.go b/src/internal/runtime/syscall/defs_linux_amd64.go similarity index 63% rename from src/runtime/internal/syscall/defs_linux_amd64.go rename to src/internal/runtime/syscall/defs_linux_amd64.go index 886eb5bda2..2ba3128813 100644 --- a/src/runtime/internal/syscall/defs_linux_amd64.go +++ b/src/internal/runtime/syscall/defs_linux_amd64.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 281 SYS_EPOLL_CREATE1 = 291 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 290 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_arm.go b/src/internal/runtime/syscall/defs_linux_arm.go similarity index 64% rename from src/runtime/internal/syscall/defs_linux_arm.go rename to 
src/internal/runtime/syscall/defs_linux_arm.go index 8f812a2f68..af3e0510b1 100644 --- a/src/runtime/internal/syscall/defs_linux_arm.go +++ b/src/internal/runtime/syscall/defs_linux_arm.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 346 SYS_EPOLL_CREATE1 = 357 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 356 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_arm64.go b/src/internal/runtime/syscall/defs_linux_arm64.go similarity index 64% rename from src/runtime/internal/syscall/defs_linux_arm64.go rename to src/internal/runtime/syscall/defs_linux_arm64.go index 48e11b0c51..c924f6211a 100644 --- a/src/runtime/internal/syscall/defs_linux_arm64.go +++ b/src/internal/runtime/syscall/defs_linux_arm64.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 22 SYS_FCNTL = 25 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 19 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_loong64.go b/src/internal/runtime/syscall/defs_linux_loong64.go similarity index 64% rename from src/runtime/internal/syscall/defs_linux_loong64.go rename to src/internal/runtime/syscall/defs_linux_loong64.go index b78ef81861..c1a5649a42 100644 --- a/src/runtime/internal/syscall/defs_linux_loong64.go +++ b/src/internal/runtime/syscall/defs_linux_loong64.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 22 SYS_FCNTL = 25 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 19 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 
0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_mips64x.go b/src/internal/runtime/syscall/defs_linux_mips64x.go similarity index 67% rename from src/runtime/internal/syscall/defs_linux_mips64x.go rename to src/internal/runtime/syscall/defs_linux_mips64x.go index 92b49ca969..07c0aba539 100644 --- a/src/runtime/internal/syscall/defs_linux_mips64x.go +++ b/src/internal/runtime/syscall/defs_linux_mips64x.go @@ -12,17 +12,9 @@ const ( SYS_EPOLL_PWAIT = 5272 SYS_EPOLL_CREATE1 = 5285 SYS_EPOLL_PWAIT2 = 5441 + SYS_EVENTFD2 = 5284 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x80 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_mipsx.go b/src/internal/runtime/syscall/defs_linux_mipsx.go similarity index 66% rename from src/runtime/internal/syscall/defs_linux_mipsx.go rename to src/internal/runtime/syscall/defs_linux_mipsx.go index e28d09c7f1..a1bb5d720a 100644 --- a/src/runtime/internal/syscall/defs_linux_mipsx.go +++ b/src/internal/runtime/syscall/defs_linux_mipsx.go @@ -12,17 +12,9 @@ const ( SYS_EPOLL_PWAIT = 4313 SYS_EPOLL_CREATE1 = 4326 SYS_EPOLL_PWAIT2 = 4441 + SYS_EVENTFD2 = 4325 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x80 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_ppc64x.go b/src/internal/runtime/syscall/defs_linux_ppc64x.go similarity index 67% rename from src/runtime/internal/syscall/defs_linux_ppc64x.go rename to src/internal/runtime/syscall/defs_linux_ppc64x.go index a74483eb6d..78558b360f 100644 --- 
a/src/runtime/internal/syscall/defs_linux_ppc64x.go +++ b/src/internal/runtime/syscall/defs_linux_ppc64x.go @@ -12,17 +12,9 @@ const ( SYS_EPOLL_PWAIT = 303 SYS_EPOLL_CREATE1 = 315 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 314 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_riscv64.go b/src/internal/runtime/syscall/defs_linux_riscv64.go similarity index 64% rename from src/runtime/internal/syscall/defs_linux_riscv64.go rename to src/internal/runtime/syscall/defs_linux_riscv64.go index b78ef81861..c1a5649a42 100644 --- a/src/runtime/internal/syscall/defs_linux_riscv64.go +++ b/src/internal/runtime/syscall/defs_linux_riscv64.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 22 SYS_FCNTL = 25 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 19 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) type EpollEvent struct { diff --git a/src/runtime/internal/syscall/defs_linux_s390x.go b/src/internal/runtime/syscall/defs_linux_s390x.go similarity index 64% rename from src/runtime/internal/syscall/defs_linux_s390x.go rename to src/internal/runtime/syscall/defs_linux_s390x.go index a7bb1ba66d..b539b2d22a 100644 --- a/src/runtime/internal/syscall/defs_linux_s390x.go +++ b/src/internal/runtime/syscall/defs_linux_s390x.go @@ -10,17 +10,9 @@ const ( SYS_EPOLL_PWAIT = 312 SYS_EPOLL_CREATE1 = 327 SYS_EPOLL_PWAIT2 = 441 + SYS_EVENTFD2 = 323 - EPOLLIN = 0x1 - EPOLLOUT = 0x4 - EPOLLERR = 0x8 - EPOLLHUP = 0x10 - EPOLLRDHUP = 0x2000 - EPOLLET = 0x80000000 - EPOLL_CLOEXEC = 0x80000 - EPOLL_CTL_ADD = 0x1 - EPOLL_CTL_DEL = 0x2 - EPOLL_CTL_MOD = 0x3 + EFD_NONBLOCK = 0x800 ) 
type EpollEvent struct { diff --git a/src/runtime/internal/syscall/syscall_linux.go b/src/internal/runtime/syscall/syscall_linux.go similarity index 56% rename from src/runtime/internal/syscall/syscall_linux.go rename to src/internal/runtime/syscall/syscall_linux.go index 7209634edb..83df825169 100644 --- a/src/runtime/internal/syscall/syscall_linux.go +++ b/src/internal/runtime/syscall/syscall_linux.go @@ -15,29 +15,6 @@ import ( // Syscall6 calls system call number 'num' with arguments a1-6. func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) -// syscall_RawSyscall6 is a push linkname to export Syscall6 as -// syscall.RawSyscall6. -// -// //go:uintptrkeepalive because the uintptr argument may be converted pointers -// that need to be kept alive in the caller (this is implied for Syscall6 since -// it has no body). -// -// //go:nosplit because stack copying does not account for uintptrkeepalive, so -// the stack must not grow. Stack copying cannot blindly assume that all -// uintptr arguments are pointers, because some values may look like pointers, -// but not really be pointers, and adjusting their value would break the call. -// -// This is a separate wrapper because we can't export one function as two -// names. The assembly implementations name themselves Syscall6 would not be -// affected by a linkname. 
-// -//go:uintptrkeepalive -//go:nosplit -//go:linkname syscall_RawSyscall6 syscall.RawSyscall6 -func syscall_RawSyscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) { - return Syscall6(num, a1, a2, a3, a4, a5, a6) -} - func EpollCreate1(flags int32) (fd int32, errno uintptr) { r1, _, e := Syscall6(SYS_EPOLL_CREATE1, uintptr(flags), 0, 0, 0, 0, 0) return int32(r1), e @@ -60,3 +37,8 @@ func EpollCtl(epfd, op, fd int32, event *EpollEvent) (errno uintptr) { _, _, e := Syscall6(SYS_EPOLL_CTL, uintptr(epfd), uintptr(op), uintptr(fd), uintptr(unsafe.Pointer(event)), 0, 0) return e } + +func Eventfd(initval, flags int32) (fd int32, errno uintptr) { + r1, _, e := Syscall6(SYS_EVENTFD2, uintptr(initval), uintptr(flags), 0, 0, 0, 0) + return int32(r1), e +} diff --git a/src/runtime/internal/syscall/syscall_linux_test.go b/src/internal/runtime/syscall/syscall_linux_test.go similarity index 93% rename from src/runtime/internal/syscall/syscall_linux_test.go rename to src/internal/runtime/syscall/syscall_linux_test.go index 1976da5c28..14bb31c4b8 100644 --- a/src/runtime/internal/syscall/syscall_linux_test.go +++ b/src/internal/runtime/syscall/syscall_linux_test.go @@ -5,7 +5,7 @@ package syscall_test import ( - "runtime/internal/syscall" + "internal/runtime/syscall" "testing" ) diff --git a/src/internal/safefilepath/path.go b/src/internal/safefilepath/path.go index 0f0a270c30..c2cc6ce5d4 100644 --- a/src/internal/safefilepath/path.go +++ b/src/internal/safefilepath/path.go @@ -7,15 +7,20 @@ package safefilepath import ( "errors" + "io/fs" ) var errInvalidPath = errors.New("invalid path") -// FromFS converts a slash-separated path into an operating-system path. +// Localize is filepath.Localize. // -// FromFS returns an error if the path cannot be represented by the operating -// system. For example, paths containing '\' and ':' characters are rejected -// on Windows. 
-func FromFS(path string) (string, error) { - return fromFS(path) +// It is implemented in this package to avoid a dependency cycle +// between os and file/filepath. +// +// Tests for this function are in path/filepath. +func Localize(path string) (string, error) { + if !fs.ValidPath(path) { + return "", errInvalidPath + } + return localize(path) } diff --git a/src/internal/safefilepath/path_other.go b/src/internal/safefilepath/path_other.go deleted file mode 100644 index 974e7751a2..0000000000 --- a/src/internal/safefilepath/path_other.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !windows - -package safefilepath - -import "runtime" - -func fromFS(path string) (string, error) { - if runtime.GOOS == "plan9" { - if len(path) > 0 && path[0] == '#' { - return "", errInvalidPath - } - } - for i := range path { - if path[i] == 0 { - return "", errInvalidPath - } - } - return path, nil -} diff --git a/src/internal/safefilepath/path_plan9.go b/src/internal/safefilepath/path_plan9.go new file mode 100644 index 0000000000..55627c5102 --- /dev/null +++ b/src/internal/safefilepath/path_plan9.go @@ -0,0 +1,14 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package safefilepath + +import "internal/bytealg" + +func localize(path string) (string, error) { + if path[0] == '#' || bytealg.IndexByteString(path, 0) >= 0 { + return "", errInvalidPath + } + return path, nil +} diff --git a/src/internal/safefilepath/path_test.go b/src/internal/safefilepath/path_test.go deleted file mode 100644 index dc662c18b3..0000000000 --- a/src/internal/safefilepath/path_test.go +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package safefilepath_test - -import ( - "internal/safefilepath" - "os" - "path/filepath" - "runtime" - "testing" -) - -type PathTest struct { - path, result string -} - -const invalid = "" - -var fspathtests = []PathTest{ - {".", "."}, - {"/a/b/c", "/a/b/c"}, - {"a\x00b", invalid}, -} - -var winreservedpathtests = []PathTest{ - {`a\b`, `a\b`}, - {`a:b`, `a:b`}, - {`a/b:c`, `a/b:c`}, - {`NUL`, `NUL`}, - {`./com1`, `./com1`}, - {`a/nul/b`, `a/nul/b`}, -} - -// Whether a reserved name with an extension is reserved or not varies by -// Windows version. -var winreservedextpathtests = []PathTest{ - {"nul.txt", "nul.txt"}, - {"a/nul.txt/b", "a/nul.txt/b"}, -} - -var plan9reservedpathtests = []PathTest{ - {`#c`, `#c`}, -} - -func TestFromFS(t *testing.T) { - switch runtime.GOOS { - case "windows": - if canWriteFile(t, "NUL") { - t.Errorf("can unexpectedly write a file named NUL on Windows") - } - if canWriteFile(t, "nul.txt") { - fspathtests = append(fspathtests, winreservedextpathtests...) - } else { - winreservedpathtests = append(winreservedpathtests, winreservedextpathtests...) - } - for i := range winreservedpathtests { - winreservedpathtests[i].result = invalid - } - for i := range fspathtests { - fspathtests[i].result = filepath.FromSlash(fspathtests[i].result) - } - case "plan9": - for i := range plan9reservedpathtests { - plan9reservedpathtests[i].result = invalid - } - } - tests := fspathtests - tests = append(tests, winreservedpathtests...) - tests = append(tests, plan9reservedpathtests...) 
- for _, test := range tests { - got, err := safefilepath.FromFS(test.path) - if (got == "") != (err != nil) { - t.Errorf(`FromFS(%q) = %q, %v; want "" only if err != nil`, test.path, got, err) - } - if got != test.result { - t.Errorf("FromFS(%q) = %q, %v; want %q", test.path, got, err, test.result) - } - } -} - -func canWriteFile(t *testing.T, name string) bool { - path := filepath.Join(t.TempDir(), name) - os.WriteFile(path, []byte("ok"), 0666) - b, _ := os.ReadFile(path) - return string(b) == "ok" -} diff --git a/src/internal/safefilepath/path_unix.go b/src/internal/safefilepath/path_unix.go new file mode 100644 index 0000000000..873d0935ec --- /dev/null +++ b/src/internal/safefilepath/path_unix.go @@ -0,0 +1,16 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix || (js && wasm) || wasip1 + +package safefilepath + +import "internal/bytealg" + +func localize(path string) (string, error) { + if bytealg.IndexByteString(path, 0) >= 0 { + return "", errInvalidPath + } + return path, nil +} diff --git a/src/internal/safefilepath/path_windows.go b/src/internal/safefilepath/path_windows.go index 7cfd6ce2ea..b626196f11 100644 --- a/src/internal/safefilepath/path_windows.go +++ b/src/internal/safefilepath/path_windows.go @@ -5,36 +5,31 @@ package safefilepath import ( + "internal/bytealg" "syscall" - "unicode/utf8" ) -func fromFS(path string) (string, error) { - if !utf8.ValidString(path) { - return "", errInvalidPath - } - for len(path) > 1 && path[0] == '/' && path[1] == '/' { - path = path[1:] +func localize(path string) (string, error) { + for i := 0; i < len(path); i++ { + switch path[i] { + case ':', '\\', 0: + return "", errInvalidPath + } } containsSlash := false for p := path; p != ""; { // Find the next path element. 
- i := 0 - for i < len(p) && p[i] != '/' { - switch p[i] { - case 0, '\\', ':': - return "", errInvalidPath - } - i++ - } - part := p[:i] - if i < len(p) { - containsSlash = true - p = p[i+1:] - } else { + var element string + i := bytealg.IndexByteString(p, '/') + if i < 0 { + element = p p = "" + } else { + containsSlash = true + element = p[:i] + p = p[i+1:] } - if IsReservedName(part) { + if IsReservedName(element) { return "", errInvalidPath } } diff --git a/src/internal/syscall/unix/at_fstatat.go b/src/internal/syscall/unix/at_fstatat.go index 8f25fe9f64..25de336a80 100644 --- a/src/internal/syscall/unix/at_fstatat.go +++ b/src/internal/syscall/unix/at_fstatat.go @@ -24,5 +24,4 @@ func Fstatat(dirfd int, path string, stat *syscall.Stat_t, flags int) error { } return nil - } diff --git a/src/internal/syscall/unix/pidfd_linux.go b/src/internal/syscall/unix/pidfd_linux.go new file mode 100644 index 0000000000..02cfaa062c --- /dev/null +++ b/src/internal/syscall/unix/pidfd_linux.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package unix + +import "syscall" + +func PidFDSendSignal(pidfd uintptr, s syscall.Signal) error { + _, _, errno := syscall.Syscall(pidfdSendSignalTrap, pidfd, uintptr(s), 0) + if errno != 0 { + return errno + } + return nil +} diff --git a/src/internal/syscall/unix/sysnum_linux_386.go b/src/internal/syscall/unix/sysnum_linux_386.go index 2bda08ccf1..9f750a1c03 100644 --- a/src/internal/syscall/unix/sysnum_linux_386.go +++ b/src/internal/syscall/unix/sysnum_linux_386.go @@ -5,6 +5,7 @@ package unix const ( - getrandomTrap uintptr = 355 - copyFileRangeTrap uintptr = 377 + getrandomTrap uintptr = 355 + copyFileRangeTrap uintptr = 377 + pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_amd64.go b/src/internal/syscall/unix/sysnum_linux_amd64.go index ae5239ebfb..706898d41e 100644 --- a/src/internal/syscall/unix/sysnum_linux_amd64.go +++ b/src/internal/syscall/unix/sysnum_linux_amd64.go @@ -5,6 +5,7 @@ package unix const ( - getrandomTrap uintptr = 318 - copyFileRangeTrap uintptr = 326 + getrandomTrap uintptr = 318 + copyFileRangeTrap uintptr = 326 + pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_arm.go b/src/internal/syscall/unix/sysnum_linux_arm.go index acaec05879..c00644b552 100644 --- a/src/internal/syscall/unix/sysnum_linux_arm.go +++ b/src/internal/syscall/unix/sysnum_linux_arm.go @@ -5,6 +5,7 @@ package unix const ( - getrandomTrap uintptr = 384 - copyFileRangeTrap uintptr = 391 + getrandomTrap uintptr = 384 + copyFileRangeTrap uintptr = 391 + pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_generic.go b/src/internal/syscall/unix/sysnum_linux_generic.go index 8c132c6bf5..bf25428e7e 100644 --- a/src/internal/syscall/unix/sysnum_linux_generic.go +++ b/src/internal/syscall/unix/sysnum_linux_generic.go @@ -11,6 +11,7 @@ package unix // means only arm64 loong64 and riscv64 use the standard numbers. 
const ( - getrandomTrap uintptr = 278 - copyFileRangeTrap uintptr = 285 + getrandomTrap uintptr = 278 + copyFileRangeTrap uintptr = 285 + pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_mips64x.go b/src/internal/syscall/unix/sysnum_linux_mips64x.go index bca526d2b9..6a9e238ce3 100644 --- a/src/internal/syscall/unix/sysnum_linux_mips64x.go +++ b/src/internal/syscall/unix/sysnum_linux_mips64x.go @@ -7,6 +7,7 @@ package unix const ( - getrandomTrap uintptr = 5313 - copyFileRangeTrap uintptr = 5320 + getrandomTrap uintptr = 5313 + copyFileRangeTrap uintptr = 5320 + pidfdSendSignalTrap uintptr = 5424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_mipsx.go b/src/internal/syscall/unix/sysnum_linux_mipsx.go index c86195e496..22d38f148e 100644 --- a/src/internal/syscall/unix/sysnum_linux_mipsx.go +++ b/src/internal/syscall/unix/sysnum_linux_mipsx.go @@ -7,6 +7,7 @@ package unix const ( - getrandomTrap uintptr = 4353 - copyFileRangeTrap uintptr = 4360 + getrandomTrap uintptr = 4353 + copyFileRangeTrap uintptr = 4360 + pidfdSendSignalTrap uintptr = 4424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_ppc64x.go b/src/internal/syscall/unix/sysnum_linux_ppc64x.go index a4dcf2bc9d..945ec28c2a 100644 --- a/src/internal/syscall/unix/sysnum_linux_ppc64x.go +++ b/src/internal/syscall/unix/sysnum_linux_ppc64x.go @@ -7,6 +7,7 @@ package unix const ( - getrandomTrap uintptr = 359 - copyFileRangeTrap uintptr = 379 + getrandomTrap uintptr = 359 + copyFileRangeTrap uintptr = 379 + pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/unix/sysnum_linux_s390x.go b/src/internal/syscall/unix/sysnum_linux_s390x.go index bf2c01e4e1..2c74343820 100644 --- a/src/internal/syscall/unix/sysnum_linux_s390x.go +++ b/src/internal/syscall/unix/sysnum_linux_s390x.go @@ -5,6 +5,7 @@ package unix const ( - getrandomTrap uintptr = 349 - copyFileRangeTrap uintptr = 375 + getrandomTrap uintptr = 349 + copyFileRangeTrap uintptr = 375 + 
pidfdSendSignalTrap uintptr = 424 ) diff --git a/src/internal/syscall/windows/registry/registry_test.go b/src/internal/syscall/windows/registry/registry_test.go index afe7a5d1c3..12eae54a8f 100644 --- a/src/internal/syscall/windows/registry/registry_test.go +++ b/src/internal/syscall/windows/registry/registry_test.go @@ -647,9 +647,9 @@ type DynamicTimezoneinformation struct { } var ( - kernel32DLL = syscall.NewLazyDLL("kernel32") + modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetDynamicTimeZoneInformation = kernel32DLL.NewProc("GetDynamicTimeZoneInformation") + procGetDynamicTimeZoneInformation = modkernel32.NewProc("GetDynamicTimeZoneInformation") ) func GetDynamicTimeZoneInformation(dtzi *DynamicTimezoneinformation) (rc uint32, err error) { diff --git a/src/internal/syscall/windows/reparse_windows.go b/src/internal/syscall/windows/reparse_windows.go index 02f32c6752..241dd523c5 100644 --- a/src/internal/syscall/windows/reparse_windows.go +++ b/src/internal/syscall/windows/reparse_windows.go @@ -9,10 +9,13 @@ import ( "unsafe" ) +// Reparse tag values are taken from +// https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-fscc/c8e77b37-3909-4fe6-a4ea-2b9d423b1ee4 const ( FSCTL_SET_REPARSE_POINT = 0x000900A4 IO_REPARSE_TAG_MOUNT_POINT = 0xA0000003 IO_REPARSE_TAG_DEDUP = 0x80000013 + IO_REPARSE_TAG_AF_UNIX = 0x80000023 SYMLINK_FLAG_RELATIVE = 1 ) diff --git a/src/internal/syscall/windows/syscall_windows.go b/src/internal/syscall/windows/syscall_windows.go index 5854ca60b5..03ceb5803f 100644 --- a/src/internal/syscall/windows/syscall_windows.go +++ b/src/internal/syscall/windows/syscall_windows.go @@ -10,6 +10,12 @@ import ( "unsafe" ) +// CanUseLongPaths is true when the OS supports opting into +// proper long path handling without the need for fixups. +// +//go:linkname CanUseLongPaths +var CanUseLongPaths bool + // UTF16PtrToString is like UTF16ToString, but takes *uint16 // as a parameter instead of []uint16. 
func UTF16PtrToString(p *uint16) string { @@ -129,11 +135,22 @@ type SecurityAttributes struct { } type FILE_BASIC_INFO struct { - CreationTime syscall.Filetime - LastAccessTime syscall.Filetime - LastWriteTime syscall.Filetime - ChangedTime syscall.Filetime + CreationTime int64 + LastAccessTime int64 + LastWriteTime int64 + ChangedTime int64 FileAttributes uint32 + + // Pad out to 8-byte alignment. + // + // Without this padding, TestChmod fails due to an argument validation error + // in SetFileInformationByHandle on windows/386. + // + // https://learn.microsoft.com/en-us/cpp/build/reference/zp-struct-member-alignment?view=msvc-170 + // says that “The C/C++ headers in the Windows SDK assume the platform's + // default alignment is used.” What we see here is padding rather than + // alignment, but maybe it is related. + _ uint32 } const ( @@ -150,7 +167,7 @@ const ( //sys GetComputerNameEx(nameformat uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW //sys GetModuleFileName(module syscall.Handle, fn *uint16, len uint32) (n uint32, err error) = kernel32.GetModuleFileNameW -//sys SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf uintptr, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle +//sys SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) = kernel32.SetFileInformationByHandle //sys VirtualQuery(address uintptr, buffer *MemoryBasicInformation, length uintptr) (err error) = kernel32.VirtualQuery //sys GetTempPath2(buflen uint32, buf *uint16) (n uint32, err error) = GetTempPath2W @@ -224,6 +241,7 @@ type WSAMsg struct { } //sys WSASocket(af int32, typ int32, protocol int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = ws2_32.WSASocketW +//sys 
WSAGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) = ws2_32.WSAGetOverlappedResult func loadWSASendRecvMsg() error { sendRecvMsgFunc.once.Do(func() { diff --git a/src/internal/syscall/windows/zsyscall_windows.go b/src/internal/syscall/windows/zsyscall_windows.go index 5a587ad4f1..7d3cd37b92 100644 --- a/src/internal/syscall/windows/zsyscall_windows.go +++ b/src/internal/syscall/windows/zsyscall_windows.go @@ -86,6 +86,7 @@ var ( procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") procGetProfilesDirectoryW = moduserenv.NewProc("GetProfilesDirectoryW") + procWSAGetOverlappedResult = modws2_32.NewProc("WSAGetOverlappedResult") procWSASocketW = modws2_32.NewProc("WSASocketW") ) @@ -342,7 +343,7 @@ func RtlVirtualUnwind(handlerType uint32, baseAddress uintptr, pc uintptr, entry return } -func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf uintptr, bufsize uint32) (err error) { +func SetFileInformationByHandle(handle syscall.Handle, fileInformationClass uint32, buf unsafe.Pointer, bufsize uint32) (err error) { r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(handle), uintptr(fileInformationClass), uintptr(buf), uintptr(bufsize), 0, 0) if r1 == 0 { err = errnoErr(e1) @@ -426,6 +427,18 @@ func GetProfilesDirectory(dir *uint16, dirLen *uint32) (err error) { return } +func WSAGetOverlappedResult(h syscall.Handle, o *syscall.Overlapped, bytes *uint32, wait bool, flags *uint32) (err error) { + var _p0 uint32 + if wait { + _p0 = 1 + } + r1, _, e1 := syscall.Syscall6(procWSAGetOverlappedResult.Addr(), 5, uintptr(h), uintptr(unsafe.Pointer(o)), uintptr(unsafe.Pointer(bytes)), uintptr(_p0), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func WSASocket(af int32, typ int32, protocol int32, protinfo 
*syscall.WSAProtocolInfo, group uint32, flags uint32) (handle syscall.Handle, err error) { r0, _, e1 := syscall.Syscall6(procWSASocketW.Addr(), 6, uintptr(af), uintptr(typ), uintptr(protocol), uintptr(unsafe.Pointer(protinfo)), uintptr(group), uintptr(flags)) handle = syscall.Handle(r0) diff --git a/src/internal/testenv/testenv.go b/src/internal/testenv/testenv.go index 5c8013740e..3b9d2fd1e9 100644 --- a/src/internal/testenv/testenv.go +++ b/src/internal/testenv/testenv.go @@ -189,15 +189,13 @@ func findGOROOT() (string, error) { // If runtime.GOROOT() is non-empty, assume that it is valid. // // (It might not be: for example, the user may have explicitly set GOROOT - // to the wrong directory, or explicitly set GOROOT_FINAL but not GOROOT - // and hasn't moved the tree to GOROOT_FINAL yet. But those cases are + // to the wrong directory. But this case is // rare, and if that happens the user can fix what they broke.) return } // runtime.GOROOT doesn't know where GOROOT is (perhaps because the test - // binary was built with -trimpath, or perhaps because GOROOT_FINAL was set - // without GOROOT and the tree hasn't been moved there yet). + // binary was built with -trimpath). // // Since this is internal/testenv, we can cheat and assume that the caller // is a test of some package in a subdirectory of GOROOT/src. ('go test' @@ -371,6 +369,15 @@ func MustInternalLink(t testing.TB, withCgo bool) { } } +// MustInternalLinkPIE checks whether the current system can link PIE binary using +// internal linking. +// If not, MustInternalLinkPIE calls t.Skip with an explanation. +func MustInternalLinkPIE(t testing.TB) { + if !platform.InternalLinkPIESupported(runtime.GOOS, runtime.GOARCH) { + t.Skipf("skipping test: internal linking for buildmode=pie on %s/%s is not supported", runtime.GOOS, runtime.GOARCH) + } +} + // MustHaveBuildMode reports whether the current system can build programs in // the given build mode. 
// If not, MustHaveBuildMode calls t.Skip with an explanation. diff --git a/src/internal/testenv/testenv_test.go b/src/internal/testenv/testenv_test.go index d39a02b981..769db3a033 100644 --- a/src/internal/testenv/testenv_test.go +++ b/src/internal/testenv/testenv_test.go @@ -78,7 +78,7 @@ func TestHasGoBuild(t *testing.T) { // we will presumably find out about it when those tests fail.) switch runtime.GOOS { case "ios": - if strings.HasSuffix(b, "-corellium") { + if isCorelliumBuilder(b) { // The corellium environment is self-hosting, so it should be able // to build even though real "ios" devices can't exec. } else { @@ -89,7 +89,7 @@ func TestHasGoBuild(t *testing.T) { return } case "android": - if strings.HasSuffix(b, "-emu") && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) { + if isEmulatedBuilder(b) && platform.MustLinkExternal(runtime.GOOS, runtime.GOARCH, false) { // As of 2023-05-02, the test environment on the emulated builders is // missing a C linker. t.Logf("HasGoBuild is false on %s", b) @@ -97,7 +97,7 @@ func TestHasGoBuild(t *testing.T) { } } - if strings.HasSuffix(b, "-noopt") { + if strings.Contains(b, "-noopt") { // The -noopt builder sets GO_GCFLAGS, which causes tests of 'go build' to // be skipped. t.Logf("HasGoBuild is false on %s", b) @@ -153,7 +153,7 @@ func TestMustHaveExec(t *testing.T) { t.Errorf("expected MustHaveExec to skip on %v", runtime.GOOS) } case "ios": - if b := testenv.Builder(); strings.HasSuffix(b, "-corellium") && !hasExec { + if b := testenv.Builder(); isCorelliumBuilder(b) && !hasExec { // Most ios environments can't exec, but the corellium builder can. t.Errorf("expected MustHaveExec not to skip on %v", b) } @@ -186,3 +186,23 @@ func TestCleanCmdEnvPWD(t *testing.T) { } t.Error("PWD not set in cmd.Env") } + +func isCorelliumBuilder(builderName string) bool { + // Support both the old infra's builder names and the LUCI builder names. 
+ // The former's names are ad-hoc so we could maintain this invariant on + // the builder side. The latter's names are structured, and "corellium" will + // appear as a "host" suffix after the GOOS and GOARCH, which always begin + // with an underscore. + return strings.HasSuffix(builderName, "-corellium") || strings.Contains(builderName, "_corellium") +} + +func isEmulatedBuilder(builderName string) bool { + // Support both the old infra's builder names and the LUCI builder names. + // The former's names are ad-hoc so we could maintain this invariant on + // the builder side. The latter's names are structured, and the signifier + // of emulation "emu" will appear as a "host" suffix after the GOOS and + // GOARCH because it modifies the run environment in such a way that + // the target GOOS and GOARCH may not match the host. This suffix always + // begins with an underscore. + return strings.HasSuffix(builderName, "-emu") || strings.Contains(builderName, "_emu") +} diff --git a/src/internal/trace/gc.go b/src/internal/trace/gc.go index e6a23835d6..ca91969cfb 100644 --- a/src/internal/trace/gc.go +++ b/src/internal/trace/gc.go @@ -7,7 +7,6 @@ package trace import ( "container/heap" tracev2 "internal/trace/v2" - "io" "math" "sort" "strings" @@ -212,13 +211,7 @@ func MutatorUtilization(events []*Event, flags UtilFlags) [][]MutatorUtil { // // If the UtilPerProc flag is not given, this always returns a single // utilization function. Otherwise, it returns one function per P. -func MutatorUtilizationV2(trace io.Reader, flags UtilFlags) ([][]MutatorUtil, error) { - // Create a reader. - r, err := tracev2.NewReader(trace) - if err != nil { - return nil, err - } - +func MutatorUtilizationV2(events []tracev2.Event, flags UtilFlags) [][]MutatorUtil { // Set up a bunch of analysis state. type perP struct { // gc > 0 indicates that GC is active on this P.
@@ -255,16 +248,9 @@ func MutatorUtilizationV2(trace io.Reader, flags UtilFlags) ([][]MutatorUtil, er } // Iterate through the trace, tracking mutator utilization. - var lastEv tracev2.Event - for { - // Read a single event. - ev, err := r.ReadEvent() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } + var lastEv *tracev2.Event + for i := range events { + ev := &events[i] lastEv = ev // Process the event. @@ -451,8 +437,8 @@ func MutatorUtilizationV2(trace io.Reader, flags UtilFlags) ([][]MutatorUtil, er } // No events in the stream. - if lastEv.Kind() == tracev2.EventBad { - return nil, nil + if lastEv == nil { + return nil } // Add final 0 utilization event to any remaining series. This @@ -463,7 +449,7 @@ func MutatorUtilizationV2(trace io.Reader, flags UtilFlags) ([][]MutatorUtil, er for i := range ps { out[ps[i].series] = addUtil(out[ps[i].series], mu) } - return out, nil + return out } func addUtil(util []MutatorUtil, mu MutatorUtil) []MutatorUtil { diff --git a/src/internal/trace/gc_test.go b/src/internal/trace/gc_test.go index 2bdcfef006..4bbf1604f5 100644 --- a/src/internal/trace/gc_test.go +++ b/src/internal/trace/gc_test.go @@ -6,7 +6,10 @@ package trace import ( "bytes" + "internal/trace/v2" + tracev2 "internal/trace/v2" "internal/trace/v2/testtrace" + "io" "math" "os" "testing" @@ -133,12 +136,23 @@ func TestMMUTrace(t *testing.T) { if err != nil { t.Fatalf("malformed test %s: bad trace file: %v", testPath, err) } - // Pass the trace through MutatorUtilizationV2. 
- mu, err := MutatorUtilizationV2(r, UtilSTW|UtilBackground|UtilAssist) + var events []tracev2.Event + tr, err := trace.NewReader(r) if err != nil { - t.Fatalf("failed to compute mutator utilization or parse trace: %v", err) + t.Fatalf("malformed test %s: bad trace file: %v", testPath, err) } - check(t, mu) + for { + ev, err := tr.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("malformed test %s: bad trace file: %v", testPath, err) + } + events = append(events, ev) + } + // Pass the trace through MutatorUtilizationV2 and check it. + check(t, MutatorUtilizationV2(events, UtilSTW|UtilBackground|UtilAssist)) }) } diff --git a/src/internal/trace/parser.go b/src/internal/trace/parser.go index 67fa60b8fb..3bbfbebab4 100644 --- a/src/internal/trace/parser.go +++ b/src/internal/trace/parser.go @@ -136,17 +136,23 @@ type rawEvent struct { sargs []string } -// readTrace does wire-format parsing and verification. -// It does not care about specific event types and argument meaning. -func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) { +func ReadVersion(r io.Reader) (ver int, off int, err error) { // Read and validate trace header. var buf [16]byte - off, err := io.ReadFull(r, buf[:]) + off, err = io.ReadFull(r, buf[:]) if err != nil { err = fmt.Errorf("failed to read header: read %v, err %v", off, err) return } ver, err = parseHeader(buf[:]) + return +} + +// readTrace does wire-format parsing and verification. +// It does not care about specific event types and argument meaning. +func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]string, err error) { + var off int + ver, off, err = ReadVersion(r) if err != nil { return } @@ -161,6 +167,7 @@ func readTrace(r io.Reader) (ver int, events []rawEvent, strings map[uint64]stri } // Read events. + var buf [16]byte strings = make(map[uint64]string) for { // Read event type and number of arguments (1 byte). 
diff --git a/src/internal/trace/goroutinesv2.go b/src/internal/trace/summary.go similarity index 67% rename from src/internal/trace/goroutinesv2.go rename to src/internal/trace/summary.go index 6b13f20425..b714e01f4a 100644 --- a/src/internal/trace/goroutinesv2.go +++ b/src/internal/trace/summary.go @@ -6,17 +6,22 @@ package trace import ( tracev2 "internal/trace/v2" - "io" "sort" "time" ) +// Summary is the analysis result produced by the summarizer. +type Summary struct { + Goroutines map[tracev2.GoID]*GoroutineSummary + Tasks map[tracev2.TaskID]*UserTaskSummary +} + // GoroutineSummary contains statistics and execution details of a single goroutine. // (For v2 traces.) type GoroutineSummary struct { ID tracev2.GoID Name string // A non-unique human-friendly identifier for the goroutine. - PC uint64 // The start PC of the goroutine. + PC uint64 // The first PC we saw for the entry function of the goroutine CreationTime tracev2.Time // Timestamp of the first appearance in the trace. StartTime tracev2.Time // Timestamp of the first time it started running. 0 if the goroutine never ran. EndTime tracev2.Time // Timestamp of when the goroutine exited. 0 if the goroutine never exited. @@ -35,6 +40,45 @@ type GoroutineSummary struct { *goroutineSummary } +// UserTaskSummary represents a task in the trace. +type UserTaskSummary struct { + ID tracev2.TaskID + Name string + Parent *UserTaskSummary // nil if the parent is unknown. + Children []*UserTaskSummary + + // Task begin event. An EventTaskBegin event or nil. + Start *tracev2.Event + + // Task end event. Normally EventTaskEnd event or nil. + End *tracev2.Event + + // Logs is a list of tracev2.EventLog events associated with the task. + Logs []*tracev2.Event + + // List of regions in the task, sorted based on the start time. + Regions []*UserRegionSummary + + // Goroutines is the set of goroutines associated with this task.
+ Goroutines map[tracev2.GoID]*GoroutineSummary +} + +// Complete returns true if we have complete information about the task +// from the trace: both a start and an end. +func (s *UserTaskSummary) Complete() bool { + return s.Start != nil && s.End != nil +} + +// Descendents returns a slice consisting of itself (always the first task returned), +// and the transitive closure of all of its children. +func (s *UserTaskSummary) Descendents() []*UserTaskSummary { + descendents := []*UserTaskSummary{s} + for _, child := range s.Children { + descendents = append(descendents, child.Descendents()...) + } + return descendents +} + // UserRegionSummary represents a region and goroutine execution stats // while the region was active. (For v2 traces.) type UserRegionSummary struct { @@ -58,13 +102,51 @@ type UserRegionSummary struct { // GoroutineExecStats contains statistics about a goroutine's execution // during a period of time. type GoroutineExecStats struct { + // These stats are all non-overlapping. ExecTime time.Duration SchedWaitTime time.Duration BlockTimeByReason map[string]time.Duration SyscallTime time.Duration SyscallBlockTime time.Duration - RangeTime map[string]time.Duration - TotalTime time.Duration + + // TotalTime is the duration of the goroutine's presence in the trace. + // Necessarily overlaps with other stats. + TotalTime time.Duration + + // Total time the goroutine spent in certain ranges; may overlap + // with other stats. + RangeTime map[string]time.Duration +} + +func (s GoroutineExecStats) NonOverlappingStats() map[string]time.Duration { + stats := map[string]time.Duration{ + "Execution time": s.ExecTime, + "Sched wait time": s.SchedWaitTime, + "Syscall execution time": s.SyscallTime, + "Block time (syscall)": s.SyscallBlockTime, + "Unknown time": s.UnknownTime(), + } + for reason, dt := range s.BlockTimeByReason { + stats["Block time ("+reason+")"] += dt + } + // N.B. Don't include RangeTime or TotalTime; they overlap with these other + // stats. 
+ return stats +} + +// UnknownTime returns whatever isn't accounted for in TotalTime. +func (s GoroutineExecStats) UnknownTime() time.Duration { + sum := s.ExecTime + s.SchedWaitTime + s.SyscallTime + + s.SyscallBlockTime + for _, dt := range s.BlockTimeByReason { + sum += dt + } + // N.B. Don't include range time. Ranges overlap with + // other stats, whereas these stats are non-overlapping. + if sum < s.TotalTime { + return s.TotalTime - sum + } + return 0 } // sub returns the stats v-s. @@ -171,39 +253,14 @@ type goroutineSummary struct { activeRegions []*UserRegionSummary // stack of active regions } -// SummarizeGoroutines generates statistics for all goroutines in the trace. -func SummarizeGoroutines(trace io.Reader) (map[tracev2.GoID]*GoroutineSummary, error) { - // Create the analysis state. - b := goroutineStatsBuilder{ - gs: make(map[tracev2.GoID]*GoroutineSummary), - syscallingP: make(map[tracev2.ProcID]tracev2.GoID), - syscallingG: make(map[tracev2.GoID]tracev2.ProcID), - rangesP: make(map[rangeP]tracev2.GoID), - } - - // Process the trace. - r, err := tracev2.NewReader(trace) - if err != nil { - return nil, err - } - for { - ev, err := r.ReadEvent() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - b.event(ev) - } - return b.finalize(), nil -} - -// goroutineStatsBuilder constructs per-goroutine time statistics for v2 traces. -type goroutineStatsBuilder struct { +// Summarizer constructs per-goroutine time statistics for v2 traces. +type Summarizer struct { // gs contains the map of goroutine summaries we're building up to return to the caller. gs map[tracev2.GoID]*GoroutineSummary + // tasks contains the map of task summaries we're building up to return to the caller. + tasks map[tracev2.TaskID]*UserTaskSummary + // syscallingP and syscallingG represent a binding between a P and G in a syscall. // Used to correctly identify and clean up after syscalls (blocking or otherwise). 
syscallingP map[tracev2.ProcID]tracev2.GoID @@ -219,22 +276,33 @@ type goroutineStatsBuilder struct { syncTs tracev2.Time // timestamp of the last sync event processed (or the first timestamp in the trace). } +// NewSummarizer creates a new struct to build goroutine stats from a trace. +func NewSummarizer() *Summarizer { + return &Summarizer{ + gs: make(map[tracev2.GoID]*GoroutineSummary), + tasks: make(map[tracev2.TaskID]*UserTaskSummary), + syscallingP: make(map[tracev2.ProcID]tracev2.GoID), + syscallingG: make(map[tracev2.GoID]tracev2.ProcID), + rangesP: make(map[rangeP]tracev2.GoID), + } +} + type rangeP struct { id tracev2.ProcID name string } -// event feeds a single event into the stats builder. -func (b *goroutineStatsBuilder) event(ev tracev2.Event) { - if b.syncTs == 0 { - b.syncTs = ev.Time() +// Event feeds a single event into the stats summarizer. +func (s *Summarizer) Event(ev *tracev2.Event) { + if s.syncTs == 0 { + s.syncTs = ev.Time() } - b.lastTs = ev.Time() + s.lastTs = ev.Time() switch ev.Kind() { // Record sync time for the RangeActive events. case tracev2.EventSync: - b.syncTs = ev.Time() + s.syncTs = ev.Time() // Handle state transitions. case tracev2.EventStateTransition: @@ -250,14 +318,14 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { } // Handle transition out. - g := b.gs[id] + g := s.gs[id] switch old { case tracev2.GoUndetermined, tracev2.GoNotExist: g = &GoroutineSummary{ID: id, goroutineSummary: &goroutineSummary{}} // If we're coming out of GoUndetermined, then the creation time is the // time of the last sync. if old == tracev2.GoUndetermined { - g.CreationTime = b.syncTs + g.CreationTime = s.syncTs } else { g.CreationTime = ev.Time() } @@ -276,14 +344,12 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { // // N.B. ev.Goroutine() will always be NoGoroutine for the // Undetermined case, so this is will simply not fire. 
- if creatorG := b.gs[ev.Goroutine()]; creatorG != nil && len(creatorG.activeRegions) > 0 { + if creatorG := s.gs[ev.Goroutine()]; creatorG != nil && len(creatorG.activeRegions) > 0 { regions := creatorG.activeRegions s := regions[len(regions)-1] - if s.TaskID != tracev2.NoTask { - g.activeRegions = []*UserRegionSummary{{TaskID: s.TaskID, Start: &ev}} - } + g.activeRegions = []*UserRegionSummary{{TaskID: s.TaskID, Start: ev}} } - b.gs[g.ID] = g + s.gs[g.ID] = g case tracev2.GoRunning: // Record execution time as we transition out of running g.ExecTime += ev.Time().Sub(g.lastStartTime) @@ -313,24 +379,31 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { g.lastSyscallBlockTime = 0 // Clear the syscall map. - delete(b.syscallingP, b.syscallingG[id]) - delete(b.syscallingG, id) + delete(s.syscallingP, s.syscallingG[id]) + delete(s.syscallingG, id) } } - // The goroutine hasn't been identified yet. Take any stack we - // can get and identify it by the bottom-most frame of that stack. - if g.PC == 0 { - stk := ev.Stack() + // The goroutine hasn't been identified yet. Take the transition stack + // and identify the goroutine by the root frame of that stack. + // This root frame will be identical for all transitions on this + // goroutine, because it represents its immutable start point. + if g.Name == "" { + stk := st.Stack if stk != tracev2.NoStack { var frame tracev2.StackFrame var ok bool stk.Frames(func(f tracev2.StackFrame) bool { frame = f ok = true - return false + return true }) if ok { + // NB: this PC won't actually be consistent for + // goroutines which existed at the start of the + // trace. The UI doesn't use it directly; this + // mainly serves as an indication that we + // actually saw a call stack for the goroutine g.PC = frame.PC g.Name = frame.Func } @@ -356,10 +429,10 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { // "Forever" is like goroutine death. 
fallthrough case tracev2.GoNotExist: - g.finalize(ev.Time(), &ev) + g.finalize(ev.Time(), ev) case tracev2.GoSyscall: - b.syscallingP[ev.Proc()] = id - b.syscallingG[id] = ev.Proc() + s.syscallingP[ev.Proc()] = id + s.syscallingG[id] = ev.Proc() g.lastSyscallTime = ev.Time() } @@ -369,10 +442,10 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { id := st.Resource.Proc() old, new := st.Proc() if old != new && new == tracev2.ProcIdle { - if goid, ok := b.syscallingP[id]; ok { - g := b.gs[goid] + if goid, ok := s.syscallingP[id]; ok { + g := s.gs[goid] g.lastSyscallBlockTime = ev.Time() - delete(b.syscallingP, id) + delete(s.syscallingP, id) } } } @@ -388,14 +461,14 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { // goroutine blocked often in mark assist will have both high mark assist // and high block times. Those interested in a deeper view can look at the // trace viewer. - g = b.gs[r.Scope.Goroutine()] + g = s.gs[r.Scope.Goroutine()] case tracev2.ResourceProc: // N.B. These ranges are not actually bound to the goroutine, they're // bound to the P. But if we happen to be on the P the whole time, let's // try to attribute it to the goroutine. (e.g. GC sweeps are here.) 
- g = b.gs[ev.Goroutine()] + g = s.gs[ev.Goroutine()] if g != nil { - b.rangesP[rangeP{id: r.Scope.Proc(), name: r.Name}] = ev.Goroutine() + s.rangesP[rangeP{id: r.Scope.Proc(), name: r.Name}] = ev.Goroutine() } } if g == nil { @@ -403,9 +476,9 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { } if ev.Kind() == tracev2.EventRangeActive { if ts := g.lastRangeTime[r.Name]; ts != 0 { - g.RangeTime[r.Name] += b.syncTs.Sub(ts) + g.RangeTime[r.Name] += s.syncTs.Sub(ts) } - g.lastRangeTime[r.Name] = b.syncTs + g.lastRangeTime[r.Name] = s.syncTs } else { g.lastRangeTime[r.Name] = ev.Time() } @@ -414,16 +487,16 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { var g *GoroutineSummary switch r.Scope.Kind { case tracev2.ResourceGoroutine: - g = b.gs[r.Scope.Goroutine()] + g = s.gs[r.Scope.Goroutine()] case tracev2.ResourceProc: rp := rangeP{id: r.Scope.Proc(), name: r.Name} - if goid, ok := b.rangesP[rp]; ok { + if goid, ok := s.rangesP[rp]; ok { if goid == ev.Goroutine() { // As the comment in the RangeBegin case states, this is only OK // if we finish on the same goroutine we started on. - g = b.gs[goid] + g = s.gs[goid] } - delete(b.rangesP, rp) + delete(s.rangesP, rp) } } if g == nil { @@ -438,16 +511,21 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { // Handle user-defined regions. case tracev2.EventRegionBegin: - g := b.gs[ev.Goroutine()] + g := s.gs[ev.Goroutine()] r := ev.Region() - g.activeRegions = append(g.activeRegions, &UserRegionSummary{ + region := &UserRegionSummary{ Name: r.Type, TaskID: r.Task, - Start: &ev, + Start: ev, GoroutineExecStats: g.snapshotStat(ev.Time()), - }) + } + g.activeRegions = append(g.activeRegions, region) + // Associate the region and current goroutine to the task. 
+ task := s.getOrAddTask(r.Task) + task.Regions = append(task.Regions, region) + task.Goroutines[g.ID] = g case tracev2.EventRegionEnd: - g := b.gs[ev.Goroutine()] + g := s.gs[ev.Goroutine()] r := ev.Region() var sd *UserRegionSummary if regionStk := g.activeRegions; len(regionStk) > 0 { @@ -456,21 +534,63 @@ func (b *goroutineStatsBuilder) event(ev tracev2.Event) { sd = regionStk[n-1] regionStk = regionStk[:n-1] g.activeRegions = regionStk + // N.B. No need to add the region to a task; the EventRegionBegin already handled it. } else { // This is an "end" without a start. Just fabricate the region now. sd = &UserRegionSummary{Name: r.Type, TaskID: r.Task} + // Associate the region and current goroutine to the task. + task := s.getOrAddTask(r.Task) + task.Goroutines[g.ID] = g + task.Regions = append(task.Regions, sd) } sd.GoroutineExecStats = g.snapshotStat(ev.Time()).sub(sd.GoroutineExecStats) - sd.End = &ev + sd.End = ev g.Regions = append(g.Regions, sd) + + // Handle tasks and logs. + case tracev2.EventTaskBegin, tracev2.EventTaskEnd: + // Initialize the task. + t := ev.Task() + task := s.getOrAddTask(t.ID) + task.Name = t.Type + task.Goroutines[ev.Goroutine()] = s.gs[ev.Goroutine()] + if ev.Kind() == tracev2.EventTaskBegin { + task.Start = ev + } else { + task.End = ev + } + // Initialize the parent, if one exists and it hasn't been done yet. + // We need to avoid doing it twice, otherwise we could appear twice + // in the parent's Children list. + if t.Parent != tracev2.NoTask && task.Parent == nil { + parent := s.getOrAddTask(t.Parent) + task.Parent = parent + parent.Children = append(parent.Children, task) + } + case tracev2.EventLog: + log := ev.Log() + // Just add the log to the task. We'll create the task if it + // doesn't exist (it's just been mentioned now). 
+ task := s.getOrAddTask(log.Task) + task.Goroutines[ev.Goroutine()] = s.gs[ev.Goroutine()] + task.Logs = append(task.Logs, ev) } } -// finalize indicates to the builder that we're done processing the trace. +func (s *Summarizer) getOrAddTask(id tracev2.TaskID) *UserTaskSummary { + task := s.tasks[id] + if task == nil { + task = &UserTaskSummary{ID: id, Goroutines: make(map[tracev2.GoID]*GoroutineSummary)} + s.tasks[id] = task + } + return task +} + +// Finalize indicates to the summarizer that we're done processing the trace. // It cleans up any remaining state and returns the full summary. -func (b *goroutineStatsBuilder) finalize() map[tracev2.GoID]*GoroutineSummary { - for _, g := range b.gs { - g.finalize(b.lastTs, nil) +func (s *Summarizer) Finalize() *Summary { + for _, g := range s.gs { + g.finalize(s.lastTs, nil) // Sort based on region start time. sort.Slice(g.Regions, func(i, j int) bool { @@ -486,17 +606,16 @@ func (b *goroutineStatsBuilder) finalize() map[tracev2.GoID]*GoroutineSummary { }) g.goroutineSummary = nil } - return b.gs + return &Summary{ + Goroutines: s.gs, + Tasks: s.tasks, + } } // RelatedGoroutinesV2 finds a set of goroutines related to goroutine goid for v2 traces. // The association is based on whether they have synchronized with each other in the Go // scheduler (one has unblocked another). -func RelatedGoroutinesV2(trace io.Reader, goid tracev2.GoID) (map[tracev2.GoID]struct{}, error) { - r, err := tracev2.NewReader(trace) - if err != nil { - return nil, err - } +func RelatedGoroutinesV2(events []tracev2.Event, goid tracev2.GoID) map[tracev2.GoID]struct{} { // Process all the events, looking for transitions of goroutines // out of GoWaiting. If there was an active goroutine when this // happened, then we know that active goroutine unblocked another. 
@@ -506,14 +625,7 @@ func RelatedGoroutinesV2(trace io.Reader, goid tracev2.GoID) (map[tracev2.GoID]s operand tracev2.GoID } var unblockEdges []unblockEdge - for { - ev, err := r.ReadEvent() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } + for _, ev := range events { if ev.Goroutine() == tracev2.NoGoroutine { continue } @@ -551,5 +663,5 @@ func RelatedGoroutinesV2(trace io.Reader, goid tracev2.GoID) (map[tracev2.GoID]s } gmap = gmap1 } - return gmap, nil + return gmap } diff --git a/src/internal/trace/goroutinesv2_test.go b/src/internal/trace/summary_test.go similarity index 52% rename from src/internal/trace/goroutinesv2_test.go rename to src/internal/trace/summary_test.go index 99ec8dd8b0..9978b57d98 100644 --- a/src/internal/trace/goroutinesv2_test.go +++ b/src/internal/trace/summary_test.go @@ -7,18 +7,23 @@ package trace import ( tracev2 "internal/trace/v2" "internal/trace/v2/testtrace" + "io" "testing" ) func TestSummarizeGoroutinesTrace(t *testing.T) { - summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-gc-stress.test") + summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-gc-stress.test").Goroutines var ( hasSchedWaitTime bool hasSyncBlockTime bool hasGCMarkAssistTime bool ) + + assertContainsGoroutine(t, summaries, "runtime.gcBgMarkWorker") + assertContainsGoroutine(t, summaries, "main.main.func1") + for _, summary := range summaries { - basicSummaryChecks(t, summary) + basicGoroutineSummaryChecks(t, summary) hasSchedWaitTime = hasSchedWaitTime || summary.SchedWaitTime > 0 if dt, ok := summary.BlockTimeByReason["sync"]; ok && dt > 0 { hasSyncBlockTime = true @@ -39,7 +44,7 @@ func TestSummarizeGoroutinesTrace(t *testing.T) { } func TestSummarizeGoroutinesRegionsTrace(t *testing.T) { - summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-annotations.test") + summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-annotations.test").Goroutines type region struct { startKind tracev2.EventKind endKind 
tracev2.EventKind @@ -57,7 +62,7 @@ func TestSummarizeGoroutinesRegionsTrace(t *testing.T) { "post-existing region": {tracev2.EventRegionBegin, tracev2.EventBad}, } for _, summary := range summaries { - basicSummaryChecks(t, summary) + basicGoroutineSummaryChecks(t, summary) for _, region := range summary.Regions { want, ok := wantRegions[region.Name] if !ok { @@ -72,7 +77,175 @@ func TestSummarizeGoroutinesRegionsTrace(t *testing.T) { } } -func basicSummaryChecks(t *testing.T, summary *GoroutineSummary) { +func TestSummarizeTasksTrace(t *testing.T) { + summaries := summarizeTraceTest(t, "v2/testdata/tests/go122-annotations-stress.test").Tasks + type task struct { + name string + parent *tracev2.TaskID + children []tracev2.TaskID + logs []tracev2.Log + goroutines []tracev2.GoID + } + parent := func(id tracev2.TaskID) *tracev2.TaskID { + p := new(tracev2.TaskID) + *p = id + return p + } + wantTasks := map[tracev2.TaskID]task{ + tracev2.BackgroundTask: { + // The background task (0) is never any task's parent. + logs: []tracev2.Log{ + {Task: tracev2.BackgroundTask, Category: "log", Message: "before do"}, + {Task: tracev2.BackgroundTask, Category: "log", Message: "before do"}, + }, + goroutines: []tracev2.GoID{1}, + }, + 1: { + // This started before tracing started and has no parents. + // Task 2 is technically a child, but we lost that information. + children: []tracev2.TaskID{3, 7, 16}, + logs: []tracev2.Log{ + {Task: 1, Category: "log", Message: "before do"}, + {Task: 1, Category: "log", Message: "before do"}, + }, + goroutines: []tracev2.GoID{1}, + }, + 2: { + // This started before tracing started and its parent is technically (1), but that information was lost. 
+ children: []tracev2.TaskID{8, 17}, + logs: []tracev2.Log{ + {Task: 2, Category: "log", Message: "before do"}, + {Task: 2, Category: "log", Message: "before do"}, + }, + goroutines: []tracev2.GoID{1}, + }, + 3: { + parent: parent(1), + children: []tracev2.TaskID{10, 19}, + logs: []tracev2.Log{ + {Task: 3, Category: "log", Message: "before do"}, + {Task: 3, Category: "log", Message: "before do"}, + }, + goroutines: []tracev2.GoID{1}, + }, + 4: { + // Explicitly, no parent. + children: []tracev2.TaskID{12, 21}, + logs: []tracev2.Log{ + {Task: 4, Category: "log", Message: "before do"}, + {Task: 4, Category: "log", Message: "before do"}, + }, + goroutines: []tracev2.GoID{1}, + }, + 12: { + parent: parent(4), + children: []tracev2.TaskID{13}, + logs: []tracev2.Log{ + // TODO(mknyszek): This is computed asynchronously in the trace, + // which makes regenerating this test very annoying, since it will + // likely break this test. Resolve this by making the order not matter. + {Task: 12, Category: "log2", Message: "do"}, + {Task: 12, Category: "log", Message: "fanout region4"}, + {Task: 12, Category: "log", Message: "fanout region0"}, + {Task: 12, Category: "log", Message: "fanout region1"}, + {Task: 12, Category: "log", Message: "fanout region2"}, + {Task: 12, Category: "log", Message: "before do"}, + {Task: 12, Category: "log", Message: "fanout region3"}, + }, + goroutines: []tracev2.GoID{1, 5, 6, 7, 8, 9}, + }, + 13: { + // Explicitly, no children. + parent: parent(12), + logs: []tracev2.Log{ + {Task: 13, Category: "log2", Message: "do"}, + }, + goroutines: []tracev2.GoID{7}, + }, + } + for id, summary := range summaries { + want, ok := wantTasks[id] + if !ok { + continue + } + if id != summary.ID { + t.Errorf("ambiguous task %d (or %d?): field likely set incorrectly", id, summary.ID) + } + + // Check parent. 
+ if want.parent != nil { + if summary.Parent == nil { + t.Errorf("expected parent %d for task %d without a parent", *want.parent, id) + } else if summary.Parent.ID != *want.parent { + t.Errorf("bad parent for task %d: want %d, got %d", id, *want.parent, summary.Parent.ID) + } + } else if summary.Parent != nil { + t.Errorf("unexpected parent %d for task %d", summary.Parent.ID, id) + } + + // Check children. + gotChildren := make(map[tracev2.TaskID]struct{}) + for _, child := range summary.Children { + gotChildren[child.ID] = struct{}{} + } + for _, wantChild := range want.children { + if _, ok := gotChildren[wantChild]; ok { + delete(gotChildren, wantChild) + } else { + t.Errorf("expected child task %d for task %d not found", wantChild, id) + } + } + if len(gotChildren) != 0 { + for child := range gotChildren { + t.Errorf("unexpected child task %d for task %d", child, id) + } + } + + // Check logs. + if len(want.logs) != len(summary.Logs) { + t.Errorf("wanted %d logs for task %d, got %d logs instead", len(want.logs), id, len(summary.Logs)) + } else { + for i := range want.logs { + if want.logs[i] != summary.Logs[i].Log() { + t.Errorf("log mismatch: want %#v, got %#v", want.logs[i], summary.Logs[i].Log()) + } + } + } + + // Check goroutines. + if len(want.goroutines) != len(summary.Goroutines) { + t.Errorf("wanted %d goroutines for task %d, got %d goroutines instead", len(want.goroutines), id, len(summary.Goroutines)) + } else { + for _, goid := range want.goroutines { + g, ok := summary.Goroutines[goid] + if !ok { + t.Errorf("want goroutine %d for task %d, not found", goid, id) + continue + } + if g.ID != goid { + t.Errorf("goroutine summary for %d does not match task %d listing of %d", g.ID, id, goid) + } + } + } + + // Marked as seen. 
+ delete(wantTasks, id) + } + if len(wantTasks) != 0 { + t.Errorf("failed to find tasks: %#v", wantTasks) + } +} + +func assertContainsGoroutine(t *testing.T, summaries map[tracev2.GoID]*GoroutineSummary, name string) { + for _, summary := range summaries { + if summary.Name == name { + return + } + } + t.Errorf("missing goroutine %s", name) +} + +func basicGoroutineSummaryChecks(t *testing.T, summary *GoroutineSummary) { if summary.ID == tracev2.NoGoroutine { t.Error("summary found for no goroutine") return @@ -90,16 +263,31 @@ func basicSummaryChecks(t *testing.T, summary *GoroutineSummary) { } } -func summarizeTraceTest(t *testing.T, testPath string) map[tracev2.GoID]*GoroutineSummary { - r, _, err := testtrace.ParseFile(testPath) +func summarizeTraceTest(t *testing.T, testPath string) *Summary { + trace, _, err := testtrace.ParseFile(testPath) if err != nil { t.Fatalf("malformed test %s: bad trace file: %v", testPath, err) } - summaries, err := SummarizeGoroutines(r) + // Create the analysis state. + s := NewSummarizer() + + // Create a reader. + r, err := tracev2.NewReader(trace) if err != nil { - t.Fatalf("failed to process trace %s: %v", testPath, err) + t.Fatalf("failed to create trace reader for %s: %v", testPath, err) } - return summaries + // Process the trace. 
+ for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("failed to process trace %s: %v", testPath, err) + } + s.Event(&ev) + } + return s.Finalize() } func checkRegionEvents(t *testing.T, wantStart, wantEnd tracev2.EventKind, goid tracev2.GoID, region *UserRegionSummary) { @@ -200,15 +388,33 @@ func basicGoroutineExecStatsChecks(t *testing.T, stats *GoroutineExecStats) { func TestRelatedGoroutinesV2Trace(t *testing.T) { testPath := "v2/testdata/tests/go122-gc-stress.test" - r, _, err := testtrace.ParseFile(testPath) + trace, _, err := testtrace.ParseFile(testPath) if err != nil { t.Fatalf("malformed test %s: bad trace file: %v", testPath, err) } - targetg := tracev2.GoID(86) - got, err := RelatedGoroutinesV2(r, targetg) + + // Create a reader. + r, err := tracev2.NewReader(trace) if err != nil { - t.Fatalf("failed to find related goroutines for %s: %v", testPath, err) + t.Fatalf("failed to create trace reader for %s: %v", testPath, err) } + + // Collect all the events. + var events []tracev2.Event + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + t.Fatalf("failed to process trace %s: %v", testPath, err) + } + events = append(events, ev) + } + + // Test the function. + targetg := tracev2.GoID(86) + got := RelatedGoroutinesV2(events, targetg) want := map[tracev2.GoID]struct{}{ tracev2.GoID(86): struct{}{}, // N.B. Result includes target. tracev2.GoID(71): struct{}{}, diff --git a/src/internal/trace/traceviewer/emitter.go b/src/internal/trace/traceviewer/emitter.go new file mode 100644 index 0000000000..c91c743a7b --- /dev/null +++ b/src/internal/trace/traceviewer/emitter.go @@ -0,0 +1,813 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package traceviewer + +import ( + "encoding/json" + "fmt" + "internal/trace" + "internal/trace/traceviewer/format" + "io" + "strconv" + "time" +) + +type TraceConsumer struct { + ConsumeTimeUnit func(unit string) + ConsumeViewerEvent func(v *format.Event, required bool) + ConsumeViewerFrame func(key string, f format.Frame) + Flush func() +} + +// ViewerDataTraceConsumer returns a TraceConsumer that writes to w. The +// startIdx and endIdx are used for splitting large traces. They refer to +// indexes in the traceEvents output array, not the events in the trace input. +func ViewerDataTraceConsumer(w io.Writer, startIdx, endIdx int64) TraceConsumer { + allFrames := make(map[string]format.Frame) + requiredFrames := make(map[string]format.Frame) + enc := json.NewEncoder(w) + written := 0 + index := int64(-1) + + io.WriteString(w, "{") + return TraceConsumer{ + ConsumeTimeUnit: func(unit string) { + io.WriteString(w, `"displayTimeUnit":`) + enc.Encode(unit) + io.WriteString(w, ",") + }, + ConsumeViewerEvent: func(v *format.Event, required bool) { + index++ + if !required && (index < startIdx || index > endIdx) { + // not in the range. Skip! + return + } + WalkStackFrames(allFrames, v.Stack, func(id int) { + s := strconv.Itoa(id) + requiredFrames[s] = allFrames[s] + }) + WalkStackFrames(allFrames, v.EndStack, func(id int) { + s := strconv.Itoa(id) + requiredFrames[s] = allFrames[s] + }) + if written == 0 { + io.WriteString(w, `"traceEvents": [`) + } + if written > 0 { + io.WriteString(w, ",") + } + enc.Encode(v) + // TODO(mknyszek): get rid of the extra \n inserted by enc.Encode. + // Same should be applied to splittingTraceConsumer. 
+ written++ + }, + ConsumeViewerFrame: func(k string, v format.Frame) { + allFrames[k] = v + }, + Flush: func() { + io.WriteString(w, `], "stackFrames":`) + enc.Encode(requiredFrames) + io.WriteString(w, `}`) + }, + } +} + +func SplittingTraceConsumer(max int) (*splitter, TraceConsumer) { + type eventSz struct { + Time float64 + Sz int + Frames []int + } + + var ( + // data.Frames contains only the frames for required events. + data = format.Data{Frames: make(map[string]format.Frame)} + + allFrames = make(map[string]format.Frame) + + sizes []eventSz + cw countingWriter + ) + + s := new(splitter) + + return s, TraceConsumer{ + ConsumeTimeUnit: func(unit string) { + data.TimeUnit = unit + }, + ConsumeViewerEvent: func(v *format.Event, required bool) { + if required { + // Store required events inside data so flush + // can include them in the required part of the + // trace. + data.Events = append(data.Events, v) + WalkStackFrames(allFrames, v.Stack, func(id int) { + s := strconv.Itoa(id) + data.Frames[s] = allFrames[s] + }) + WalkStackFrames(allFrames, v.EndStack, func(id int) { + s := strconv.Itoa(id) + data.Frames[s] = allFrames[s] + }) + return + } + enc := json.NewEncoder(&cw) + enc.Encode(v) + size := eventSz{Time: v.Time, Sz: cw.size + 1} // +1 for ",". + // Add referenced stack frames. Their size is computed + // in flush, where we can dedup across events. + WalkStackFrames(allFrames, v.Stack, func(id int) { + size.Frames = append(size.Frames, id) + }) + WalkStackFrames(allFrames, v.EndStack, func(id int) { + size.Frames = append(size.Frames, id) // This may add duplicates. We'll dedup later. + }) + sizes = append(sizes, size) + cw.size = 0 + }, + ConsumeViewerFrame: func(k string, v format.Frame) { + allFrames[k] = v + }, + Flush: func() { + // Calculate size of the mandatory part of the trace. + // This includes thread names and stack frames for + // required events. 
+ cw.size = 0 + enc := json.NewEncoder(&cw) + enc.Encode(data) + requiredSize := cw.size + + // Then calculate size of each individual event and + // their stack frames, grouping them into ranges. We + // only include stack frames relevant to the events in + // the range to reduce overhead. + + var ( + start = 0 + + eventsSize = 0 + + frames = make(map[string]format.Frame) + framesSize = 0 + ) + for i, ev := range sizes { + eventsSize += ev.Sz + + // Add required stack frames. Note that they + // may already be in the map. + for _, id := range ev.Frames { + s := strconv.Itoa(id) + _, ok := frames[s] + if ok { + continue + } + f := allFrames[s] + frames[s] = f + framesSize += stackFrameEncodedSize(uint(id), f) + } + + total := requiredSize + framesSize + eventsSize + if total < max { + continue + } + + // Reached max size, commit this range and + // start a new range. + startTime := time.Duration(sizes[start].Time * 1000) + endTime := time.Duration(ev.Time * 1000) + s.Ranges = append(s.Ranges, Range{ + Name: fmt.Sprintf("%v-%v", startTime, endTime), + Start: start, + End: i + 1, + StartTime: int64(startTime), + EndTime: int64(endTime), + }) + start = i + 1 + frames = make(map[string]format.Frame) + framesSize = 0 + eventsSize = 0 + } + if len(s.Ranges) <= 1 { + s.Ranges = nil + return + } + + if end := len(sizes) - 1; start < end { + s.Ranges = append(s.Ranges, Range{ + Name: fmt.Sprintf("%v-%v", time.Duration(sizes[start].Time*1000), time.Duration(sizes[end].Time*1000)), + Start: start, + End: end, + StartTime: int64(sizes[start].Time * 1000), + EndTime: int64(sizes[end].Time * 1000), + }) + } + }, + } +} + +type splitter struct { + Ranges []Range +} + +type countingWriter struct { + size int +} + +func (cw *countingWriter) Write(data []byte) (int, error) { + cw.size += len(data) + return len(data), nil +} + +func stackFrameEncodedSize(id uint, f format.Frame) int { + // We want to know the marginal size of traceviewer.Data.Frames for + // each event. 
Running full JSON encoding of the map for each event is + // far too slow. + // + // Since the format is fixed, we can easily compute the size without + // encoding. + // + // A single entry looks like one of the following: + // + // "1":{"name":"main.main:30"}, + // "10":{"name":"pkg.NewSession:173","parent":9}, + // + // The parent is omitted if 0. The trailing comma is omitted from the + // last entry, but we don't need that much precision. + const ( + baseSize = len(`"`) + len(`":{"name":"`) + len(`"},`) + + // Don't count the trailing quote on the name, as that is + // counted in baseSize. + parentBaseSize = len(`,"parent":`) + ) + + size := baseSize + + size += len(f.Name) + + // Bytes for id (always positive). + for id > 0 { + size += 1 + id /= 10 + } + + if f.Parent > 0 { + size += parentBaseSize + // Bytes for parent (always positive). + for f.Parent > 0 { + size += 1 + f.Parent /= 10 + } + } + + return size +} + +// WalkStackFrames calls fn for id and all of its parent frames from allFrames. +func WalkStackFrames(allFrames map[string]format.Frame, id int, fn func(id int)) { + for id != 0 { + f, ok := allFrames[strconv.Itoa(id)] + if !ok { + break + } + fn(id) + id = f.Parent + } +} + +type Mode int + +const ( + ModeGoroutineOriented Mode = 1 << iota + ModeTaskOriented + ModeThreadOriented // Mutually exclusive with ModeGoroutineOriented. +) + +// NewEmitter returns a new Emitter that writes to c. The rangeStart and +// rangeEnd args are used for splitting large traces. 
+func NewEmitter(c TraceConsumer, rangeStart, rangeEnd time.Duration) *Emitter { + c.ConsumeTimeUnit("ns") + + return &Emitter{ + c: c, + rangeStart: rangeStart, + rangeEnd: rangeEnd, + frameTree: frameNode{children: make(map[uint64]frameNode)}, + resources: make(map[uint64]string), + tasks: make(map[uint64]task), + } +} + +type Emitter struct { + c TraceConsumer + rangeStart time.Duration + rangeEnd time.Duration + + heapStats, prevHeapStats heapStats + gstates, prevGstates [gStateCount]int64 + threadStats, prevThreadStats [threadStateCount]int64 + gomaxprocs uint64 + frameTree frameNode + frameSeq int + arrowSeq uint64 + filter func(uint64) bool + resourceType string + resources map[uint64]string + focusResource uint64 + tasks map[uint64]task + asyncSliceSeq uint64 +} + +type task struct { + name string + sortIndex int +} + +func (e *Emitter) Gomaxprocs(v uint64) { + if v > e.gomaxprocs { + e.gomaxprocs = v + } +} + +func (e *Emitter) Resource(id uint64, name string) { + if e.filter != nil && !e.filter(id) { + return + } + e.resources[id] = name +} + +func (e *Emitter) SetResourceType(name string) { + e.resourceType = name +} + +func (e *Emitter) SetResourceFilter(filter func(uint64) bool) { + e.filter = filter +} + +func (e *Emitter) Task(id uint64, name string, sortIndex int) { + e.tasks[id] = task{name, sortIndex} +} + +func (e *Emitter) Slice(s SliceEvent) { + if e.filter != nil && !e.filter(s.Resource) { + return + } + e.slice(s, format.ProcsSection, "") +} + +func (e *Emitter) TaskSlice(s SliceEvent) { + e.slice(s, format.TasksSection, pickTaskColor(s.Resource)) +} + +func (e *Emitter) slice(s SliceEvent, sectionID uint64, cname string) { + if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) { + return + } + e.OptionalEvent(&format.Event{ + Name: s.Name, + Phase: "X", + Time: viewerTime(s.Ts), + Dur: viewerTime(s.Dur), + PID: sectionID, + TID: s.Resource, + Stack: s.Stack, + EndStack: s.EndStack, + Arg: s.Arg, + Cname: cname, + }) +} + +type 
SliceEvent struct { + Name string + Ts time.Duration + Dur time.Duration + Resource uint64 + Stack int + EndStack int + Arg any +} + +func (e *Emitter) AsyncSlice(s AsyncSliceEvent) { + if !e.tsWithinRange(s.Ts) && !e.tsWithinRange(s.Ts+s.Dur) { + return + } + if e.filter != nil && !e.filter(s.Resource) { + return + } + cname := "" + if s.TaskColorIndex != 0 { + cname = pickTaskColor(s.TaskColorIndex) + } + e.asyncSliceSeq++ + e.OptionalEvent(&format.Event{ + Category: s.Category, + Name: s.Name, + Phase: "b", + Time: viewerTime(s.Ts), + TID: s.Resource, + ID: e.asyncSliceSeq, + Scope: s.Scope, + Stack: s.Stack, + Cname: cname, + }) + e.OptionalEvent(&format.Event{ + Category: s.Category, + Name: s.Name, + Phase: "e", + Time: viewerTime(s.Ts + s.Dur), + TID: s.Resource, + ID: e.asyncSliceSeq, + Scope: s.Scope, + Stack: s.EndStack, + Arg: s.Arg, + Cname: cname, + }) +} + +type AsyncSliceEvent struct { + SliceEvent + Category string + Scope string + TaskColorIndex uint64 // Take on the same color as the task with this ID. 
+} + +func (e *Emitter) Instant(i InstantEvent) { + if !e.tsWithinRange(i.Ts) { + return + } + if e.filter != nil && !e.filter(i.Resource) { + return + } + cname := "" + e.OptionalEvent(&format.Event{ + Name: i.Name, + Category: i.Category, + Phase: "I", + Scope: "t", + Time: viewerTime(i.Ts), + PID: format.ProcsSection, + TID: i.Resource, + Stack: i.Stack, + Cname: cname, + Arg: i.Arg, + }) +} + +type InstantEvent struct { + Ts time.Duration + Name string + Category string + Resource uint64 + Stack int + Arg any +} + +func (e *Emitter) Arrow(a ArrowEvent) { + if e.filter != nil && (!e.filter(a.FromResource) || !e.filter(a.ToResource)) { + return + } + e.arrow(a, format.ProcsSection) +} + +func (e *Emitter) TaskArrow(a ArrowEvent) { + e.arrow(a, format.TasksSection) +} + +func (e *Emitter) arrow(a ArrowEvent, sectionID uint64) { + if !e.tsWithinRange(a.Start) || !e.tsWithinRange(a.End) { + return + } + e.arrowSeq++ + e.OptionalEvent(&format.Event{ + Name: a.Name, + Phase: "s", + TID: a.FromResource, + PID: sectionID, + ID: e.arrowSeq, + Time: viewerTime(a.Start), + Stack: a.FromStack, + }) + e.OptionalEvent(&format.Event{ + Name: a.Name, + Phase: "t", + TID: a.ToResource, + PID: sectionID, + ID: e.arrowSeq, + Time: viewerTime(a.End), + }) +} + +type ArrowEvent struct { + Name string + Start time.Duration + End time.Duration + FromResource uint64 + FromStack int + ToResource uint64 +} + +func (e *Emitter) Event(ev *format.Event) { + e.c.ConsumeViewerEvent(ev, true) +} + +func (e *Emitter) HeapAlloc(ts time.Duration, v uint64) { + e.heapStats.heapAlloc = v + e.emitHeapCounters(ts) +} + +func (e *Emitter) Focus(id uint64) { + e.focusResource = id +} + +func (e *Emitter) GoroutineTransition(ts time.Duration, from, to GState) { + e.gstates[from]-- + e.gstates[to]++ + if e.prevGstates == e.gstates { + return + } + if e.tsWithinRange(ts) { + e.OptionalEvent(&format.Event{ + Name: "Goroutines", + Phase: "C", + Time: viewerTime(ts), + PID: 1, + Arg: 
&format.GoroutineCountersArg{ + Running: uint64(e.gstates[GRunning]), + Runnable: uint64(e.gstates[GRunnable]), + GCWaiting: uint64(e.gstates[GWaitingGC]), + }, + }) + } + e.prevGstates = e.gstates +} + +func (e *Emitter) IncThreadStateCount(ts time.Duration, state ThreadState, delta int64) { + e.threadStats[state] += delta + if e.prevThreadStats == e.threadStats { + return + } + if e.tsWithinRange(ts) { + e.OptionalEvent(&format.Event{ + Name: "Threads", + Phase: "C", + Time: viewerTime(ts), + PID: 1, + Arg: &format.ThreadCountersArg{ + Running: int64(e.threadStats[ThreadStateRunning]), + InSyscall: int64(e.threadStats[ThreadStateInSyscall]), + // TODO(mknyszek): Why is InSyscallRuntime not included here? + }, + }) + } + e.prevThreadStats = e.threadStats +} + +func (e *Emitter) HeapGoal(ts time.Duration, v uint64) { + // This cutoff at 1 PiB is a Workaround for https://github.com/golang/go/issues/63864. + // + // TODO(mknyszek): Remove this once the problem has been fixed. + const PB = 1 << 50 + if v > PB { + v = 0 + } + e.heapStats.nextGC = v + e.emitHeapCounters(ts) +} + +func (e *Emitter) emitHeapCounters(ts time.Duration) { + if e.prevHeapStats == e.heapStats { + return + } + diff := uint64(0) + if e.heapStats.nextGC > e.heapStats.heapAlloc { + diff = e.heapStats.nextGC - e.heapStats.heapAlloc + } + if e.tsWithinRange(ts) { + e.OptionalEvent(&format.Event{ + Name: "Heap", + Phase: "C", + Time: viewerTime(ts), + PID: 1, + Arg: &format.HeapCountersArg{Allocated: e.heapStats.heapAlloc, NextGC: diff}, + }) + } + e.prevHeapStats = e.heapStats +} + +// Err returns an error if the emitter is in an invalid state. 
+func (e *Emitter) Err() error { + if e.gstates[GRunnable] < 0 || e.gstates[GRunning] < 0 || e.threadStats[ThreadStateInSyscall] < 0 || e.threadStats[ThreadStateInSyscallRuntime] < 0 { + return fmt.Errorf( + "runnable=%d running=%d insyscall=%d insyscallRuntime=%d", + e.gstates[GRunnable], + e.gstates[GRunning], + e.threadStats[ThreadStateInSyscall], + e.threadStats[ThreadStateInSyscallRuntime], + ) + } + return nil +} + +func (e *Emitter) tsWithinRange(ts time.Duration) bool { + return e.rangeStart <= ts && ts <= e.rangeEnd +} + +// OptionalEvent emits ev if it's within the time range of of the consumer, i.e. +// the selected trace split range. +func (e *Emitter) OptionalEvent(ev *format.Event) { + e.c.ConsumeViewerEvent(ev, false) +} + +func (e *Emitter) Flush() { + e.processMeta(format.StatsSection, "STATS", 0) + + if len(e.tasks) != 0 { + e.processMeta(format.TasksSection, "TASKS", 1) + } + for id, task := range e.tasks { + e.threadMeta(format.TasksSection, id, task.name, task.sortIndex) + } + + e.processMeta(format.ProcsSection, e.resourceType, 2) + + e.threadMeta(format.ProcsSection, trace.GCP, "GC", -6) + e.threadMeta(format.ProcsSection, trace.NetpollP, "Network", -5) + e.threadMeta(format.ProcsSection, trace.TimerP, "Timers", -4) + e.threadMeta(format.ProcsSection, trace.SyscallP, "Syscalls", -3) + + for id, name := range e.resources { + priority := int(id) + if e.focusResource != 0 && id == e.focusResource { + // Put the focus goroutine on top. 
+ priority = -2 + } + e.threadMeta(format.ProcsSection, id, name, priority) + } + + e.c.Flush() +} + +func (e *Emitter) threadMeta(sectionID, tid uint64, name string, priority int) { + e.Event(&format.Event{ + Name: "thread_name", + Phase: "M", + PID: sectionID, + TID: tid, + Arg: &format.NameArg{Name: name}, + }) + e.Event(&format.Event{ + Name: "thread_sort_index", + Phase: "M", + PID: sectionID, + TID: tid, + Arg: &format.SortIndexArg{Index: priority}, + }) +} + +func (e *Emitter) processMeta(sectionID uint64, name string, priority int) { + e.Event(&format.Event{ + Name: "process_name", + Phase: "M", + PID: sectionID, + Arg: &format.NameArg{Name: name}, + }) + e.Event(&format.Event{ + Name: "process_sort_index", + Phase: "M", + PID: sectionID, + Arg: &format.SortIndexArg{Index: priority}, + }) +} + +// Stack emits the given frames and returns a unique id for the stack. No +// pointers to the given data are being retained beyond the call to Stack. +func (e *Emitter) Stack(stk []*trace.Frame) int { + return e.buildBranch(e.frameTree, stk) +} + +// buildBranch builds one branch in the prefix tree rooted at ctx.frameTree. 
+func (e *Emitter) buildBranch(parent frameNode, stk []*trace.Frame) int { + if len(stk) == 0 { + return parent.id + } + last := len(stk) - 1 + frame := stk[last] + stk = stk[:last] + + node, ok := parent.children[frame.PC] + if !ok { + e.frameSeq++ + node.id = e.frameSeq + node.children = make(map[uint64]frameNode) + parent.children[frame.PC] = node + e.c.ConsumeViewerFrame(strconv.Itoa(node.id), format.Frame{Name: fmt.Sprintf("%v:%v", frame.Fn, frame.Line), Parent: parent.id}) + } + return e.buildBranch(node, stk) +} + +type heapStats struct { + heapAlloc uint64 + nextGC uint64 +} + +func viewerTime(t time.Duration) float64 { + return float64(t) / float64(time.Microsecond) +} + +type GState int + +const ( + GDead GState = iota + GRunnable + GRunning + GWaiting + GWaitingGC + + gStateCount +) + +type ThreadState int + +const ( + ThreadStateInSyscall ThreadState = iota + ThreadStateInSyscallRuntime + ThreadStateRunning + + threadStateCount +) + +type frameNode struct { + id int + children map[uint64]frameNode +} + +// Mapping from more reasonable color names to the reserved color names in +// https://github.com/catapult-project/catapult/blob/master/tracing/tracing/base/color_scheme.html#L50 +// The chrome trace viewer allows only those as cname values. 
+const ( + colorLightMauve = "thread_state_uninterruptible" // 182, 125, 143 + colorOrange = "thread_state_iowait" // 255, 140, 0 + colorSeafoamGreen = "thread_state_running" // 126, 200, 148 + colorVistaBlue = "thread_state_runnable" // 133, 160, 210 + colorTan = "thread_state_unknown" // 199, 155, 125 + colorIrisBlue = "background_memory_dump" // 0, 180, 180 + colorMidnightBlue = "light_memory_dump" // 0, 0, 180 + colorDeepMagenta = "detailed_memory_dump" // 180, 0, 180 + colorBlue = "vsync_highlight_color" // 0, 0, 255 + colorGrey = "generic_work" // 125, 125, 125 + colorGreen = "good" // 0, 125, 0 + colorDarkGoldenrod = "bad" // 180, 125, 0 + colorPeach = "terrible" // 180, 0, 0 + colorBlack = "black" // 0, 0, 0 + colorLightGrey = "grey" // 221, 221, 221 + colorWhite = "white" // 255, 255, 255 + colorYellow = "yellow" // 255, 255, 0 + colorOlive = "olive" // 100, 100, 0 + colorCornflowerBlue = "rail_response" // 67, 135, 253 + colorSunsetOrange = "rail_animation" // 244, 74, 63 + colorTangerine = "rail_idle" // 238, 142, 0 + colorShamrockGreen = "rail_load" // 13, 168, 97 + colorGreenishYellow = "startup" // 230, 230, 0 + colorDarkGrey = "heap_dump_stack_frame" // 128, 128, 128 + colorTawny = "heap_dump_child_node_arrow" // 204, 102, 0 + colorLemon = "cq_build_running" // 255, 255, 119 + colorLime = "cq_build_passed" // 153, 238, 102 + colorPink = "cq_build_failed" // 238, 136, 136 + colorSilver = "cq_build_abandoned" // 187, 187, 187 + colorManzGreen = "cq_build_attempt_runnig" // 222, 222, 75 + colorKellyGreen = "cq_build_attempt_passed" // 108, 218, 35 + colorAnotherGrey = "cq_build_attempt_failed" // 187, 187, 187 +) + +var colorForTask = []string{ + colorLightMauve, + colorOrange, + colorSeafoamGreen, + colorVistaBlue, + colorTan, + colorMidnightBlue, + colorIrisBlue, + colorDeepMagenta, + colorGreen, + colorDarkGoldenrod, + colorPeach, + colorOlive, + colorCornflowerBlue, + colorSunsetOrange, + colorTangerine, + colorShamrockGreen, + colorTawny, + 
colorLemon, + colorLime, + colorPink, + colorSilver, + colorManzGreen, + colorKellyGreen, +} + +func pickTaskColor(id uint64) string { + idx := id % uint64(len(colorForTask)) + return colorForTask[idx] +} diff --git a/src/cmd/internal/traceviewer/format.go b/src/internal/trace/traceviewer/format/format.go similarity index 60% rename from src/cmd/internal/traceviewer/format.go rename to src/internal/trace/traceviewer/format/format.go index 3636c1053d..83f3276704 100644 --- a/src/cmd/internal/traceviewer/format.go +++ b/src/internal/trace/traceviewer/format/format.go @@ -7,7 +7,10 @@ // // The official description of the format is in this file: // https://docs.google.com/document/d/1CvAClvFfyA5R-PhYUmn5OOQtYMH4h6I0nSsKchNAySU/preview -package traceviewer +// +// Note: This can't be part of the parent traceviewer package as that would +// throw. go_bootstrap cannot depend on the cgo version of package net in ./make.bash. +package format type Data struct { Events []*Event `json:"traceEvents"` @@ -36,3 +39,41 @@ type Frame struct { Name string `json:"name"` Parent int `json:"parent,omitempty"` } + +type NameArg struct { + Name string `json:"name"` +} + +type BlockedArg struct { + Blocked string `json:"blocked"` +} + +type SortIndexArg struct { + Index int `json:"sort_index"` +} + +type HeapCountersArg struct { + Allocated uint64 + NextGC uint64 +} + +const ( + ProcsSection = 0 // where Goroutines or per-P timelines are presented. + StatsSection = 1 // where counters are presented. + TasksSection = 2 // where Task hierarchy & timeline is presented. 
+) + +type GoroutineCountersArg struct { + Running uint64 + Runnable uint64 + GCWaiting uint64 +} + +type ThreadCountersArg struct { + Running int64 + InSyscall int64 +} + +type ThreadIDArg struct { + ThreadID uint64 +} diff --git a/src/internal/trace/traceviewer/histogram.go b/src/internal/trace/traceviewer/histogram.go new file mode 100644 index 0000000000..d4c8749dc9 --- /dev/null +++ b/src/internal/trace/traceviewer/histogram.go @@ -0,0 +1,86 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package traceviewer + +import ( + "fmt" + "html/template" + "math" + "strings" + "time" +) + +// TimeHistogram is an high-dynamic-range histogram for durations. +type TimeHistogram struct { + Count int + Buckets []int + MinBucket, MaxBucket int +} + +// Five buckets for every power of 10. +var logDiv = math.Log(math.Pow(10, 1.0/5)) + +// Add adds a single sample to the histogram. +func (h *TimeHistogram) Add(d time.Duration) { + var bucket int + if d > 0 { + bucket = int(math.Log(float64(d)) / logDiv) + } + if len(h.Buckets) <= bucket { + h.Buckets = append(h.Buckets, make([]int, bucket-len(h.Buckets)+1)...) + h.Buckets = h.Buckets[:cap(h.Buckets)] + } + h.Buckets[bucket]++ + if bucket < h.MinBucket || h.MaxBucket == 0 { + h.MinBucket = bucket + } + if bucket > h.MaxBucket { + h.MaxBucket = bucket + } + h.Count++ +} + +// BucketMin returns the minimum duration value for a provided bucket. +func (h *TimeHistogram) BucketMin(bucket int) time.Duration { + return time.Duration(math.Exp(float64(bucket) * logDiv)) +} + +// ToHTML renders the histogram as HTML. 
+func (h *TimeHistogram) ToHTML(urlmaker func(min, max time.Duration) string) template.HTML { + if h == nil || h.Count == 0 { + return template.HTML("") + } + + const barWidth = 400 + + maxCount := 0 + for _, count := range h.Buckets { + if count > maxCount { + maxCount = count + } + } + + w := new(strings.Builder) + fmt.Fprintf(w, `
    WhenElapsedGoroutineEvents
    {{$el.WhenString}}{{$el.Duration}} + Task {{$el.ID}} + (goroutine view) + ({{if .Complete}}complete{{else}}incomplete{{end}}) +
    {{.WhenString}}{{elapsed .Elapsed}}{{.Goroutine}}{{.What}}
    `) + for i := h.MinBucket; i <= h.MaxBucket; i++ { + // Tick label. + if h.Buckets[i] > 0 { + fmt.Fprintf(w, ``, urlmaker(h.BucketMin(i), h.BucketMin(i+1)), h.BucketMin(i)) + } else { + fmt.Fprintf(w, ``, h.BucketMin(i)) + } + // Bucket bar. + width := h.Buckets[i] * barWidth / maxCount + fmt.Fprintf(w, ``, width) + // Bucket count. + fmt.Fprintf(w, ``, h.Buckets[i]) + fmt.Fprintf(w, "\n") + + } + // Final tick label. + fmt.Fprintf(w, ``, h.BucketMin(h.MaxBucket+1)) + fmt.Fprintf(w, `
    %s
    %s
     
    %d
    %s
    `) + return template.HTML(w.String()) +} diff --git a/src/internal/trace/traceviewer/http.go b/src/internal/trace/traceviewer/http.go new file mode 100644 index 0000000000..5258db05d8 --- /dev/null +++ b/src/internal/trace/traceviewer/http.go @@ -0,0 +1,422 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package traceviewer + +import ( + "embed" + "fmt" + "html/template" + "net/http" + "strings" +) + +func MainHandler(views []View) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { + if err := templMain.Execute(w, views); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + }) +} + +const CommonStyle = ` +/* See https://github.com/golang/pkgsite/blob/master/static/shared/typography/typography.css */ +body { + font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', Helvetica, Arial, sans-serif, 'Apple Color Emoji', 'Segoe UI Emoji'; + font-size: 1rem; + line-height: normal; + max-width: 9in; + margin: 1em; +} +h1 { font-size: 1.5rem; } +h2 { font-size: 1.375rem; } +h1,h2 { + font-weight: 600; + line-height: 1.25em; + word-break: break-word; +} +p { color: grey85; font-size:85%; } +code, +pre, +textarea.code { + font-family: SFMono-Regular, Consolas, 'Liberation Mono', Menlo, monospace; + font-size: 0.875rem; + line-height: 1.5em; +} + +pre, +textarea.code { + background-color: var(--color-background-accented); + border: var(--border); + border-radius: var(--border-radius); + color: var(--color-text); + overflow-x: auto; + padding: 0.625rem; + tab-size: 4; + white-space: pre; +} +` + +var templMain = template.Must(template.New("").Parse(` + + + +

    cmd/trace: the Go trace event viewer

    +

    + This web server provides various visualizations of an event log gathered during + the execution of a Go program that uses the runtime/trace package. +

    + +

    Event timelines for running goroutines

    +{{range $i, $view := $}} +{{if $view.Ranges}} +{{if eq $i 0}} +

    + Large traces are split into multiple sections of equal data size + (not duration) to avoid overwhelming the visualizer. +

    +{{end}} + +{{else}} + +{{end}} +{{end}} +

    + This view displays a series of timelines for a type of resource. + The "by proc" view consists of a timeline for each of the GOMAXPROCS + logical processors, showing which goroutine (if any) was running on that + logical processor at each moment. + The "by thread" view (if available) consists of a similar timeline for each + OS thread. + + Each goroutine has an identifying number (e.g. G123), main function, + and color. + + A colored bar represents an uninterrupted span of execution. + + Execution of a goroutine may migrate from one logical processor to another, + causing a single colored bar to be horizontally continuous but + vertically displaced. +

    +

    + Clicking on a span reveals information about it, such as its + duration, its causal predecessors and successors, and the stack trace + at the final moment when it yielded the logical processor, for example + because it made a system call or tried to acquire a mutex. + + Directly underneath each bar, a smaller bar or more commonly a fine + vertical line indicates an event occurring during its execution. + Some of these are related to garbage collection; most indicate that + a goroutine yielded its logical processor but then immediately resumed execution + on the same logical processor. Clicking on the event displays the stack trace + at the moment it occurred. +

    +

    + The causal relationships between spans of goroutine execution + can be displayed by clicking the Flow Events button at the top. +

    +

    + At the top ("STATS"), there are three additional timelines that + display statistical information. + + "Goroutines" is a time series of the count of existing goroutines; + clicking on it displays their breakdown by state at that moment: + running, runnable, or waiting. + + "Heap" is a time series of the amount of heap memory allocated (in orange) + and (in green) the allocation limit at which the next GC cycle will begin. + + "Threads" shows the number of kernel threads in existence: there is + always one kernel thread per logical processor, and additional threads + are created for calls to non-Go code such as a system call or a + function written in C. +

    +

    + Above the event trace for the first logical processor are + traces for various runtime-internal events. + + The "GC" bar shows when the garbage collector is running, and in which stage. + Garbage collection may temporarily affect all the logical processors + and the other metrics. + + The "Network", "Timers", and "Syscalls" traces indicate events in + the runtime that cause goroutines to wake up. +

    +

    + The visualization allows you to navigate events at scales ranging from several + seconds to a handful of nanoseconds. + + Consult the documentation for the Chromium Trace Event Profiling Tool + for help navigating the view. +

    + + +

    + This view displays information about each set of goroutines that + shares the same main function. + + Clicking on a main function shows links to the four types of + blocking profile (see below) applied to that subset of goroutines. + + It also shows a table of specific goroutine instances, with various + execution statistics and a link to the event timeline for each one. + + The timeline displays only the selected goroutine and any others it + interacts with via block/unblock events. (The timeline is + goroutine-oriented rather than logical processor-oriented.) +

    + +

    Profiles

    +

    + Each link below displays a global profile in zoomable graph form as + produced by pprof's "web" command. + + In addition there is a link to download the profile for offline + analysis with pprof. + + All four profiles represent causes of delay that prevent a goroutine + from running on a logical processor: because it was waiting for the network, + for a synchronization operation on a mutex or channel, for a system call, + or for a logical processor to become available. +

    + + +

    User-defined tasks and regions

    +

    + The trace API allows a target program to annotate a region of code + within a goroutine, such as a key function, so that its performance + can be analyzed. + + Log events may be + associated with a region to record progress and relevant values. + + The API also allows annotation of higher-level + tasks, + which may involve work across many goroutines. +

    +

    + The links below display, for each region and task, a histogram of its execution times. + + Each histogram bucket contains a sample trace that records the + sequence of events such as goroutine creations, log events, and + subregion start/end times. + + For each task, you can click through to a logical-processor or + goroutine-oriented view showing the tasks and regions on the + timeline. + + Such information may help uncover which steps in a region are + unexpectedly slow, or reveal relationships between the data values + logged in a request and its running time. +

    + + +

    Garbage collection metrics

    + +

    + This chart indicates the maximum GC pause time (the largest x value + for which y is zero), and more generally, the fraction of time that + the processors are available to application goroutines ("mutators"), + for any time window of a specified size, in the worst case. +

    + + +`)) + +type View struct { + Type ViewType + Ranges []Range +} + +type ViewType string + +const ( + ViewProc ViewType = "proc" + ViewThread ViewType = "thread" +) + +func (v View) URL(rangeIdx int) string { + if rangeIdx < 0 { + return fmt.Sprintf("/trace?view=%s", v.Type) + } + return v.Ranges[rangeIdx].URL(v.Type) +} + +type Range struct { + Name string + Start int + End int + StartTime int64 + EndTime int64 +} + +func (r Range) URL(viewType ViewType) string { + return fmt.Sprintf("/trace?view=%s&start=%d&end=%d", viewType, r.Start, r.End) +} + +func TraceHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if err := r.ParseForm(); err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + html := strings.ReplaceAll(templTrace, "{{PARAMS}}", r.Form.Encode()) + w.Write([]byte(html)) + }) +} + +// https://chromium.googlesource.com/catapult/+/9508452e18f130c98499cb4c4f1e1efaedee8962/tracing/docs/embedding-trace-viewer.md +// This is almost verbatim copy of https://chromium-review.googlesource.com/c/catapult/+/2062938/2/tracing/bin/index.html +var templTrace = ` + + + + + + + + + + + + + +` + +//go:embed static/trace_viewer_full.html static/webcomponents.min.js +var staticContent embed.FS + +func StaticHandler() http.Handler { + return http.FileServer(http.FS(staticContent)) +} diff --git a/src/cmd/trace/mmu.go b/src/internal/trace/traceviewer/mmu.go similarity index 83% rename from src/cmd/trace/mmu.go rename to src/internal/trace/traceviewer/mmu.go index b71dcd6411..0bc1233b44 100644 --- a/src/cmd/trace/mmu.go +++ b/src/internal/trace/traceviewer/mmu.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Go Authors. All rights reserved. +// Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
@@ -23,7 +23,7 @@ // could potentially put confidence intervals on these estimates and // render this progressively as we refine the distributions. -package main +package traceviewer import ( "encoding/json" @@ -38,10 +38,25 @@ import ( "time" ) -func init() { - http.HandleFunc("/mmu", httpMMU) - http.HandleFunc("/mmuPlot", httpMMUPlot) - http.HandleFunc("/mmuDetails", httpMMUDetails) +type MutatorUtilFunc func(trace.UtilFlags) ([][]trace.MutatorUtil, error) + +func MMUHandlerFunc(ranges []Range, f MutatorUtilFunc) http.HandlerFunc { + mmu := &mmu{ + cache: make(map[trace.UtilFlags]*mmuCacheEntry), + f: f, + ranges: ranges, + } + return func(w http.ResponseWriter, r *http.Request) { + switch r.FormValue("mode") { + case "plot": + mmu.HandlePlot(w, r) + return + case "details": + mmu.HandleDetails(w, r) + return + } + http.ServeContent(w, r, "", time.Time{}, strings.NewReader(templMMU)) + } } var utilFlagNames = map[string]trace.UtilFlags{ @@ -52,6 +67,14 @@ var utilFlagNames = map[string]trace.UtilFlags{ "sweep": trace.UtilSweep, } +func requestUtilFlags(r *http.Request) trace.UtilFlags { + var flags trace.UtilFlags + for _, flagStr := range strings.Split(r.FormValue("flags"), "|") { + flags |= utilFlagNames[flagStr] + } + return flags +} + type mmuCacheEntry struct { init sync.Once util [][]trace.MutatorUtil @@ -59,51 +82,39 @@ type mmuCacheEntry struct { err error } -var mmuCache struct { - m map[trace.UtilFlags]*mmuCacheEntry - lock sync.Mutex +type mmu struct { + mu sync.Mutex + cache map[trace.UtilFlags]*mmuCacheEntry + f MutatorUtilFunc + ranges []Range } -func init() { - mmuCache.m = make(map[trace.UtilFlags]*mmuCacheEntry) -} - -func getMMUCurve(r *http.Request) ([][]trace.MutatorUtil, *trace.MMUCurve, error) { - var flags trace.UtilFlags - for _, flagStr := range strings.Split(r.FormValue("flags"), "|") { - flags |= utilFlagNames[flagStr] +func (m *mmu) get(flags trace.UtilFlags) ([][]trace.MutatorUtil, *trace.MMUCurve, error) { + m.mu.Lock() + entry := 
m.cache[flags] + if entry == nil { + entry = new(mmuCacheEntry) + m.cache[flags] = entry } + m.mu.Unlock() - mmuCache.lock.Lock() - c := mmuCache.m[flags] - if c == nil { - c = new(mmuCacheEntry) - mmuCache.m[flags] = c - } - mmuCache.lock.Unlock() - - c.init.Do(func() { - events, err := parseEvents() + entry.init.Do(func() { + util, err := m.f(flags) if err != nil { - c.err = err + entry.err = err } else { - c.util = trace.MutatorUtilization(events, flags) - c.mmuCurve = trace.NewMMUCurve(c.util) + entry.util = util + entry.mmuCurve = trace.NewMMUCurve(util) } }) - return c.util, c.mmuCurve, c.err + return entry.util, entry.mmuCurve, entry.err } -// httpMMU serves the MMU plot page. -func httpMMU(w http.ResponseWriter, r *http.Request) { - http.ServeContent(w, r, "", time.Time{}, strings.NewReader(templMMU)) -} - -// httpMMUPlot serves the JSON data for the MMU plot. -func httpMMUPlot(w http.ResponseWriter, r *http.Request) { - mu, mmuCurve, err := getMMUCurve(r) +// HandlePlot serves the JSON data for the MMU plot. 
+func (m *mmu) HandlePlot(w http.ResponseWriter, r *http.Request) { + mu, mmuCurve, err := m.get(requestUtilFlags(r)) if err != nil { - http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError) + http.Error(w, fmt.Sprintf("failed to produce MMU data: %v", err), http.StatusInternalServerError) return } @@ -202,7 +213,7 @@ var templMMU = ` container.css('opacity', '.5'); refreshChart.count++; var seq = refreshChart.count; - $.getJSON('/mmuPlot?flags=' + mmuFlags()) + $.getJSON('?mode=plot&flags=' + mmuFlags()) .fail(function(xhr, status, error) { alert('failed to load plot: ' + status); }) @@ -275,7 +286,7 @@ var templMMU = ` var details = $('#details'); details.empty(); var windowNS = curve[items[0].row][0]; - var url = '/mmuDetails?window=' + windowNS + '&flags=' + mmuFlags(); + var url = '?mode=details&window=' + windowNS + '&flags=' + mmuFlags(); $.getJSON(url) .fail(function(xhr, status, error) { details.text(status + ': ' + url + ' could not be loaded'); @@ -357,11 +368,11 @@ var templMMU = ` ` -// httpMMUDetails serves details of an MMU graph at a particular window. -func httpMMUDetails(w http.ResponseWriter, r *http.Request) { - _, mmuCurve, err := getMMUCurve(r) +// HandleDetails serves details of an MMU graph at a particular window. +func (m *mmu) HandleDetails(w http.ResponseWriter, r *http.Request) { + _, mmuCurve, err := m.get(requestUtilFlags(r)) if err != nil { - http.Error(w, fmt.Sprintf("failed to parse events: %v", err), http.StatusInternalServerError) + http.Error(w, fmt.Sprintf("failed to produce MMU data: %v", err), http.StatusInternalServerError) return } @@ -376,7 +387,7 @@ func httpMMUDetails(w http.ResponseWriter, r *http.Request) { // Construct a link for each window. 
var links []linkedUtilWindow for _, ui := range worst { - links = append(links, newLinkedUtilWindow(ui, time.Duration(window))) + links = append(links, m.newLinkedUtilWindow(ui, time.Duration(window))) } err = json.NewEncoder(w).Encode(links) @@ -391,13 +402,13 @@ type linkedUtilWindow struct { URL string } -func newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow { +func (m *mmu) newLinkedUtilWindow(ui trace.UtilWindow, window time.Duration) linkedUtilWindow { // Find the range containing this window. var r Range - for _, r = range ranges { + for _, r = range m.ranges { if r.EndTime > ui.Time { break } } - return linkedUtilWindow{ui, fmt.Sprintf("%s#%v:%v", r.URL(), float64(ui.Time)/1e6, float64(ui.Time+int64(window))/1e6)} + return linkedUtilWindow{ui, fmt.Sprintf("%s#%v:%v", r.URL(ViewProc), float64(ui.Time)/1e6, float64(ui.Time+int64(window))/1e6)} } diff --git a/src/internal/trace/traceviewer/pprof.go b/src/internal/trace/traceviewer/pprof.go new file mode 100644 index 0000000000..1377b3c614 --- /dev/null +++ b/src/internal/trace/traceviewer/pprof.go @@ -0,0 +1,150 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Serving of pprof-like profiles. + +package traceviewer + +import ( + "bufio" + "fmt" + "internal/profile" + "internal/trace" + "net/http" + "os" + "os/exec" + "path/filepath" + "runtime" + "time" +) + +type ProfileFunc func(r *http.Request) ([]ProfileRecord, error) + +// SVGProfileHandlerFunc serves pprof-like profile generated by prof as svg. 
+func SVGProfileHandlerFunc(f ProfileFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if r.FormValue("raw") != "" { + w.Header().Set("Content-Type", "application/octet-stream") + + failf := func(s string, args ...any) { + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + w.Header().Set("X-Go-Pprof", "1") + http.Error(w, fmt.Sprintf(s, args...), http.StatusInternalServerError) + } + records, err := f(r) + if err != nil { + failf("failed to get records: %v", err) + return + } + if err := BuildProfile(records).Write(w); err != nil { + failf("failed to write profile: %v", err) + return + } + return + } + + blockf, err := os.CreateTemp("", "block") + if err != nil { + http.Error(w, fmt.Sprintf("failed to create temp file: %v", err), http.StatusInternalServerError) + return + } + defer func() { + blockf.Close() + os.Remove(blockf.Name()) + }() + records, err := f(r) + if err != nil { + http.Error(w, fmt.Sprintf("failed to generate profile: %v", err), http.StatusInternalServerError) + } + blockb := bufio.NewWriter(blockf) + if err := BuildProfile(records).Write(blockb); err != nil { + http.Error(w, fmt.Sprintf("failed to write profile: %v", err), http.StatusInternalServerError) + return + } + if err := blockb.Flush(); err != nil { + http.Error(w, fmt.Sprintf("failed to flush temp file: %v", err), http.StatusInternalServerError) + return + } + if err := blockf.Close(); err != nil { + http.Error(w, fmt.Sprintf("failed to close temp file: %v", err), http.StatusInternalServerError) + return + } + svgFilename := blockf.Name() + ".svg" + if output, err := exec.Command(goCmd(), "tool", "pprof", "-svg", "-output", svgFilename, blockf.Name()).CombinedOutput(); err != nil { + http.Error(w, fmt.Sprintf("failed to execute go tool pprof: %v\n%s", err, output), http.StatusInternalServerError) + return + } + defer os.Remove(svgFilename) + w.Header().Set("Content-Type", "image/svg+xml") + http.ServeFile(w, r, svgFilename) + } +} + +type 
ProfileRecord struct { + Stack []*trace.Frame + Count uint64 + Time time.Duration +} + +func BuildProfile(prof []ProfileRecord) *profile.Profile { + p := &profile.Profile{ + PeriodType: &profile.ValueType{Type: "trace", Unit: "count"}, + Period: 1, + SampleType: []*profile.ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + locs := make(map[uint64]*profile.Location) + funcs := make(map[string]*profile.Function) + for _, rec := range prof { + var sloc []*profile.Location + for _, frame := range rec.Stack { + loc := locs[frame.PC] + if loc == nil { + fn := funcs[frame.File+frame.Fn] + if fn == nil { + fn = &profile.Function{ + ID: uint64(len(p.Function) + 1), + Name: frame.Fn, + SystemName: frame.Fn, + Filename: frame.File, + } + p.Function = append(p.Function, fn) + funcs[frame.File+frame.Fn] = fn + } + loc = &profile.Location{ + ID: uint64(len(p.Location) + 1), + Address: frame.PC, + Line: []profile.Line{ + { + Function: fn, + Line: int64(frame.Line), + }, + }, + } + p.Location = append(p.Location, loc) + locs[frame.PC] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &profile.Sample{ + Value: []int64{int64(rec.Count), int64(rec.Time)}, + Location: sloc, + }) + } + return p +} + +func goCmd() string { + var exeSuffix string + if runtime.GOOS == "windows" { + exeSuffix = ".exe" + } + path := filepath.Join(runtime.GOROOT(), "bin", "go"+exeSuffix) + if _, err := os.Stat(path); err == nil { + return path + } + return "go" +} diff --git a/src/cmd/trace/static/README.md b/src/internal/trace/traceviewer/static/README.md similarity index 100% rename from src/cmd/trace/static/README.md rename to src/internal/trace/traceviewer/static/README.md diff --git a/src/cmd/trace/static/trace_viewer_full.html b/src/internal/trace/traceviewer/static/trace_viewer_full.html similarity index 100% rename from src/cmd/trace/static/trace_viewer_full.html rename to 
src/internal/trace/traceviewer/static/trace_viewer_full.html diff --git a/src/cmd/trace/static/webcomponents.min.js b/src/internal/trace/traceviewer/static/webcomponents.min.js similarity index 100% rename from src/cmd/trace/static/webcomponents.min.js rename to src/internal/trace/traceviewer/static/webcomponents.min.js diff --git a/src/internal/trace/v2/base.go b/src/internal/trace/v2/base.go index e7cee29a88..e2ba09362b 100644 --- a/src/internal/trace/v2/base.go +++ b/src/internal/trace/v2/base.go @@ -9,6 +9,7 @@ package trace import ( "fmt" + "math" "strings" "internal/trace/v2/event" @@ -44,6 +45,7 @@ type evTable struct { freq frequency strings dataTable[stringID, string] stacks dataTable[stackID, stack] + pcs map[uint64]frame // extraStrings are strings that get generated during // parsing but haven't come directly from the trace, so @@ -123,8 +125,12 @@ func (d *dataTable[EI, E]) compactify() { minID = id } } + if maxID >= math.MaxInt { + // We can't create a slice big enough to hold maxID elements + return + } // We're willing to waste at most 2x memory. - if int(maxID-minID) > 2*len(d.sparse) { + if int(maxID-minID) > max(len(d.sparse), 2*len(d.sparse)) { return } if int(minID) > len(d.sparse) { @@ -146,7 +152,7 @@ func (d *dataTable[EI, E]) get(id EI) (E, bool) { if id == 0 { return *new(E), true } - if int(id) < len(d.dense) { + if uint64(id) < uint64(len(d.dense)) { if d.present[id/8]&(uint8(1)<<(id%8)) != 0 { return d.dense[id], true } @@ -236,12 +242,12 @@ func (s cpuSample) asEvent(table *evTable) Event { // stack represents a goroutine stack sample. 
type stack struct { - frames []frame + pcs []uint64 } func (s stack) String() string { var sb strings.Builder - for _, frame := range s.frames { + for _, frame := range s.pcs { fmt.Fprintf(&sb, "\t%#v\n", frame) } return sb.String() diff --git a/src/internal/trace/v2/batch.go b/src/internal/trace/v2/batch.go index 899eb0f59b..d7afc06eec 100644 --- a/src/internal/trace/v2/batch.go +++ b/src/internal/trace/v2/batch.go @@ -5,7 +5,6 @@ package trace import ( - "bufio" "bytes" "encoding/binary" "fmt" @@ -43,7 +42,10 @@ func (b *batch) isFreqBatch() bool { } // readBatch reads the next full batch from r. -func readBatch(r *bufio.Reader) (batch, uint64, error) { +func readBatch(r interface { + io.Reader + io.ByteReader +}) (batch, uint64, error) { // Read batch header byte. b, err := r.ReadByte() if err != nil { diff --git a/src/internal/trace/v2/batchcursor.go b/src/internal/trace/v2/batchcursor.go index fe6275074a..8dc34fd22f 100644 --- a/src/internal/trace/v2/batchcursor.go +++ b/src/internal/trace/v2/batchcursor.go @@ -68,7 +68,7 @@ func readTimedBaseEvent(b []byte, e *baseEvent) (int, timestamp, error) { // Get the event type. typ := event.Type(b[0]) specs := go122.Specs() - if int(typ) > len(specs) { + if int(typ) >= len(specs) { return 0, 0, fmt.Errorf("found invalid event type: %v", typ) } e.typ = typ @@ -82,11 +82,17 @@ func readTimedBaseEvent(b []byte, e *baseEvent) (int, timestamp, error) { // Read timestamp diff. ts, nb := binary.Uvarint(b[n:]) + if nb <= 0 { + return 0, 0, fmt.Errorf("found invalid uvarint for timestamp") + } n += nb // Read the rest of the arguments. 
for i := 0; i < len(spec.Args)-1; i++ { arg, nb := binary.Uvarint(b[n:]) + if nb <= 0 { + return 0, 0, fmt.Errorf("found invalid uvarint") + } e.args[i] = arg n += nb } diff --git a/src/internal/trace/v2/event.go b/src/internal/trace/v2/event.go index 7ec4698d88..ec5e27e57a 100644 --- a/src/internal/trace/v2/event.go +++ b/src/internal/trace/v2/event.go @@ -203,8 +203,14 @@ type RangeAttribute struct { // are of the same type). type TaskID uint64 -// NoTask indicates the lack of a task. -const NoTask = TaskID(0) +const ( + // NoTask indicates the lack of a task. + NoTask = TaskID(^uint64(0)) + + // BackgroundTask is the global task that events are attached to if there was + // no other task in the context at the point the event was emitted. + BackgroundTask = TaskID(0) +) // Task provides details about a Task event. type Task struct { @@ -258,7 +264,8 @@ func (s Stack) Frames(yield func(f StackFrame) bool) bool { return true } stk := s.table.stacks.mustGet(s.id) - for _, f := range stk.frames { + for _, pc := range stk.pcs { + f := s.table.pcs[pc] sf := StackFrame{ PC: f.pc, Func: s.table.strings.mustGet(f.funcID), @@ -568,22 +575,28 @@ func (e Event) StateTransition() StateTransition { s = goStateTransition(GoID(e.base.args[0]), GoRunnable, GoRunning) case go122.EvGoDestroy: s = goStateTransition(e.ctx.G, GoRunning, GoNotExist) + s.Stack = e.Stack() // This event references the resource the event happened on. case go122.EvGoDestroySyscall: s = goStateTransition(e.ctx.G, GoSyscall, GoNotExist) case go122.EvGoStop: s = goStateTransition(e.ctx.G, GoRunning, GoRunnable) s.Reason = e.table.strings.mustGet(stringID(e.base.args[0])) + s.Stack = e.Stack() // This event references the resource the event happened on. case go122.EvGoBlock: s = goStateTransition(e.ctx.G, GoRunning, GoWaiting) s.Reason = e.table.strings.mustGet(stringID(e.base.args[0])) + s.Stack = e.Stack() // This event references the resource the event happened on. 
case go122.EvGoUnblock: s = goStateTransition(GoID(e.base.args[0]), GoWaiting, GoRunnable) case go122.EvGoSyscallBegin: s = goStateTransition(e.ctx.G, GoRunning, GoSyscall) + s.Stack = e.Stack() // This event references the resource the event happened on. case go122.EvGoSyscallEnd: s = goStateTransition(e.ctx.G, GoSyscall, GoRunning) + s.Stack = e.Stack() // This event references the resource the event happened on. case go122.EvGoSyscallEndBlocked: s = goStateTransition(e.ctx.G, GoSyscall, GoRunnable) + s.Stack = e.Stack() // This event references the resource the event happened on. case go122.EvGoStatus: // N.B. ordering.advance populates e.base.extra. s = goStateTransition(GoID(e.base.args[0]), GoState(e.base.extra(version.Go122)[0]), go122GoStatus2GoState[e.base.args[2]]) diff --git a/src/internal/trace/v2/generation.go b/src/internal/trace/v2/generation.go index 4cdf76e21c..da31585266 100644 --- a/src/internal/trace/v2/generation.go +++ b/src/internal/trace/v2/generation.go @@ -43,7 +43,9 @@ type spilledBatch struct { // batch read of the next generation, if any. func readGeneration(r *bufio.Reader, spill *spilledBatch) (*generation, *spilledBatch, error) { g := &generation{ - evTable: new(evTable), + evTable: &evTable{ + pcs: make(map[uint64]frame), + }, batches: make(map[ThreadID][]batch), } // Process the spilled batch. @@ -106,7 +108,7 @@ func readGeneration(r *bufio.Reader, spill *spilledBatch) (*generation, *spilled g.strings.compactify() // Validate stacks. 
- if err := validateStackStrings(&g.stacks, &g.strings); err != nil { + if err := validateStackStrings(&g.stacks, &g.strings, g.pcs); err != nil { return nil, nil, err } @@ -130,7 +132,7 @@ func processBatch(g *generation, b batch) error { return err } case b.isStacksBatch(): - if err := addStacks(&g.stacks, b); err != nil { + if err := addStacks(&g.stacks, g.pcs, b); err != nil { return err } case b.isCPUSamplesBatch(): @@ -156,11 +158,20 @@ func processBatch(g *generation, b batch) error { // validateStackStrings makes sure all the string references in // the stack table are present in the string table. -func validateStackStrings(stacks *dataTable[stackID, stack], strings *dataTable[stringID, string]) error { +func validateStackStrings( + stacks *dataTable[stackID, stack], + strings *dataTable[stringID, string], + frames map[uint64]frame, +) error { var err error stacks.forEach(func(id stackID, stk stack) bool { - for _, frame := range stk.frames { - _, ok := strings.get(frame.funcID) + for _, pc := range stk.pcs { + frame, ok := frames[pc] + if !ok { + err = fmt.Errorf("found unknown pc %x for stack %d", pc, id) + return false + } + _, ok = strings.get(frame.funcID) if !ok { err = fmt.Errorf("found invalid func string ID %d for stack %d", frame.funcID, id) return false @@ -237,7 +248,7 @@ func addStrings(stringTable *dataTable[stringID, string], b batch) error { // addStacks takes a batch whose first byte is an EvStacks event // (indicating that the batch contains only stacks) and adds each // string contained therein to the provided stacks map. 
-func addStacks(stackTable *dataTable[stackID, stack], b batch) error { +func addStacks(stackTable *dataTable[stackID, stack], pcs map[uint64]frame, b batch) error { if !b.isStacksBatch() { return fmt.Errorf("internal error: addStacks called on non-stacks batch") } @@ -273,7 +284,7 @@ func addStacks(stackTable *dataTable[stackID, stack], b batch) error { } // Each frame consists of 4 fields: pc, funcID (string), fileID (string), line. - frames := make([]frame, 0, nFrames) + frames := make([]uint64, 0, nFrames) for i := uint64(0); i < nFrames; i++ { // Read the frame data. pc, err := binary.ReadUvarint(r) @@ -292,16 +303,20 @@ func addStacks(stackTable *dataTable[stackID, stack], b batch) error { if err != nil { return fmt.Errorf("reading frame %d's line for stack %d: %w", i+1, id, err) } - frames = append(frames, frame{ - pc: pc, - funcID: stringID(funcID), - fileID: stringID(fileID), - line: line, - }) + frames = append(frames, pc) + + if _, ok := pcs[pc]; !ok { + pcs[pc] = frame{ + pc: pc, + funcID: stringID(funcID), + fileID: stringID(fileID), + line: line, + } + } } // Add the stack to the map. - if err := stackTable.insert(stackID(id), stack{frames: frames}); err != nil { + if err := stackTable.insert(stackID(id), stack{pcs: frames}); err != nil { return err } } diff --git a/src/internal/trace/v2/order.go b/src/internal/trace/v2/order.go index 8b503d4dc4..cedb29726e 100644 --- a/src/internal/trace/v2/order.go +++ b/src/internal/trace/v2/order.go @@ -23,6 +23,23 @@ type ordering struct { gcSeq uint64 gcState gcState initialGen uint64 + + // Some events like GoDestroySyscall produce two events instead of one. + // extraEvent is this extra space. advance must not be called unless + // the extraEvent has been consumed with consumeExtraEvent. + // + // TODO(mknyszek): Replace this with a more formal queue. + extraEvent Event +} + +// consumeExtraEvent consumes the extra event. 
+func (o *ordering) consumeExtraEvent() Event { + if o.extraEvent.Kind() == EventBad { + return Event{} + } + r := o.extraEvent + o.extraEvent = Event{} + return r } // advance checks if it's valid to proceed with ev which came from thread m. @@ -75,6 +92,9 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) case go122.EvProcStatus: pid := ProcID(ev.args[0]) status := go122.ProcStatus(ev.args[1]) + if int(status) >= len(go122ProcStatus2ProcState) { + return curCtx, false, fmt.Errorf("invalid status for proc %d: %d", pid, status) + } oldState := go122ProcStatus2ProcState[status] if s, ok := o.pStates[pid]; ok { if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscall { @@ -83,6 +103,12 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // we haven't lost the relevant information. Promote the status and advance. oldState = ProcRunning ev.args[1] = uint64(go122.ProcSyscall) + } else if status == go122.ProcSyscallAbandoned && s.status == go122.ProcSyscallAbandoned { + // If we're passing through ProcSyscallAbandoned, then there's no promotion + // to do. We've lost the M that this P is associated with. However it got there, + // it's going to appear as idle in the API, so pass through as idle. + oldState = ProcIdle + ev.args[1] = uint64(go122.ProcSyscallAbandoned) } else if s.status != status { return curCtx, false, fmt.Errorf("inconsistent status for proc %d: old %v vs. new %v", pid, s.status, status) } @@ -101,9 +127,13 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) if status == go122.ProcRunning || status == go122.ProcSyscall { newCtx.P = pid } - // Set the current context to the state of the M current running this G. Otherwise - // we'll emit a Running -> Running event that doesn't correspond to the right M. 
- if status == go122.ProcSyscallAbandoned && oldState != ProcUndetermined { + // If we're advancing through ProcSyscallAbandoned *but* oldState is running then we've + // promoted it to ProcSyscall. However, because it's ProcSyscallAbandoned, we know this + // P is about to get stolen and its status very likely isn't being emitted by the same + // thread it was bound to. Since this status is Running -> Running and Running is binding, + // we need to make sure we emit it in the right context: the context to which it is bound. + // Find it, and set our current context to it. + if status == go122.ProcSyscallAbandoned && oldState == ProcRunning { // N.B. This is slow but it should be fairly rare. found := false for mid, ms := range o.mStates { @@ -206,6 +236,17 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // Validate that the M we're stealing from is what we expect. mid := ThreadID(ev.args[2]) // The M we're stealing from. + + if mid == curCtx.M { + // We're stealing from ourselves. This behaves like a ProcStop. + if curCtx.P != pid { + return curCtx, false, fmt.Errorf("tried to self-steal proc %d (thread %d), but got proc %d instead", pid, mid, curCtx.P) + } + newCtx.P = NoProc + return curCtx, true, nil + } + + // We're stealing from some other M. 
mState, ok := o.mStates[mid] if !ok { return curCtx, false, fmt.Errorf("stole proc from non-existent thread %d", mid) @@ -230,6 +271,10 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) gid := GoID(ev.args[0]) mid := ThreadID(ev.args[1]) status := go122.GoStatus(ev.args[2]) + + if int(status) >= len(go122GoStatus2GoState) { + return curCtx, false, fmt.Errorf("invalid status for goroutine %d: %d", gid, status) + } oldState := go122GoStatus2GoState[status] if s, ok := o.gStates[gid]; ok { if s.status != status { @@ -257,6 +302,13 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // Otherwise, we're talking about a G sitting in a syscall on an M. // Validate the named M. if mid == curCtx.M { + if gen != o.initialGen && curCtx.G != gid { + // If this isn't the first generation, we *must* have seen this + // binding occur already. Even if the G was blocked in a syscall + // for multiple generations since trace start, we would have seen + // a previous GoStatus event that bound the goroutine to an M. + return curCtx, false, fmt.Errorf("inconsistent thread for syscalling goroutine %d: thread has goroutine %d", gid, curCtx.G) + } newCtx.G = gid break } @@ -474,7 +526,7 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // This event indicates that a goroutine is effectively // being created out of a cgo callback. Such a goroutine // is 'created' in the syscall state. - if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MustNotHave, Goroutine: event.MustNotHave}); err != nil { + if err := validateCtx(curCtx, event.SchedReqs{Thread: event.MustHave, Proc: event.MayHave, Goroutine: event.MustNotHave}); err != nil { return curCtx, false, err } // This goroutine is effectively being created. Add a state for it. 
@@ -491,6 +543,15 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // cgo callback is disappearing, either because the callback // ending or the C thread that called it is being destroyed. // + // Also, treat this as if we lost our P too. + // The thread ID may be reused by the platform and we'll get + // really confused if we try to steal the P is this is running + // with later. The new M with the same ID could even try to + // steal back this P from itself! + // + // The runtime is careful to make sure that any GoCreateSyscall + // event will enter the runtime emitting events for reacquiring a P. + // // Note: we might have a P here. The P might not be released // eagerly by the runtime, and it might get stolen back later // (or never again, if the program is going to exit). @@ -508,6 +569,32 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // This goroutine is exiting itself. delete(o.gStates, curCtx.G) newCtx.G = NoGoroutine + + // If we have a proc, then we're dissociating from it now. See the comment at the top of the case. + if curCtx.P != NoProc { + pState, ok := o.pStates[curCtx.P] + if !ok { + return curCtx, false, fmt.Errorf("found invalid proc %d during %s", curCtx.P, go122.EventString(typ)) + } + if pState.status != go122.ProcSyscall { + return curCtx, false, fmt.Errorf("proc %d in unexpected state %s during %s", curCtx.P, pState.status, go122.EventString(typ)) + } + // See the go122-create-syscall-reuse-thread-id test case for more details. + pState.status = go122.ProcSyscallAbandoned + newCtx.P = NoProc + + // Queue an extra self-ProcSteal event. + o.extraEvent = Event{ + table: evt, + ctx: curCtx, + base: baseEvent{ + typ: go122.EvProcSteal, + time: ev.time, + }, + } + o.extraEvent.base.args[0] = uint64(curCtx.P) + o.extraEvent.base.extra(version.Go122)[0] = uint64(go122.ProcSyscall) + } return curCtx, true, nil // Handle tasks. 
Tasks are interesting because: @@ -525,6 +612,13 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // Get the parent ID, but don't validate it. There's no guarantee // we actually have information on whether it's active. parentID := TaskID(ev.args[1]) + if parentID == BackgroundTask { + // Note: a value of 0 here actually means no parent, *not* the + // background task. Automatic background task attachment only + // applies to regions. + parentID = NoTask + ev.args[1] = uint64(NoTask) + } // Validate the name and record it. We'll need to pass it through to // EvUserTaskEnd. @@ -562,7 +656,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) if !ok { return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, typ) } - if err := o.gStates[curCtx.G].beginRegion(userRegion{tid, name}); err != nil { + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered EvUserRegionBegin without known state for current goroutine %d", curCtx.G) + } + if err := gState.beginRegion(userRegion{tid, name}); err != nil { return curCtx, false, err } return curCtx, true, nil @@ -576,7 +674,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) if !ok { return curCtx, false, fmt.Errorf("invalid string ID %v for %v event", nameID, typ) } - if err := o.gStates[curCtx.G].endRegion(userRegion{tid, name}); err != nil { + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered EvUserRegionEnd without known state for current goroutine %d", curCtx.G) + } + if err := gState.endRegion(userRegion{tid, name}); err != nil { return curCtx, false, err } return curCtx, true, nil @@ -678,7 +780,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // ever reference curCtx.P. 
However, be lenient about this like we are with // GCMarkAssistActive; there's no reason the runtime couldn't change to block // in the middle of a sweep. - if err := o.pStates[pid].activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil { + pState, ok := o.pStates[pid] + if !ok { + return curCtx, false, fmt.Errorf("encountered GCSweepActive for unknown proc %d", pid) + } + if err := pState.activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil { return curCtx, false, err } return curCtx, true, nil @@ -701,7 +807,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) if typ == go122.EvSTWBegin { desc = stringID(ev.args[0]) } - if err := o.gStates[curCtx.G].beginRange(makeRangeType(typ, desc)); err != nil { + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", typ, curCtx.G) + } + if err := gState.beginRange(makeRangeType(typ, desc)); err != nil { return curCtx, false, err } return curCtx, true, nil @@ -710,7 +820,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) // N.B. Like GoStatus, this can happen at any time, because it can // reference a non-running goroutine. Don't check anything about the // current scheduler context. 
- if err := o.gStates[gid].activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil { + gState, ok := o.gStates[gid] + if !ok { + return curCtx, false, fmt.Errorf("uninitialized goroutine %d found during %s", gid, go122.EventString(typ)) + } + if err := gState.activeRange(makeRangeType(typ, 0), gen == o.initialGen); err != nil { return curCtx, false, err } return curCtx, true, nil @@ -718,7 +832,11 @@ func (o *ordering) advance(ev *baseEvent, evt *evTable, m ThreadID, gen uint64) if err := validateCtx(curCtx, event.UserGoReqs); err != nil { return curCtx, false, err } - desc, err := o.gStates[curCtx.G].endRange(typ) + gState, ok := o.gStates[curCtx.G] + if !ok { + return curCtx, false, fmt.Errorf("encountered event of type %d without known state for current goroutine %d", typ, curCtx.G) + } + desc, err := gState.endRange(typ) if err != nil { return curCtx, false, err } @@ -837,6 +955,10 @@ func (s *gState) beginRegion(r userRegion) error { // endRegion ends a user region on the goroutine. func (s *gState) endRegion(r userRegion) error { + if len(s.regions) == 0 { + // We do not know about regions that began before tracing started. + return nil + } if next := s.regions[len(s.regions)-1]; next != r { return fmt.Errorf("misuse of region in goroutine %v: region end %v when the inner-most active region start event is %v", s.id, r, next) } diff --git a/src/internal/trace/v2/reader.go b/src/internal/trace/v2/reader.go index b58cc6fcb1..824ca23df3 100644 --- a/src/internal/trace/v2/reader.go +++ b/src/internal/trace/v2/reader.go @@ -85,6 +85,11 @@ func (r *Reader) ReadEvent() (e Event, err error) { r.lastTs = e.base.time }() + // Consume any extra events produced during parsing. + if ev := r.order.consumeExtraEvent(); ev.Kind() != EventBad { + return ev, nil + } + // Check if we need to refresh the generation. 
if len(r.frontier) == 0 && len(r.cpuSamples) == 0 { if !r.emittedSync { @@ -152,6 +157,9 @@ func (r *Reader) ReadEvent() (e Event, err error) { } // Try to advance the head of the frontier, which should have the minimum timestamp. // This should be by far the most common case + if len(r.frontier) == 0 { + return Event{}, fmt.Errorf("broken trace: frontier is empty:\n[gen=%d]\n\n%s\n%s\n", r.gen.gen, dumpFrontier(r.frontier), dumpOrdering(&r.order)) + } bc := r.frontier[0] if ctx, ok, err := r.order.advance(&bc.ev, r.gen.evTable, bc.m, r.gen.gen); err != nil { return Event{}, err diff --git a/src/internal/trace/v2/reader_test.go b/src/internal/trace/v2/reader_test.go index 4f00002e37..393e1c80b0 100644 --- a/src/internal/trace/v2/reader_test.go +++ b/src/internal/trace/v2/reader_test.go @@ -46,6 +46,53 @@ func TestReaderGolden(t *testing.T) { } } +func FuzzReader(f *testing.F) { + // Currently disabled because the parser doesn't do much validation and most + // getters can be made to panic. Turn this on once the parser is meant to + // reject invalid traces. 
+ const testGetters = false + + f.Fuzz(func(t *testing.T, b []byte) { + r, err := trace.NewReader(bytes.NewReader(b)) + if err != nil { + return + } + for { + ev, err := r.ReadEvent() + if err != nil { + break + } + + if !testGetters { + continue + } + // Make sure getters don't do anything that panics + switch ev.Kind() { + case trace.EventLabel: + ev.Label() + case trace.EventLog: + ev.Log() + case trace.EventMetric: + ev.Metric() + case trace.EventRangeActive, trace.EventRangeBegin: + ev.Range() + case trace.EventRangeEnd: + ev.Range() + ev.RangeAttributes() + case trace.EventStateTransition: + ev.StateTransition() + case trace.EventRegionBegin, trace.EventRegionEnd: + ev.Region() + case trace.EventTaskBegin, trace.EventTaskEnd: + ev.Task() + case trace.EventSync: + case trace.EventStackSample: + case trace.EventBad: + } + } + }) +} + func testReader(t *testing.T, tr io.Reader, exp *testtrace.Expectation) { r, err := trace.NewReader(tr) if err != nil { diff --git a/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go b/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go index a0d595dec1..3df11be7a8 100644 --- a/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go +++ b/src/internal/trace/v2/testdata/cmd/gotraceraw/main.go @@ -69,11 +69,9 @@ func main() { } if err != nil { log.Fatal(err) - break } if err := tw.WriteEvent(ev); err != nil { log.Fatal(err) - break } } } diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b b/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b new file mode 100644 index 0000000000..326ebe1c6e --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/0cb1786dee0f090b @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x190000\x01\x0100\x88\x00\b0000000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d b/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d new file mode 100644 index 
0000000000..406af9caa6 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/1e45307d5b2ec36d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01000\x85\x00\b0001") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d new file mode 100644 index 0000000000..50fdccda6b --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b05796f9b2fc48d @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00-0000\x01\x0100\x88\x00\b0000000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511 new file mode 100644 index 0000000000..6bcb99adfc --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/2b9be9aebe08d511 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x0f00\x120\x01\x0100\x88\x00\b0000000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08 new file mode 100644 index 0000000000..de6e4694be --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/344331b314da0b08 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\b0000\x01\x01\xff00\xb8\x00\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1900\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x04\x1901\xff\xff\xff\xff\xff\xff\xff\xff0\x800") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97 new file mode 100644 index 0000000000..8dc370f383 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/365d7b5b633b3f97 @@ -0,0 +1,2 @@ +go test 
fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x0100\x8c0\x85\x00\b0000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871 new file mode 100644 index 0000000000..040b2a4cae --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/4d9ddc909984e871 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x11\r\xa700\x01\x19000\x02$000000\x01\x0100\x05\b0000\x01\x0110\x11\r\xa700\x01\x19 00\x02\x110 0000") diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c b/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c new file mode 100644 index 0000000000..d34fe3f06c --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/56f073e57903588c @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\x1f0000\x01\x0100\x88\x00\b0000000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566 new file mode 100644 index 0000000000..5677261155 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/9d6ee7d3ddf8d566 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x11\r\xa700\x01\x19000\x02#000000\x01\x0100\x05\b0000\x01\x0110\x11\r\xa700\x01\x19 00\x02\x110 0000") diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66 new file mode 100644 index 0000000000..f93b5a90da --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/aeb749b6bc317b66 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01000\x85\x00\b0000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region b/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region new 
file mode 100644 index 0000000000..7433214030 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/closing-unknown-region @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xf6\x9f\n\x9fÕ\xb4\x99\xb2\x06\x11\r\xa7\x02\x00\x01\x19\x05\x01\xf6\x9f\n\x02+\x04\x01\x00\x00") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7 b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7 new file mode 100644 index 0000000000..3e5fda833a --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d478e18d2d6756b7 @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x0100\x85\x00\"0000\x01\x0100\x88\x00\b0000000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc new file mode 100644 index 0000000000..d24b94ac97 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/d91203cd397aa0bc @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01001\x85\x00\b0000") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state b/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state new file mode 100644 index 0000000000..e5d3258111 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/invalid-proc-state @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x94镴\x99\xb2\x06\x05\r\xa7\x02\x00E") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id b/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id new file mode 100644 index 0000000000..0fb6273b44 --- /dev/null +++ 
b/src/internal/trace/v2/testdata/fuzz/FuzzReader/large-id @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x94镴\x99\xb2\x06\f\x02\x03\xff\xff\xff\xff\xff\xff\xff\x9f\x1d\x00") \ No newline at end of file diff --git a/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp b/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp new file mode 100644 index 0000000000..850ca50f87 --- /dev/null +++ b/src/internal/trace/v2/testdata/fuzz/FuzzReader/malformed-timestamp @@ -0,0 +1,2 @@ +go test fuzz v1 +[]byte("go 1.22 trace\x00\x00\x00\x01\x01\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x87ߕ\xb4\x99\xb2\x06\x05\b\xa8ֹ\a\x01\x01\xfa\x9f\n\xa5ѕ\xb4\x99\xb2\x06\x0e\n\x97\x96\x96\x96\x96\x96\x96\x96\x96\x96\x01\x01\x01") diff --git a/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go b/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go new file mode 100644 index 0000000000..107cce2cc2 --- /dev/null +++ b/src/internal/trace/v2/testdata/generators/go122-create-syscall-reuse-thread-id.go @@ -0,0 +1,61 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a G being created from within a syscall. +// +// Specifically, it tests a scenerio wherein a C +// thread is calling into Go, creating a goroutine in +// a syscall (in the tracer's model). The system is free +// to reuse thread IDs, so first a thread ID is used to +// call into Go, and then is used for a Go-created thread. +// +// This is a regression test. The trace parser didn't correctly +// model GoDestroySyscall as dropping its P (even if the runtime +// did). 
It turns out this is actually fine if all the threads +// in the trace have unique IDs, since the P just stays associated +// with an eternally dead thread, and it's stolen by some other +// thread later. But if thread IDs are reused, then the tracer +// gets confused when trying to advance events on the new thread. +// The now-dead thread which exited on a GoDestroySyscall still has +// its P associated and this transfers to the newly-live thread +// in the parser's state because they share a thread ID. + +package main + +import ( + "internal/trace/v2" + "internal/trace/v2/event/go122" + testgen "internal/trace/v2/internal/testgen/go122" +) + +func main() { + testgen.Main(gen) +} + +func gen(t *testgen.Trace) { + g := t.Generation(1) + + // A C thread calls into Go and acquires a P. It returns + // back to C, destroying the G. + b0 := g.Batch(trace.ThreadID(0), 0) + b0.Event("GoCreateSyscall", trace.GoID(4)) + b0.Event("GoSyscallEndBlocked") + b0.Event("ProcStatus", trace.ProcID(0), go122.ProcIdle) + b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(1)) + b0.Event("GoStatus", trace.GoID(4), trace.NoThread, go122.GoRunnable) + b0.Event("GoStart", trace.GoID(4), testgen.Seq(1)) + b0.Event("GoSyscallBegin", testgen.Seq(2), testgen.NoStack) + b0.Event("GoDestroySyscall") + + // A new Go-created thread with the same ID appears and + // starts running, then tries to steal the P from the + // first thread. The stealing is interesting because if + // the parser handles GoDestroySyscall wrong, then we + // have a self-steal here potentially that doesn't make + // sense. 
+ b1 := g.Batch(trace.ThreadID(0), 0) + b1.Event("ProcStatus", trace.ProcID(1), go122.ProcIdle) + b1.Event("ProcStart", trace.ProcID(1), testgen.Seq(1)) + b1.Event("ProcSteal", trace.ProcID(0), testgen.Seq(3), trace.ThreadID(0)) +} diff --git a/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go b/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go new file mode 100644 index 0000000000..4cb1c4a9a7 --- /dev/null +++ b/src/internal/trace/v2/testdata/generators/go122-create-syscall-with-p.go @@ -0,0 +1,52 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a G being created from within a syscall. +// +// Specifically, it tests a scenerio wherein a C +// thread is calling into Go, creating a goroutine in +// a syscall (in the tracer's model). Because the actual +// m can be reused, it's possible for that m to have never +// had its P (in _Psyscall) stolen if the runtime doesn't +// model the scenario correctly. Make sure we reject such +// traces. + +package main + +import ( + "internal/trace/v2" + "internal/trace/v2/event/go122" + testgen "internal/trace/v2/internal/testgen/go122" +) + +func main() { + testgen.Main(gen) +} + +func gen(t *testgen.Trace) { + t.ExpectFailure(".*expected a proc but didn't have one.*") + + g := t.Generation(1) + + // A C thread calls into Go and acquires a P. It returns + // back to C, destroying the G. It then comes back to Go + // on the same thread and again returns to C. + // + // Note: on pthread platforms this can't happen on the + // same thread because the m is stashed in TLS between + // calls into Go, until the thread dies. This is still + // possible on other platforms, however. 
+ b0 := g.Batch(trace.ThreadID(0), 0) + b0.Event("GoCreateSyscall", trace.GoID(4)) + b0.Event("ProcStatus", trace.ProcID(0), go122.ProcIdle) + b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(1)) + b0.Event("GoSyscallEndBlocked") + b0.Event("GoStart", trace.GoID(4), testgen.Seq(1)) + b0.Event("GoSyscallBegin", testgen.Seq(2), testgen.NoStack) + b0.Event("GoDestroySyscall") + b0.Event("GoCreateSyscall", trace.GoID(4)) + b0.Event("GoSyscallEnd") + b0.Event("GoSyscallBegin", testgen.Seq(3), testgen.NoStack) + b0.Event("GoDestroySyscall") +} diff --git a/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go new file mode 100644 index 0000000000..dd947346c6 --- /dev/null +++ b/src/internal/trace/v2/testdata/generators/go122-syscall-steal-proc-self.go @@ -0,0 +1,37 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests syscall P stealing. +// +// Specifically, it tests a scenario where a thread 'steals' +// a P from itself. It's just a ProcStop with extra steps when +// it happens on the same P. + +package main + +import ( + "internal/trace/v2" + "internal/trace/v2/event/go122" + testgen "internal/trace/v2/internal/testgen/go122" +) + +func main() { + testgen.Main(gen) +} + +func gen(t *testgen.Trace) { + t.DisableTimestamps() + + g := t.Generation(1) + + // A goroutine execute a syscall and steals its own P, then starts running + // on that P. 
+ b0 := g.Batch(trace.ThreadID(0), 0) + b0.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning) + b0.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning) + b0.Event("GoSyscallBegin", testgen.Seq(1), testgen.NoStack) + b0.Event("ProcSteal", trace.ProcID(0), testgen.Seq(2), trace.ThreadID(0)) + b0.Event("ProcStart", trace.ProcID(0), testgen.Seq(3)) + b0.Event("GoSyscallEndBlocked") +} diff --git a/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go b/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go index 94e9933996..06ef96e51a 100644 --- a/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go +++ b/src/internal/trace/v2/testdata/generators/go122-task-across-generations.go @@ -29,7 +29,7 @@ func gen(t *testgen.Trace) { b1 := g1.Batch(trace.ThreadID(0), 0) b1.Event("ProcStatus", trace.ProcID(0), go122.ProcRunning) b1.Event("GoStatus", trace.GoID(1), trace.ThreadID(0), go122.GoRunning) - b1.Event("UserTaskBegin", trace.TaskID(2), trace.NoTask, "my task", testgen.NoStack) + b1.Event("UserTaskBegin", trace.TaskID(2), trace.TaskID(0) /* 0 means no parent, not background */, "my task", testgen.NoStack) g2 := t.Generation(2) diff --git a/src/internal/trace/v2/testdata/mktests.go b/src/internal/trace/v2/testdata/mktests.go index 5242163594..96cbbe4b1f 100644 --- a/src/internal/trace/v2/testdata/mktests.go +++ b/src/internal/trace/v2/testdata/mktests.go @@ -7,7 +7,12 @@ package main import ( + "bytes" "fmt" + "internal/trace/v2/raw" + "internal/trace/v2/version" + "internal/txtar" + "io" "log" "os" "os/exec" @@ -17,27 +22,58 @@ import ( func main() { log.SetFlags(0) - if err := run(); err != nil { + ctx, err := newContext() + if err != nil { + log.Fatal(err) + } + if err := ctx.runGenerators(); err != nil { + log.Fatal(err) + } + if err := ctx.runTestProg("./testprog/annotations.go"); err != nil { + log.Fatal(err) + } + if err := ctx.runTestProg("./testprog/annotations-stress.go"); err != 
nil { log.Fatal(err) } } -func run() error { +type context struct { + testNames map[string]struct{} + filter *regexp.Regexp +} + +func newContext() (*context, error) { + var filter *regexp.Regexp + var err error + if pattern := os.Getenv("GOTRACETEST"); pattern != "" { + filter, err = regexp.Compile(pattern) + if err != nil { + return nil, fmt.Errorf("compiling regexp %q for GOTRACETEST: %v", pattern, err) + } + } + return &context{ + testNames: make(map[string]struct{}), + filter: filter, + }, nil +} + +func (ctx *context) register(testName string) (skip bool, err error) { + if _, ok := ctx.testNames[testName]; ok { + return true, fmt.Errorf("duplicate test %s found", testName) + } + if ctx.filter != nil { + return !ctx.filter.MatchString(testName), nil + } + return false, nil +} + +func (ctx *context) runGenerators() error { generators, err := filepath.Glob("./generators/*.go") if err != nil { return fmt.Errorf("reading generators: %v", err) } genroot := "./tests" - // Grab a pattern, if any. - var re *regexp.Regexp - if pattern := os.Getenv("GOTRACETEST"); pattern != "" { - re, err = regexp.Compile(pattern) - if err != nil { - return fmt.Errorf("compiling regexp %q for GOTRACETEST: %v", pattern, err) - } - } - if err := os.MkdirAll(genroot, 0777); err != nil { return fmt.Errorf("creating generated root: %v", err) } @@ -46,7 +82,11 @@ func run() error { name = name[:len(name)-len(filepath.Ext(name))] // Skip if we have a pattern and this test doesn't match. - if re != nil && !re.MatchString(name) { + skip, err := ctx.register(name) + if err != nil { + return err + } + if skip { continue } @@ -64,3 +104,59 @@ func run() error { } return nil } + +func (ctx *context) runTestProg(progPath string) error { + name := filepath.Base(progPath) + name = name[:len(name)-len(filepath.Ext(name))] + name = fmt.Sprintf("go1%d-%s", version.Current, name) + + // Skip if we have a pattern and this test doesn't match. 
+ skip, err := ctx.register(name) + if err != nil { + return err + } + if skip { + return nil + } + + // Create command. + var trace, stderr bytes.Buffer + cmd := exec.Command("go", "run", progPath) + // TODO(mknyszek): Remove if goexperiment.Exectracer2 becomes the default. + cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2") + cmd.Stdout = &trace + cmd.Stderr = &stderr + + // Run trace program; the trace will appear in stdout. + fmt.Fprintf(os.Stderr, "running trace program %s...\n", name) + if err := cmd.Run(); err != nil { + log.Fatalf("running trace program: %v:\n%s", err, stderr.String()) + } + + // Write out the trace. + var textTrace bytes.Buffer + r, err := raw.NewReader(&trace) + if err != nil { + log.Fatalf("reading trace: %v", err) + } + w, err := raw.NewTextWriter(&textTrace, version.Current) + for { + ev, err := r.ReadEvent() + if err == io.EOF { + break + } + if err != nil { + log.Fatalf("reading trace: %v", err) + } + if err := w.WriteEvent(ev); err != nil { + log.Fatalf("writing trace: %v", err) + } + } + testData := txtar.Format(&txtar.Archive{ + Files: []txtar.File{ + {Name: "expect", Data: []byte("SUCCESS")}, + {Name: "trace", Data: textTrace.Bytes()}, + }, + }) + return os.WriteFile(fmt.Sprintf("./tests/%s.test", name), testData, 0o664) +} diff --git a/src/internal/trace/v2/testdata/testprog/gc-stress.go b/src/internal/trace/v2/testdata/testprog/gc-stress.go index 70d3a246c3..017f7f07bf 100644 --- a/src/internal/trace/v2/testdata/testprog/gc-stress.go +++ b/src/internal/trace/v2/testdata/testprog/gc-stress.go @@ -39,7 +39,7 @@ func makeTree(depth int) *node { var trees [16]*node var ballast *[16]*[8192]*node -var sink []byte +var sink [][]byte func main() { for i := range trees { @@ -54,10 +54,15 @@ func main() { } } } - for i := 0; i < runtime.GOMAXPROCS(-1); i++ { + + procs := runtime.GOMAXPROCS(-1) + sink = make([][]byte, procs) + + for i := 0; i < procs; i++ { + i := i go func() { for { - sink = make([]byte, rand.Intn(32<<10)) + 
sink[i] = make([]byte, rand.Intn(32<<10)) } }() } diff --git a/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go b/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go new file mode 100644 index 0000000000..912f5dd3bc --- /dev/null +++ b/src/internal/trace/v2/testdata/testprog/wait-on-pipe.go @@ -0,0 +1,66 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Tests a goroutine sitting blocked in a syscall for +// an entire generation. This is a regression test for +// #65196. + +//go:build ignore + +package main + +import ( + "log" + "os" + "runtime/trace" + "syscall" + "time" +) + +func main() { + // Create a pipe to block on. + var p [2]int + if err := syscall.Pipe(p[:]); err != nil { + log.Fatalf("failed to create pipe: %v", err) + } + rfd, wfd := p[0], p[1] + + // Create a goroutine that blocks on the pipe. + done := make(chan struct{}) + go func() { + var data [1]byte + _, err := syscall.Read(rfd, data[:]) + if err != nil { + log.Fatalf("failed to read from pipe: %v", err) + } + done <- struct{}{} + }() + + // Give the goroutine ample chance to block on the pipe. + time.Sleep(10 * time.Millisecond) + + // Start tracing. + if err := trace.Start(os.Stdout); err != nil { + log.Fatalf("failed to start tracing: %v", err) + } + + // This isn't enough to have a full generation pass by default, + // but it is generally enough in stress mode. + time.Sleep(100 * time.Millisecond) + + // Write to the pipe to unblock it. + if _, err := syscall.Write(wfd, []byte{10}); err != nil { + log.Fatalf("failed to write to pipe: %v", err) + } + + // Wait for the goroutine to unblock and start running. + // This is helpful to catch incorrect information written + // down for the syscall-blocked goroutine, since it'll start + // executing, and that execution information will be + // inconsistent. + <-done + + // Stop tracing. 
+ trace.Stop() +} diff --git a/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test b/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test new file mode 100644 index 0000000000..8da8c0f318 --- /dev/null +++ b/src/internal/trace/v2/testdata/tests/go122-annotations-stress.test @@ -0,0 +1,1179 @@ +-- expect -- +SUCCESS +-- trace -- +Trace Go1.22 +EventBatch gen=1 m=18446744073709551615 time=2753926854385 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=1986497 time=2753925247434 size=1430 +ProcStart dt=336 p=2 p_seq=1 +GoStart dt=191 g=19 g_seq=1 +HeapAlloc dt=389 heapalloc_value=1622016 +HeapAlloc dt=4453 heapalloc_value=1662976 +GoBlock dt=572 reason_string=12 stack=29 +ProcStop dt=26 +ProcStart dt=160734 p=2 p_seq=2 +ProcStop dt=21 +ProcStart dt=159292 p=0 p_seq=7 +GoStart dt=299 g=49 g_seq=1 +UserRegionBegin dt=183 task=8 name_string=33 stack=26 +UserLog dt=26 task=8 key_string=24 value_string=49 stack=27 +UserRegionEnd dt=8 task=8 name_string=33 stack=28 +GoDestroy dt=3 +GoStart dt=20 g=50 g_seq=1 +UserRegionBegin dt=40 task=8 name_string=35 stack=26 +UserLog dt=9 task=8 key_string=24 value_string=50 stack=27 +UserRegionEnd dt=2 task=8 name_string=35 stack=28 +GoDestroy dt=1 +ProcStop dt=18 +ProcStart dt=141801 p=4 p_seq=5 +ProcStop dt=18 +ProcStart dt=16860 p=4 p_seq=6 +GoUnblock dt=53 g=1 g_seq=5 stack=0 +GoUnblock dt=9 g=51 g_seq=3 stack=0 +GoStart dt=162 g=51 g_seq=4 +UserTaskEnd dt=35 task=9 stack=36 +UserRegionEnd dt=16 task=8 name_string=31 stack=28 +GoDestroy dt=2 +GoStart dt=20 g=1 g_seq=6 +UserTaskEnd dt=14 task=8 stack=54 +UserLog dt=26 task=3 key_string=24 value_string=51 stack=55 +UserTaskBegin dt=14 task=10 parent_task=3 name_string=26 stack=56 +UserLog dt=42 task=10 key_string=27 value_string=52 stack=57 +UserRegionBegin dt=12 task=10 name_string=29 stack=58 +GoCreate dt=36 new_g=35 new_stack=17 stack=59 +GoCreate dt=11 new_g=36 new_stack=17 stack=59 +GoCreate dt=18 new_g=37 new_stack=17 stack=59 +GoCreate dt=10 
new_g=38 new_stack=17 stack=59 +GoCreate dt=6 new_g=39 new_stack=17 stack=59 +GoCreate dt=8 new_g=40 new_stack=17 stack=59 +UserRegionEnd dt=7 task=10 name_string=29 stack=60 +GoBlock dt=9 reason_string=19 stack=61 +GoStart dt=15 g=40 g_seq=1 +UserRegionBegin dt=110 task=10 name_string=53 stack=26 +UserLog dt=16 task=10 key_string=24 value_string=54 stack=27 +UserRegionEnd dt=2 task=10 name_string=53 stack=28 +GoDestroy dt=2 +GoStart dt=6 g=38 g_seq=1 +UserRegionBegin dt=31 task=10 name_string=30 stack=26 +UserLog dt=5 task=10 key_string=24 value_string=55 stack=27 +UserRegionEnd dt=2 task=10 name_string=30 stack=28 +GoDestroy dt=1 +GoStart dt=2 g=39 g_seq=1 +UserRegionBegin dt=23 task=10 name_string=56 stack=26 +UserLog dt=6 task=10 key_string=24 value_string=57 stack=27 +UserRegionEnd dt=1 task=10 name_string=56 stack=28 +GoDestroy dt=1 +GoStart dt=8 g=35 g_seq=1 +UserRegionBegin dt=17 task=10 name_string=33 stack=26 +UserLog dt=4 task=10 key_string=24 value_string=58 stack=27 +UserRegionEnd dt=2 task=10 name_string=33 stack=28 +GoDestroy dt=1 +GoStart dt=3 g=36 g_seq=1 +UserRegionBegin dt=19 task=10 name_string=35 stack=26 +UserLog dt=4 task=10 key_string=24 value_string=59 stack=27 +UserRegionEnd dt=2 task=10 name_string=35 stack=28 +GoDestroy dt=1 +ProcStop dt=11 +ProcStart dt=142205 p=0 p_seq=9 +ProcStop dt=19 +ProcStart dt=16811 p=0 p_seq=10 +GoUnblock dt=26 g=1 g_seq=7 stack=0 +GoStart dt=201 g=1 g_seq=8 +UserTaskEnd dt=24 task=10 stack=62 +UserLog dt=18 task=4 key_string=24 value_string=63 stack=63 +UserTaskBegin dt=11 task=12 parent_task=4 name_string=26 stack=64 +UserLog dt=21 task=12 key_string=27 value_string=64 stack=65 +UserRegionBegin dt=7 task=12 name_string=29 stack=66 +GoCreate dt=33 new_g=5 new_stack=17 stack=67 +GoCreate dt=12 new_g=6 new_stack=17 stack=67 +GoCreate dt=9 new_g=7 new_stack=17 stack=67 +GoCreate dt=8 new_g=8 new_stack=17 stack=67 +GoCreate dt=19 new_g=9 new_stack=17 stack=67 +UserRegionEnd dt=14 task=12 name_string=29 stack=68 
+GoBlock dt=11 reason_string=19 stack=69 +GoStart dt=13 g=9 g_seq=1 +UserRegionBegin dt=70 task=12 name_string=56 stack=26 +UserLog dt=11 task=12 key_string=24 value_string=65 stack=27 +UserRegionEnd dt=3 task=12 name_string=56 stack=28 +GoDestroy dt=2 +GoStart dt=7 g=5 g_seq=1 +UserRegionBegin dt=24 task=12 name_string=33 stack=26 +UserLog dt=5 task=12 key_string=24 value_string=66 stack=27 +UserRegionEnd dt=2 task=12 name_string=33 stack=28 +GoDestroy dt=2 +GoStart dt=8 g=6 g_seq=1 +UserRegionBegin dt=15 task=12 name_string=35 stack=26 +UserLog dt=7 task=12 key_string=24 value_string=67 stack=27 +UserRegionEnd dt=2 task=12 name_string=35 stack=28 +GoDestroy dt=1 +GoStart dt=2 g=7 g_seq=1 +UserRegionBegin dt=13 task=12 name_string=31 stack=26 +UserLog dt=5 task=12 key_string=24 value_string=68 stack=27 +UserLog dt=6 task=12 key_string=24 value_string=69 stack=30 +UserTaskBegin dt=5 task=13 parent_task=12 name_string=26 stack=31 +UserLog dt=7 task=13 key_string=27 value_string=70 stack=32 +UserRegionBegin dt=4 task=13 name_string=29 stack=33 +UserRegionEnd dt=6 task=13 name_string=29 stack=34 +GoBlock dt=18 reason_string=19 stack=35 +GoStart dt=12 g=8 g_seq=1 +UserRegionBegin dt=22 task=12 name_string=30 stack=26 +UserLog dt=5 task=12 key_string=24 value_string=71 stack=27 +UserRegionEnd dt=2 task=12 name_string=30 stack=28 +GoDestroy dt=1 +ProcStop dt=20 +ProcStart dt=141838 p=4 p_seq=8 +ProcStop dt=16 +ProcStart dt=17652 p=4 p_seq=9 +GoUnblock dt=48 g=1 g_seq=9 stack=0 +GoUnblock dt=8 g=7 g_seq=2 stack=0 +GoStart dt=271 g=7 g_seq=3 +UserTaskEnd dt=25 task=13 stack=36 +UserRegionEnd dt=15 task=12 name_string=31 stack=28 +GoDestroy dt=4 +GoStart dt=19 g=1 g_seq=10 +UserTaskEnd dt=19 task=12 stack=70 +UserLog dt=21 task=0 key_string=24 value_string=72 stack=13 +UserTaskBegin dt=19 task=14 parent_task=0 name_string=26 stack=14 +UserLog dt=37 task=14 key_string=27 value_string=73 stack=15 +UserRegionBegin dt=6 task=14 name_string=29 stack=16 +GoCreate dt=28 new_g=41 
new_stack=17 stack=18 +GoCreate dt=14 new_g=42 new_stack=17 stack=18 +GoCreate dt=12 new_g=43 new_stack=17 stack=18 +GoCreate dt=10 new_g=44 new_stack=17 stack=18 +UserRegionEnd dt=5 task=14 name_string=29 stack=19 +GoBlock dt=9 reason_string=19 stack=20 +GoStart dt=16 g=44 g_seq=1 +UserRegionBegin dt=107 task=14 name_string=30 stack=26 +UserLog dt=16 task=14 key_string=24 value_string=74 stack=27 +UserRegionEnd dt=3 task=14 name_string=30 stack=28 +GoDestroy dt=2 +GoStart dt=7 g=41 g_seq=1 +UserRegionBegin dt=30 task=14 name_string=33 stack=26 +UserLog dt=7 task=14 key_string=24 value_string=75 stack=27 +UserRegionEnd dt=2 task=14 name_string=33 stack=28 +GoDestroy dt=2 +GoStart dt=7 g=42 g_seq=1 +UserRegionBegin dt=27 task=14 name_string=35 stack=26 +UserLog dt=7 task=14 key_string=24 value_string=76 stack=27 +UserRegionEnd dt=2 task=14 name_string=35 stack=28 +GoDestroy dt=2 +ProcStop dt=28 +ProcStart dt=141923 p=0 p_seq=12 +ProcStop dt=19 +ProcStart dt=16780 p=0 p_seq=13 +GoUnblock dt=22 g=43 g_seq=2 stack=0 +GoStart dt=162 g=43 g_seq=3 +UserTaskEnd dt=16 task=15 stack=36 +UserRegionEnd dt=12 task=14 name_string=31 stack=28 +GoDestroy dt=2 +ProcStop dt=8 +ProcStart dt=1532 p=2 p_seq=9 +ProcStop dt=12 +ProcStart dt=141906 p=4 p_seq=11 +ProcStop dt=16 +ProcStart dt=16784 p=4 p_seq=12 +GoUnblock dt=20 g=1 g_seq=13 stack=0 +GoStart dt=191 g=1 g_seq=14 +UserTaskEnd dt=15 task=16 stack=45 +UserLog dt=17 task=2 key_string=24 value_string=84 stack=46 +UserTaskBegin dt=8 task=17 parent_task=2 name_string=26 stack=47 +UserLog dt=20 task=17 key_string=27 value_string=85 stack=48 +UserRegionBegin dt=6 task=17 name_string=29 stack=49 +GoCreate dt=28 new_g=45 new_stack=17 stack=50 +GoCreate dt=9 new_g=46 new_stack=17 stack=50 +GoCreate dt=10 new_g=47 new_stack=17 stack=50 +UserRegionEnd dt=5 task=17 name_string=29 stack=51 +GoBlock dt=6 reason_string=19 stack=52 +GoStart dt=10 g=47 g_seq=1 +UserRegionBegin dt=69 task=17 name_string=31 stack=26 +UserLog dt=11 task=17 
key_string=24 value_string=86 stack=27 +UserLog dt=7 task=17 key_string=24 value_string=87 stack=30 +UserTaskBegin dt=5 task=18 parent_task=17 name_string=26 stack=31 +UserLog dt=7 task=18 key_string=27 value_string=88 stack=32 +UserRegionBegin dt=5 task=18 name_string=29 stack=33 +UserRegionEnd dt=4 task=18 name_string=29 stack=34 +HeapAlloc dt=35 heapalloc_value=1818624 +GoBlock dt=14 reason_string=19 stack=35 +HeapAlloc dt=11 heapalloc_value=1826816 +GoStart dt=10 g=45 g_seq=1 +UserRegionBegin dt=29 task=17 name_string=33 stack=26 +UserLog dt=9 task=17 key_string=24 value_string=89 stack=27 +UserRegionEnd dt=3 task=17 name_string=33 stack=28 +GoDestroy dt=1 +GoStart dt=5 g=46 g_seq=1 +UserRegionBegin dt=15 task=17 name_string=35 stack=26 +UserLog dt=8 task=17 key_string=24 value_string=90 stack=27 +UserRegionEnd dt=2 task=17 name_string=35 stack=28 +GoDestroy dt=1 +ProcStop dt=3 +ProcStart dt=141981 p=0 p_seq=16 +ProcStop dt=19 +ProcStart dt=17153 p=0 p_seq=17 +GoUnblock dt=44 g=1 g_seq=15 stack=0 +GoUnblock dt=11 g=47 g_seq=2 stack=0 +GoStart dt=215 g=47 g_seq=3 +UserTaskEnd dt=22 task=18 stack=36 +UserRegionEnd dt=9 task=17 name_string=31 stack=28 +GoDestroy dt=3 +GoStart dt=19 g=1 g_seq=16 +UserTaskEnd dt=13 task=17 stack=54 +UserLog dt=18 task=3 key_string=24 value_string=91 stack=55 +UserTaskBegin dt=7 task=19 parent_task=3 name_string=26 stack=56 +UserLog dt=27 task=19 key_string=27 value_string=92 stack=57 +UserRegionBegin dt=8 task=19 name_string=29 stack=58 +GoCreate dt=30 new_g=10 new_stack=17 stack=59 +GoCreate dt=9 new_g=11 new_stack=17 stack=59 +GoCreate dt=11 new_g=12 new_stack=17 stack=59 +GoCreate dt=7 new_g=13 new_stack=17 stack=59 +GoCreate dt=7 new_g=14 new_stack=17 stack=59 +GoCreate dt=9 new_g=15 new_stack=17 stack=59 +UserRegionEnd dt=5 task=19 name_string=29 stack=60 +GoBlock dt=7 reason_string=19 stack=61 +GoStart dt=17 g=15 g_seq=1 +UserRegionBegin dt=61 task=19 name_string=53 stack=26 +UserLog dt=10 task=19 key_string=24 value_string=93 
stack=27 +UserRegionEnd dt=3 task=19 name_string=53 stack=28 +GoDestroy dt=1 +GoStart dt=4 g=10 g_seq=1 +UserRegionBegin dt=26 task=19 name_string=33 stack=26 +UserLog dt=7 task=19 key_string=24 value_string=94 stack=27 +UserRegionEnd dt=2 task=19 name_string=33 stack=28 +GoDestroy dt=1 +GoStart dt=4 g=11 g_seq=1 +UserRegionBegin dt=20 task=19 name_string=35 stack=26 +UserLog dt=5 task=19 key_string=24 value_string=95 stack=27 +UserRegionEnd dt=2 task=19 name_string=35 stack=28 +GoDestroy dt=1 +GoStart dt=7 g=12 g_seq=1 +UserRegionBegin dt=14 task=19 name_string=31 stack=26 +UserLog dt=4 task=19 key_string=24 value_string=96 stack=27 +UserLog dt=4 task=19 key_string=24 value_string=97 stack=30 +UserTaskBegin dt=7 task=20 parent_task=19 name_string=26 stack=31 +UserLog dt=5 task=20 key_string=27 value_string=98 stack=32 +UserRegionBegin dt=4 task=20 name_string=29 stack=33 +UserRegionEnd dt=5 task=20 name_string=29 stack=34 +GoBlock dt=9 reason_string=19 stack=35 +GoStart dt=9 g=14 g_seq=1 +UserRegionBegin dt=28 task=19 name_string=56 stack=26 +UserLog dt=7 task=19 key_string=24 value_string=99 stack=27 +UserRegionEnd dt=2 task=19 name_string=56 stack=28 +GoDestroy dt=2 +ProcStop dt=17 +ProcStart dt=141933 p=2 p_seq=11 +ProcStop dt=13 +ProcStart dt=16744 p=2 p_seq=12 +GoUnblock dt=29 g=1 g_seq=17 stack=0 +GoUnblock dt=7 g=12 g_seq=2 stack=0 +GoStart dt=172 g=12 g_seq=3 +UserTaskEnd dt=15 task=20 stack=36 +UserRegionEnd dt=8 task=19 name_string=31 stack=28 +GoDestroy dt=2 +GoStart dt=11 g=1 g_seq=18 +UserTaskEnd dt=14 task=19 stack=62 +UserLog dt=16 task=4 key_string=24 value_string=101 stack=63 +UserTaskBegin dt=6 task=21 parent_task=4 name_string=26 stack=64 +UserLog dt=25 task=21 key_string=27 value_string=102 stack=65 +UserRegionBegin dt=7 task=21 name_string=29 stack=66 +GoCreate dt=23 new_g=54 new_stack=17 stack=67 +GoCreate dt=8 new_g=55 new_stack=17 stack=67 +GoCreate dt=17 new_g=56 new_stack=17 stack=67 +GoCreate dt=8 new_g=57 new_stack=17 stack=67 +GoCreate 
dt=7 new_g=58 new_stack=17 stack=67 +UserRegionEnd dt=4 task=21 name_string=29 stack=68 +GoBlock dt=9 reason_string=19 stack=69 +GoStart dt=7 g=58 g_seq=1 +UserRegionBegin dt=46 task=21 name_string=56 stack=26 +UserLog dt=8 task=21 key_string=24 value_string=103 stack=27 +UserRegionEnd dt=4 task=21 name_string=56 stack=28 +GoDestroy dt=1 +GoStart dt=3 g=54 g_seq=1 +UserRegionBegin dt=19 task=21 name_string=33 stack=26 +UserLog dt=7 task=21 key_string=24 value_string=104 stack=27 +UserRegionEnd dt=2 task=21 name_string=33 stack=28 +GoDestroy dt=1 +GoStart dt=2 g=55 g_seq=1 +UserRegionBegin dt=17 task=21 name_string=35 stack=26 +UserLog dt=4 task=21 key_string=24 value_string=105 stack=27 +UserRegionEnd dt=2 task=21 name_string=35 stack=28 +GoDestroy dt=1 +GoStart dt=5 g=56 g_seq=1 +UserRegionBegin dt=16 task=21 name_string=31 stack=26 +UserLog dt=4 task=21 key_string=24 value_string=106 stack=27 +UserLog dt=3 task=21 key_string=24 value_string=107 stack=30 +UserTaskBegin dt=4 task=22 parent_task=21 name_string=26 stack=31 +UserLog dt=6 task=22 key_string=27 value_string=108 stack=32 +UserRegionBegin dt=4 task=22 name_string=29 stack=33 +UserRegionEnd dt=7 task=22 name_string=29 stack=34 +GoBlock dt=14 reason_string=19 stack=35 +GoStart dt=3 g=57 g_seq=1 +UserRegionBegin dt=22 task=21 name_string=30 stack=26 +UserLog dt=6 task=21 key_string=24 value_string=109 stack=27 +UserRegionEnd dt=2 task=21 name_string=30 stack=28 +GoDestroy dt=2 +ProcStop dt=10 +ProcStart dt=128031 p=4 p_seq=15 +ProcStop dt=16 +ProcStart dt=33758 p=2 p_seq=15 +ProcStop dt=18 +EventBatch gen=1 m=1986496 time=2753925246280 size=267 +ProcStart dt=549 p=0 p_seq=1 +GoStart dt=211 g=18 g_seq=1 +GoBlock dt=3533 reason_string=12 stack=21 +GoStart dt=41 g=21 g_seq=1 +GoBlock dt=150 reason_string=10 stack=22 +GoStart dt=93 g=20 g_seq=1 +GoSyscallBegin dt=51 p_seq=2 stack=23 +GoSyscallEnd dt=400 +GoBlock dt=582 reason_string=15 stack=25 +GoStart dt=26 g=23 g_seq=1 +HeapAlloc dt=50 heapalloc_value=1646592 
+UserRegionBegin dt=2921 task=5 name_string=31 stack=26 +UserLog dt=28 task=5 key_string=24 value_string=37 stack=27 +UserLog dt=13 task=5 key_string=24 value_string=38 stack=30 +UserTaskBegin dt=15 task=6 parent_task=5 name_string=26 stack=31 +HeapAlloc dt=26 heapalloc_value=1687552 +UserLog dt=14 task=6 key_string=27 value_string=39 stack=32 +UserRegionBegin dt=9 task=6 name_string=29 stack=33 +UserRegionEnd dt=6 task=6 name_string=29 stack=34 +GoBlock dt=15 reason_string=19 stack=35 +ProcStop dt=30 +ProcStart dt=156949 p=4 p_seq=2 +GoUnblock dt=46 g=1 g_seq=1 stack=0 +GoStart dt=253 g=1 g_seq=2 +UserTaskEnd dt=27 task=5 stack=37 +UserLog dt=23 task=1 key_string=24 value_string=40 stack=38 +UserTaskBegin dt=14 task=7 parent_task=1 name_string=26 stack=39 +HeapAlloc dt=596 heapalloc_value=1695744 +HeapAlloc dt=18 heapalloc_value=1703936 +UserLog dt=17 task=7 key_string=27 value_string=41 stack=40 +UserRegionBegin dt=14 task=7 name_string=29 stack=41 +HeapAlloc dt=10 heapalloc_value=1712128 +HeapAlloc dt=17 heapalloc_value=1720320 +GoCreate dt=44 new_g=33 new_stack=17 stack=42 +GoCreate dt=175 new_g=34 new_stack=17 stack=42 +UserRegionEnd dt=50 task=7 name_string=29 stack=43 +GoBlock dt=9 reason_string=19 stack=44 +HeapAlloc dt=16 heapalloc_value=1728512 +GoStart dt=239 g=34 g_seq=1 +HeapAlloc dt=21 heapalloc_value=1736704 +UserRegionBegin dt=92 task=7 name_string=35 stack=26 +UserLog dt=15 task=7 key_string=24 value_string=42 stack=27 +UserRegionEnd dt=4 task=7 name_string=35 stack=28 +GoDestroy dt=2 +ProcStop dt=21 +ProcStart dt=800974 p=4 p_seq=10 +ProcStop dt=39 +ProcStart dt=158775 p=0 p_seq=15 +ProcStop dt=24 +ProcStart dt=159722 p=4 p_seq=13 +GoStart dt=254 g=13 g_seq=1 +UserRegionBegin dt=239 task=19 name_string=30 stack=26 +UserLog dt=23 task=19 key_string=24 value_string=100 stack=27 +UserRegionEnd dt=6 task=19 name_string=30 stack=28 +GoDestroy dt=7 +ProcStop dt=22 +EventBatch gen=1 m=1986495 time=2753925251756 size=320 +ProcStart dt=705 p=4 p_seq=1 
+ProcStop dt=1279 +ProcStart dt=158975 p=0 p_seq=5 +ProcStop dt=23 +ProcStart dt=792 p=0 p_seq=6 +GoStart dt=187 g=33 g_seq=1 +UserRegionBegin dt=244 task=7 name_string=33 stack=26 +UserLog dt=32 task=7 key_string=24 value_string=43 stack=27 +UserRegionEnd dt=7 task=7 name_string=33 stack=28 +GoDestroy dt=5 +ProcStop dt=24 +ProcStart dt=160255 p=4 p_seq=4 +ProcStop dt=27 +ProcStart dt=159067 p=2 p_seq=5 +GoStart dt=222 g=37 g_seq=1 +UserRegionBegin dt=114 task=10 name_string=31 stack=26 +UserLog dt=16 task=10 key_string=24 value_string=60 stack=27 +UserLog dt=8 task=10 key_string=24 value_string=61 stack=30 +UserTaskBegin dt=8 task=11 parent_task=10 name_string=26 stack=31 +UserLog dt=19 task=11 key_string=27 value_string=62 stack=32 +UserRegionBegin dt=6 task=11 name_string=29 stack=33 +UserRegionEnd dt=7 task=11 name_string=29 stack=34 +GoBlock dt=15 reason_string=19 stack=35 +ProcStop dt=11 +ProcStart dt=160101 p=4 p_seq=7 +ProcStop dt=21 +ProcStart dt=159647 p=2 p_seq=7 +GoStart dt=277 g=43 g_seq=1 +UserRegionBegin dt=126 task=14 name_string=31 stack=26 +UserLog dt=21 task=14 key_string=24 value_string=77 stack=27 +UserLog dt=9 task=14 key_string=24 value_string=78 stack=30 +UserTaskBegin dt=8 task=15 parent_task=14 name_string=26 stack=31 +UserLog dt=17 task=15 key_string=27 value_string=79 stack=32 +UserRegionBegin dt=6 task=15 name_string=29 stack=33 +UserRegionEnd dt=8 task=15 name_string=29 stack=34 +GoBlock dt=23 reason_string=19 stack=35 +ProcStop dt=17 +ProcStart dt=159706 p=0 p_seq=14 +GoStart dt=229 g=52 g_seq=1 +UserRegionBegin dt=103 task=16 name_string=33 stack=26 +UserLog dt=20 task=16 key_string=24 value_string=83 stack=27 +UserRegionEnd dt=4 task=16 name_string=33 stack=28 +GoDestroy dt=3 +ProcStop dt=17 +ProcStart dt=319699 p=2 p_seq=10 +ProcStop dt=20 +ProcStart dt=158728 p=4 p_seq=14 +ProcStop dt=17 +ProcStart dt=110606 p=2 p_seq=13 +ProcStop dt=10 +ProcStart dt=16732 p=2 p_seq=14 +GoUnblock dt=45 g=18 g_seq=2 stack=0 +GoStart dt=184 g=18 
g_seq=3 +GoBlock dt=114 reason_string=12 stack=21 +ProcStop dt=8 +ProcStart dt=16779 p=4 p_seq=16 +ProcStop dt=11 +ProcStart dt=16790 p=4 p_seq=17 +GoUnblock dt=23 g=1 g_seq=19 stack=0 +GoUnblock dt=8 g=56 g_seq=2 stack=0 +GoStart dt=142 g=56 g_seq=3 +UserTaskEnd dt=14 task=22 stack=36 +UserRegionEnd dt=8 task=21 name_string=31 stack=28 +GoDestroy dt=5 +GoStart dt=18 g=1 g_seq=20 +UserTaskEnd dt=17 task=21 stack=70 +UserTaskEnd dt=12 task=4 stack=71 +HeapAlloc dt=802 heapalloc_value=1835008 +HeapAlloc dt=41 heapalloc_value=1843200 +HeapAlloc dt=13 heapalloc_value=1851392 +EventBatch gen=1 m=1986494 time=2753925248778 size=47 +ProcStart dt=390 p=3 p_seq=1 +GoStart dt=1718 g=22 g_seq=1 +HeapAlloc dt=1807 heapalloc_value=1654784 +HeapAlloc dt=406 heapalloc_value=1671168 +HeapAlloc dt=15 heapalloc_value=1679360 +UserRegionBegin dt=49 task=5 name_string=35 stack=26 +UserLog dt=30 task=5 key_string=24 value_string=36 stack=27 +UserRegionEnd dt=5 task=5 name_string=35 stack=28 +GoDestroy dt=5 +ProcStop dt=42 +EventBatch gen=1 m=1986492 time=2753925244400 size=582 +ProcStatus dt=67 p=1 pstatus=1 +GoStatus dt=4 g=1 m=1986492 gstatus=2 +ProcsChange dt=220 procs_value=8 stack=1 +STWBegin dt=127 kind_string=21 stack=2 +HeapGoal dt=3 heapgoal_value=4194304 +ProcStatus dt=2 p=0 pstatus=2 +ProcStatus dt=2 p=2 pstatus=2 +ProcStatus dt=1 p=3 pstatus=2 +ProcStatus dt=1 p=4 pstatus=2 +ProcStatus dt=1 p=5 pstatus=2 +ProcStatus dt=1 p=6 pstatus=2 +ProcStatus dt=1 p=7 pstatus=2 +ProcsChange dt=353 procs_value=8 stack=3 +STWEnd dt=277 +HeapAlloc dt=243 heapalloc_value=1605632 +HeapAlloc dt=24 heapalloc_value=1613824 +GoCreate dt=209 new_g=18 new_stack=4 stack=5 +GoCreate dt=561 new_g=19 new_stack=6 stack=7 +GoCreate dt=25 new_g=20 new_stack=8 stack=9 +UserTaskEnd dt=309 task=2 stack=10 +UserTaskBegin dt=26 task=3 parent_task=1 name_string=22 stack=11 +UserTaskBegin dt=918 task=4 parent_task=0 name_string=23 stack=12 +UserLog dt=461 task=0 key_string=24 value_string=25 stack=13 
+UserTaskBegin dt=420 task=5 parent_task=0 name_string=26 stack=14 +UserLog dt=673 task=5 key_string=27 value_string=28 stack=15 +UserRegionBegin dt=15 task=5 name_string=29 stack=16 +HeapAlloc dt=51 heapalloc_value=1630208 +GoCreate dt=24 new_g=21 new_stack=17 stack=18 +GoCreate dt=17 new_g=22 new_stack=17 stack=18 +GoCreate dt=10 new_g=23 new_stack=17 stack=18 +GoCreate dt=9 new_g=24 new_stack=17 stack=18 +UserRegionEnd dt=549 task=5 name_string=29 stack=19 +GoBlock dt=14 reason_string=19 stack=20 +GoStart dt=378 g=24 g_seq=1 +HeapAlloc dt=65 heapalloc_value=1638400 +GoUnblock dt=559 g=21 g_seq=2 stack=24 +UserRegionBegin dt=1498 task=5 name_string=30 stack=26 +UserLog dt=35 task=5 key_string=24 value_string=32 stack=27 +UserRegionEnd dt=8 task=5 name_string=30 stack=28 +GoDestroy dt=5 +GoStart dt=24 g=21 g_seq=3 +UserRegionBegin dt=60 task=5 name_string=33 stack=26 +UserLog dt=7 task=5 key_string=24 value_string=34 stack=27 +UserRegionEnd dt=2 task=5 name_string=33 stack=28 +GoDestroy dt=2 +ProcStop dt=34 +ProcStart dt=141874 p=0 p_seq=3 +ProcStop dt=21 +ProcStart dt=16770 p=0 p_seq=4 +GoUnblock dt=29 g=23 g_seq=2 stack=0 +GoStart dt=176 g=23 g_seq=3 +UserTaskEnd dt=19 task=6 stack=36 +UserRegionEnd dt=14 task=5 name_string=31 stack=28 +GoDestroy dt=2 +ProcStop dt=12 +ProcStart dt=2251 p=4 p_seq=3 +ProcStop dt=22 +ProcStart dt=141952 p=2 p_seq=3 +ProcStop dt=27 +ProcStart dt=16789 p=2 p_seq=4 +GoUnblock dt=35 g=1 g_seq=3 stack=0 +GoStart dt=214 g=1 g_seq=4 +UserTaskEnd dt=26 task=7 stack=45 +UserLog dt=27 task=2 key_string=24 value_string=44 stack=46 +UserTaskBegin dt=10 task=8 parent_task=2 name_string=26 stack=47 +HeapAlloc dt=52 heapalloc_value=1744896 +HeapAlloc dt=22 heapalloc_value=1753088 +UserLog dt=13 task=8 key_string=27 value_string=45 stack=48 +UserRegionBegin dt=11 task=8 name_string=29 stack=49 +HeapAlloc dt=7 heapalloc_value=1761280 +HeapAlloc dt=18 heapalloc_value=1769472 +GoCreate dt=52 new_g=49 new_stack=17 stack=50 +GoCreate dt=12 new_g=50 
new_stack=17 stack=50 +HeapAlloc dt=11 heapalloc_value=1777664 +GoCreate dt=9 new_g=51 new_stack=17 stack=50 +UserRegionEnd dt=9 task=8 name_string=29 stack=51 +GoBlock dt=11 reason_string=19 stack=52 +HeapAlloc dt=12 heapalloc_value=1785856 +GoStart dt=14 g=51 g_seq=1 +HeapAlloc dt=18 heapalloc_value=1794048 +UserRegionBegin dt=95 task=8 name_string=31 stack=26 +UserLog dt=22 task=8 key_string=24 value_string=46 stack=27 +UserLog dt=8 task=8 key_string=24 value_string=47 stack=30 +UserTaskBegin dt=5 task=9 parent_task=8 name_string=26 stack=31 +UserLog dt=7 task=9 key_string=27 value_string=48 stack=32 +UserRegionBegin dt=4 task=9 name_string=29 stack=33 +UserRegionEnd dt=7 task=9 name_string=29 stack=34 +HeapAlloc dt=11 heapalloc_value=1802240 +GoStop dt=674 reason_string=16 stack=53 +GoStart dt=12 g=51 g_seq=2 +GoBlock dt=8 reason_string=19 stack=35 +HeapAlloc dt=16 heapalloc_value=1810432 +ProcStop dt=8 +ProcStart dt=159907 p=0 p_seq=8 +ProcStop dt=25 +ProcStart dt=159186 p=2 p_seq=6 +GoUnblock dt=22 g=37 g_seq=2 stack=0 +GoStart dt=217 g=37 g_seq=3 +UserTaskEnd dt=19 task=11 stack=36 +UserRegionEnd dt=15 task=10 name_string=31 stack=28 +GoDestroy dt=5 +ProcStop dt=16 +ProcStart dt=160988 p=0 p_seq=11 +ProcStop dt=29 +ProcStart dt=158554 p=2 p_seq=8 +GoUnblock dt=38 g=1 g_seq=11 stack=0 +GoStart dt=240 g=1 g_seq=12 +UserTaskEnd dt=25 task=14 stack=37 +UserLog dt=23 task=1 key_string=24 value_string=80 stack=38 +UserTaskBegin dt=11 task=16 parent_task=1 name_string=26 stack=39 +UserLog dt=36 task=16 key_string=27 value_string=81 stack=40 +UserRegionBegin dt=13 task=16 name_string=29 stack=41 +GoCreate dt=39 new_g=52 new_stack=17 stack=42 +GoCreate dt=23 new_g=53 new_stack=17 stack=42 +UserRegionEnd dt=11 task=16 name_string=29 stack=43 +GoBlock dt=9 reason_string=19 stack=44 +GoStart dt=244 g=53 g_seq=1 +UserRegionBegin dt=101 task=16 name_string=35 stack=26 +UserLog dt=17 task=16 key_string=24 value_string=82 stack=27 +UserRegionEnd dt=4 task=16 name_string=35 
stack=28 +GoDestroy dt=3 +ProcStop dt=28 +EventBatch gen=1 m=18446744073709551615 time=2753926855140 size=56 +GoStatus dt=74 g=2 m=18446744073709551615 gstatus=4 +GoStatus dt=3 g=3 m=18446744073709551615 gstatus=4 +GoStatus dt=1 g=4 m=18446744073709551615 gstatus=4 +GoStatus dt=1 g=17 m=18446744073709551615 gstatus=4 +EventBatch gen=1 m=18446744073709551615 time=2753926855560 size=1759 +Stacks +Stack id=45 nframes=3 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4803566 func=114 file=113 line=44 +Stack id=22 nframes=7 + pc=4633935 func=115 file=116 line=90 + pc=4633896 func=117 file=118 line=223 + pc=4633765 func=119 file=118 line=216 + pc=4633083 func=120 file=118 line=131 + pc=4764601 func=121 file=122 line=152 + pc=4765335 func=123 file=122 line=238 + pc=4804612 func=124 file=113 line=70 +Stack id=9 nframes=2 + pc=4802543 func=125 file=126 line=128 + pc=4803332 func=114 file=113 line=30 +Stack id=71 nframes=2 + pc=4803671 func=110 file=111 line=80 + pc=4803666 func=114 file=113 line=51 +Stack id=10 nframes=2 + pc=4803415 func=110 file=111 line=80 + pc=4803410 func=114 file=113 line=33 +Stack id=18 nframes=4 + pc=4804196 func=127 file=113 line=69 + pc=4802140 func=128 file=111 line=141 + pc=4804022 func=112 file=113 line=67 + pc=4803543 func=114 file=113 line=43 +Stack id=37 nframes=3 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4803543 func=114 file=113 line=43 +Stack id=31 nframes=4 + pc=4803865 func=112 file=113 line=61 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=55 nframes=2 + pc=4803832 func=112 file=113 line=58 + pc=4803609 func=114 file=113 line=46 +Stack id=47 nframes=2 + pc=4803865 func=112 file=113 line=61 + pc=4803589 func=114 file=113 line=45 +Stack id=38 nframes=2 + pc=4803832 func=112 file=113 line=58 + pc=4803566 func=114 file=113 line=44 +Stack id=56 nframes=2 + pc=4803865 func=112 file=113 
line=61 + pc=4803609 func=114 file=113 line=46 +Stack id=33 nframes=4 + pc=4804022 func=112 file=113 line=67 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=44 nframes=3 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4803566 func=114 file=113 line=44 +Stack id=3 nframes=4 + pc=4421707 func=132 file=133 line=1382 + pc=4533555 func=134 file=135 line=255 + pc=4802469 func=125 file=126 line=125 + pc=4803332 func=114 file=113 line=30 +Stack id=6 nframes=1 + pc=4539520 func=136 file=135 line=868 +Stack id=58 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803609 func=114 file=113 line=46 +Stack id=64 nframes=2 + pc=4803865 func=112 file=113 line=61 + pc=4803629 func=114 file=113 line=47 +Stack id=62 nframes=3 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4803609 func=114 file=113 line=46 +Stack id=34 nframes=4 + pc=4804022 func=112 file=113 line=67 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=30 nframes=4 + pc=4803832 func=112 file=113 line=58 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=32 nframes=4 + pc=4803943 func=112 file=113 line=64 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=26 nframes=1 + pc=4804691 func=124 file=113 line=70 +Stack id=46 nframes=2 + pc=4803832 func=112 file=113 line=58 + pc=4803589 func=114 file=113 line=45 +Stack id=50 nframes=4 + pc=4804196 func=127 file=113 line=69 + pc=4802140 func=128 file=111 line=141 + pc=4804022 func=112 file=113 line=67 + pc=4803589 func=114 file=113 line=45 +Stack id=59 nframes=4 + pc=4804196 func=127 file=113 line=69 + pc=4802140 func=128 file=111 line=141 + pc=4804022 func=112 file=113 line=67 + pc=4803609 func=114 
file=113 line=46 +Stack id=7 nframes=4 + pc=4539492 func=137 file=135 line=868 + pc=4533572 func=134 file=135 line=258 + pc=4802469 func=125 file=126 line=125 + pc=4803332 func=114 file=113 line=30 +Stack id=17 nframes=1 + pc=4804512 func=124 file=113 line=69 +Stack id=57 nframes=2 + pc=4803943 func=112 file=113 line=64 + pc=4803609 func=114 file=113 line=46 +Stack id=41 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803566 func=114 file=113 line=44 +Stack id=63 nframes=2 + pc=4803832 func=112 file=113 line=58 + pc=4803629 func=114 file=113 line=47 +Stack id=60 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803609 func=114 file=113 line=46 +Stack id=5 nframes=4 + pc=4542549 func=138 file=139 line=42 + pc=4533560 func=134 file=135 line=257 + pc=4802469 func=125 file=126 line=125 + pc=4803332 func=114 file=113 line=30 +Stack id=40 nframes=2 + pc=4803943 func=112 file=113 line=64 + pc=4803566 func=114 file=113 line=44 +Stack id=21 nframes=3 + pc=4217905 func=140 file=141 line=442 + pc=4539946 func=142 file=135 line=928 + pc=4542714 func=143 file=139 line=54 +Stack id=2 nframes=3 + pc=4533284 func=134 file=135 line=238 + pc=4802469 func=125 file=126 line=125 + pc=4803332 func=114 file=113 line=30 +Stack id=53 nframes=6 + pc=4247492 func=144 file=145 line=1374 + pc=4599676 func=130 file=131 line=186 + pc=4804036 func=112 file=113 line=83 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=20 nframes=3 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4803543 func=114 file=113 line=43 +Stack id=70 nframes=3 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4803629 func=114 file=113 line=47 +Stack id=15 nframes=2 + pc=4803943 func=112 file=113 line=64 + pc=4803543 func=114 file=113 line=43 +Stack id=65 nframes=2 + pc=4803943 func=112 file=113 line=64 + pc=4803629 func=114 file=113 line=47 +Stack id=28 nframes=1 + 
pc=4804691 func=124 file=113 line=70 +Stack id=48 nframes=2 + pc=4803943 func=112 file=113 line=64 + pc=4803589 func=114 file=113 line=45 +Stack id=61 nframes=3 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4803609 func=114 file=113 line=46 +Stack id=13 nframes=2 + pc=4803832 func=112 file=113 line=58 + pc=4803543 func=114 file=113 line=43 +Stack id=29 nframes=3 + pc=4217905 func=140 file=141 line=442 + pc=4539946 func=142 file=135 line=928 + pc=4539559 func=136 file=135 line=871 +Stack id=51 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803589 func=114 file=113 line=45 +Stack id=42 nframes=4 + pc=4804196 func=127 file=113 line=69 + pc=4802140 func=128 file=111 line=141 + pc=4804022 func=112 file=113 line=67 + pc=4803566 func=114 file=113 line=44 +Stack id=14 nframes=2 + pc=4803865 func=112 file=113 line=61 + pc=4803543 func=114 file=113 line=43 +Stack id=39 nframes=2 + pc=4803865 func=112 file=113 line=61 + pc=4803566 func=114 file=113 line=44 +Stack id=49 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803589 func=114 file=113 line=45 +Stack id=52 nframes=3 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4803589 func=114 file=113 line=45 +Stack id=24 nframes=7 + pc=4634510 func=146 file=116 line=223 + pc=4634311 func=117 file=118 line=240 + pc=4633765 func=119 file=118 line=216 + pc=4633083 func=120 file=118 line=131 + pc=4764601 func=121 file=122 line=152 + pc=4765335 func=123 file=122 line=238 + pc=4804612 func=124 file=113 line=70 +Stack id=43 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803566 func=114 file=113 line=44 +Stack id=19 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803543 func=114 file=113 line=43 +Stack id=69 nframes=3 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4803629 func=114 file=113 line=47 +Stack id=16 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803543 func=114 file=113 line=43 
+Stack id=54 nframes=3 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4803589 func=114 file=113 line=45 +Stack id=35 nframes=5 + pc=4599892 func=130 file=131 line=195 + pc=4804036 func=112 file=113 line=83 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=27 nframes=3 + pc=4804862 func=129 file=113 line=71 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=4 nframes=1 + pc=4542656 func=143 file=139 line=42 +Stack id=8 nframes=1 + pc=4802720 func=147 file=126 line=128 +Stack id=66 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803629 func=114 file=113 line=47 +Stack id=1 nframes=4 + pc=4548715 func=148 file=149 line=255 + pc=4533263 func=134 file=135 line=237 + pc=4802469 func=125 file=126 line=125 + pc=4803332 func=114 file=113 line=30 +Stack id=67 nframes=4 + pc=4804196 func=127 file=113 line=69 + pc=4802140 func=128 file=111 line=141 + pc=4804022 func=112 file=113 line=67 + pc=4803629 func=114 file=113 line=47 +Stack id=23 nframes=7 + pc=4641050 func=150 file=151 line=964 + pc=4751591 func=152 file=153 line=209 + pc=4751583 func=154 file=155 line=736 + pc=4751136 func=156 file=155 line=380 + pc=4753008 func=157 file=158 line=46 + pc=4753000 func=159 file=160 line=183 + pc=4802778 func=147 file=126 line=134 +Stack id=11 nframes=1 + pc=4803445 func=114 file=113 line=36 +Stack id=68 nframes=2 + pc=4804022 func=112 file=113 line=67 + pc=4803629 func=114 file=113 line=47 +Stack id=36 nframes=5 + pc=4804964 func=110 file=111 line=80 + pc=4804052 func=112 file=113 line=84 + pc=4804890 func=129 file=113 line=73 + pc=4802140 func=128 file=111 line=141 + pc=4804691 func=124 file=113 line=70 +Stack id=12 nframes=1 + pc=4803492 func=114 file=113 line=39 +Stack id=25 nframes=1 + pc=4802788 func=147 file=126 line=130 +EventBatch gen=1 m=18446744073709551615 time=2753925243266 size=3466 +Strings +String id=1 + data="Not 
worker" +String id=2 + data="GC (dedicated)" +String id=3 + data="GC (fractional)" +String id=4 + data="GC (idle)" +String id=5 + data="unspecified" +String id=6 + data="forever" +String id=7 + data="network" +String id=8 + data="select" +String id=9 + data="sync.(*Cond).Wait" +String id=10 + data="sync" +String id=11 + data="chan send" +String id=12 + data="chan receive" +String id=13 + data="GC mark assist wait for work" +String id=14 + data="GC background sweeper wait" +String id=15 + data="system goroutine wait" +String id=16 + data="preempted" +String id=17 + data="wait for debug call" +String id=18 + data="wait until GC ends" +String id=19 + data="sleep" +String id=20 + data="runtime.Gosched" +String id=21 + data="start trace" +String id=22 + data="type2" +String id=23 + data="type3" +String id=24 + data="log" +String id=25 + data="before do" +String id=26 + data="do" +String id=27 + data="log2" +String id=28 + data="do" +String id=29 + data="fanout" +String id=30 + data="region3" +String id=31 + data="region2" +String id=32 + data="fanout region3" +String id=33 + data="region0" +String id=34 + data="fanout region0" +String id=35 + data="region1" +String id=36 + data="fanout region1" +String id=37 + data="fanout region2" +String id=38 + data="before do" +String id=39 + data="do" +String id=40 + data="before do" +String id=41 + data="do" +String id=42 + data="fanout region1" +String id=43 + data="fanout region0" +String id=44 + data="before do" +String id=45 + data="do" +String id=46 + data="fanout region2" +String id=47 + data="before do" +String id=48 + data="do" +String id=49 + data="fanout region0" +String id=50 + data="fanout region1" +String id=51 + data="before do" +String id=52 + data="do" +String id=53 + data="region5" +String id=54 + data="fanout region5" +String id=55 + data="fanout region3" +String id=56 + data="region4" +String id=57 + data="fanout region4" +String id=58 + data="fanout region0" +String id=59 + data="fanout region1" +String id=60 + 
data="fanout region2" +String id=61 + data="before do" +String id=62 + data="do" +String id=63 + data="before do" +String id=64 + data="do" +String id=65 + data="fanout region4" +String id=66 + data="fanout region0" +String id=67 + data="fanout region1" +String id=68 + data="fanout region2" +String id=69 + data="before do" +String id=70 + data="do" +String id=71 + data="fanout region3" +String id=72 + data="before do" +String id=73 + data="do" +String id=74 + data="fanout region3" +String id=75 + data="fanout region0" +String id=76 + data="fanout region1" +String id=77 + data="fanout region2" +String id=78 + data="before do" +String id=79 + data="do" +String id=80 + data="before do" +String id=81 + data="do" +String id=82 + data="fanout region1" +String id=83 + data="fanout region0" +String id=84 + data="before do" +String id=85 + data="do" +String id=86 + data="fanout region2" +String id=87 + data="before do" +String id=88 + data="do" +String id=89 + data="fanout region0" +String id=90 + data="fanout region1" +String id=91 + data="before do" +String id=92 + data="do" +String id=93 + data="fanout region5" +String id=94 + data="fanout region0" +String id=95 + data="fanout region1" +String id=96 + data="fanout region2" +String id=97 + data="before do" +String id=98 + data="do" +String id=99 + data="fanout region4" +String id=100 + data="fanout region3" +String id=101 + data="before do" +String id=102 + data="do" +String id=103 + data="fanout region4" +String id=104 + data="fanout region0" +String id=105 + data="fanout region1" +String id=106 + data="fanout region2" +String id=107 + data="before do" +String id=108 + data="do" +String id=109 + data="fanout region3" +String id=110 + data="runtime/trace.(*Task).End" +String id=111 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/annotation.go" +String id=112 + data="main.do" +String id=113 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/trace/v2/testdata/testprog/annotations-stress.go" 
+String id=114 + data="main.main" +String id=115 + data="sync.(*Mutex).Lock" +String id=116 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/mutex.go" +String id=117 + data="sync.(*Pool).pinSlow" +String id=118 + data="/usr/local/google/home/mknyszek/work/go-1/src/sync/pool.go" +String id=119 + data="sync.(*Pool).pin" +String id=120 + data="sync.(*Pool).Get" +String id=121 + data="fmt.newPrinter" +String id=122 + data="/usr/local/google/home/mknyszek/work/go-1/src/fmt/print.go" +String id=123 + data="fmt.Sprintf" +String id=124 + data="main.do.func1.1" +String id=125 + data="runtime/trace.Start" +String id=126 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace/trace.go" +String id=127 + data="main.do.func1" +String id=128 + data="runtime/trace.WithRegion" +String id=129 + data="main.do.func1.1.1" +String id=130 + data="time.Sleep" +String id=131 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/time.go" +String id=132 + data="runtime.startTheWorld" +String id=133 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/proc.go" +String id=134 + data="runtime.StartTrace" +String id=135 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2.go" +String id=136 + data="runtime.(*traceAdvancerState).start.func1" +String id=137 + data="runtime.(*traceAdvancerState).start" +String id=138 + data="runtime.traceStartReadCPU" +String id=139 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2cpu.go" +String id=140 + data="runtime.chanrecv1" +String id=141 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/chan.go" +String id=142 + data="runtime.(*wakeableSleep).sleep" +String id=143 + data="runtime.traceStartReadCPU.func1" +String id=144 + data="runtime.newobject" +String id=145 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/malloc.go" +String id=146 + data="sync.(*Mutex).Unlock" +String id=147 + data="runtime/trace.Start.func1" +String id=148 + 
data="runtime.traceLocker.Gomaxprocs" +String id=149 + data="/usr/local/google/home/mknyszek/work/go-1/src/runtime/trace2runtime.go" +String id=150 + data="syscall.write" +String id=151 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/zsyscall_linux_amd64.go" +String id=152 + data="syscall.Write" +String id=153 + data="/usr/local/google/home/mknyszek/work/go-1/src/syscall/syscall_unix.go" +String id=154 + data="internal/poll.ignoringEINTRIO" +String id=155 + data="/usr/local/google/home/mknyszek/work/go-1/src/internal/poll/fd_unix.go" +String id=156 + data="internal/poll.(*FD).Write" +String id=157 + data="os.(*File).write" +String id=158 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file_posix.go" +String id=159 + data="os.(*File).Write" +String id=160 + data="/usr/local/google/home/mknyszek/work/go-1/src/os/file.go" diff --git a/src/internal/trace/v2/testdata/tests/go122-annotations.test b/src/internal/trace/v2/testdata/tests/go122-annotations.test index 4749d82004..e468673497 100644 --- a/src/internal/trace/v2/testdata/tests/go122-annotations.test +++ b/src/internal/trace/v2/testdata/tests/go122-annotations.test @@ -220,7 +220,7 @@ String id=18 String id=19 data="sleep" String id=20 - data="runtime.GoSched" + data="runtime.Gosched" String id=21 data="start trace" String id=22 diff --git a/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test b/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test new file mode 100644 index 0000000000..1820738384 --- /dev/null +++ b/src/internal/trace/v2/testdata/tests/go122-create-syscall-reuse-thread-id.test @@ -0,0 +1,23 @@ +-- expect -- +SUCCESS +-- trace -- +Trace Go1.22 +EventBatch gen=1 m=0 time=0 size=37 +GoCreateSyscall dt=1 new_g=4 +GoSyscallEndBlocked dt=1 +ProcStatus dt=1 p=0 pstatus=2 +ProcStart dt=1 p=0 p_seq=1 +GoStatus dt=1 g=4 m=18446744073709551615 gstatus=1 +GoStart dt=1 g=4 g_seq=1 +GoSyscallBegin dt=1 p_seq=2 stack=0 +GoDestroySyscall 
dt=1 +EventBatch gen=1 m=0 time=0 size=13 +ProcStatus dt=1 p=1 pstatus=2 +ProcStart dt=1 p=1 p_seq=1 +ProcSteal dt=1 p=0 p_seq=3 m=0 +EventBatch gen=1 m=18446744073709551615 time=0 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Stacks +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Strings diff --git a/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test b/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test new file mode 100644 index 0000000000..9b329b8bae --- /dev/null +++ b/src/internal/trace/v2/testdata/tests/go122-create-syscall-with-p.test @@ -0,0 +1,22 @@ +-- expect -- +FAILURE ".*expected a proc but didn't have one.*" +-- trace -- +Trace Go1.22 +EventBatch gen=1 m=0 time=0 size=34 +GoCreateSyscall dt=1 new_g=4 +ProcStatus dt=1 p=0 pstatus=2 +ProcStart dt=1 p=0 p_seq=1 +GoSyscallEndBlocked dt=1 +GoStart dt=1 g=4 g_seq=1 +GoSyscallBegin dt=1 p_seq=2 stack=0 +GoDestroySyscall dt=1 +GoCreateSyscall dt=1 new_g=4 +GoSyscallEnd dt=1 +GoSyscallBegin dt=1 p_seq=3 stack=0 +GoDestroySyscall dt=1 +EventBatch gen=1 m=18446744073709551615 time=0 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Stacks +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Strings diff --git a/src/internal/trace/v2/testdata/tests/go122-gc-stress.test b/src/internal/trace/v2/testdata/tests/go122-gc-stress.test index 8d77fe14af..d5e7266f1e 100644 --- a/src/internal/trace/v2/testdata/tests/go122-gc-stress.test +++ b/src/internal/trace/v2/testdata/tests/go122-gc-stress.test @@ -4086,7 +4086,7 @@ String id=18 String id=19 data="sleep" String id=20 - data="runtime.GoSched" + data="runtime.Gosched" String id=21 data="GC mark termination" String id=22 diff --git a/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test new file mode 100644 index 0000000000..6484eb6d35 --- /dev/null +++ 
b/src/internal/trace/v2/testdata/tests/go122-syscall-steal-proc-self.test @@ -0,0 +1,17 @@ +-- expect -- +SUCCESS +-- trace -- +Trace Go1.22 +EventBatch gen=1 m=0 time=0 size=24 +ProcStatus dt=0 p=0 pstatus=1 +GoStatus dt=0 g=1 m=0 gstatus=2 +GoSyscallBegin dt=0 p_seq=1 stack=0 +ProcSteal dt=0 p=0 p_seq=2 m=0 +ProcStart dt=0 p=0 p_seq=3 +GoSyscallEndBlocked dt=0 +EventBatch gen=1 m=18446744073709551615 time=0 size=5 +Frequency freq=15625000 +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Stacks +EventBatch gen=1 m=18446744073709551615 time=0 size=1 +Strings diff --git a/src/internal/trace/v2/testtrace/validation.go b/src/internal/trace/v2/testtrace/validation.go index fcbc10801b..021c7785fd 100644 --- a/src/internal/trace/v2/testtrace/validation.go +++ b/src/internal/trace/v2/testtrace/validation.go @@ -169,6 +169,11 @@ func (v *Validator) Event(ev trace.Event) error { state.binding = ctx } } else if old.Executing() && !new.Executing() { + if tr.Stack != ev.Stack() { + // This is a case where the transition is happening to a goroutine that is also executing, so + // these two stacks should always match. + e.Errorf("StateTransition.Stack doesn't match Event.Stack") + } ctx := state.binding if ctx != nil { if ctx.G != id { @@ -220,7 +225,7 @@ func (v *Validator) Event(ev trace.Event) error { ctx := state.binding if ctx != nil { if ctx.P != id { - e.Errorf("tried to stop proc %d when it wasn't currently executing (currently executing %d) on thread %d", id, ctx.P, ev.Thread()) + e.Errorf("tried to stop proc %d when it wasn't currently executing (currently executing %d) on thread %d", id, ctx.P, ctx.M) } ctx.P = trace.NoProc state.binding = nil @@ -251,9 +256,14 @@ func (v *Validator) Event(ev trace.Event) error { case trace.EventTaskBegin: // Validate task begin. t := ev.Task() - if t.ID == trace.NoTask { + if t.ID == trace.NoTask || t.ID == trace.BackgroundTask { + // The background task should never have an event emitted for it. 
e.Errorf("found invalid task ID for task of type %s", t.Type) } + if t.Parent == trace.BackgroundTask { + // It's not possible for a task to be a subtask of the background task. + e.Errorf("found background task as the parent for task of type %s", t.Type) + } // N.B. Don't check the task type. Empty string is a valid task type. v.tasks[t.ID] = t.Type case trace.EventTaskEnd: diff --git a/src/internal/trace/v2/trace_test.go b/src/internal/trace/v2/trace_test.go index 7823b01e93..2514f796c8 100644 --- a/src/internal/trace/v2/trace_test.go +++ b/src/internal/trace/v2/trace_test.go @@ -8,6 +8,7 @@ import ( "bufio" "bytes" "fmt" + "internal/race" "internal/testenv" "internal/trace/v2" "internal/trace/v2/testtrace" @@ -35,7 +36,7 @@ func TestTraceAnnotations(t *testing.T) { {trace.EventRegionEnd, trace.TaskID(1), []string{"region0"}}, {trace.EventTaskEnd, trace.TaskID(1), []string{"task0"}}, // Currently, pre-existing region is not recorded to avoid allocations. - {trace.EventRegionBegin, trace.NoTask, []string{"post-existing region"}}, + {trace.EventRegionBegin, trace.BackgroundTask, []string{"post-existing region"}}, } r, err := trace.NewReader(bytes.NewReader(tb)) if err != nil { @@ -213,7 +214,7 @@ func TestTraceFutileWakeup(t *testing.T) { // Check to make sure that no goroutine in the "special" trace region // ends up blocking, unblocking, then immediately blocking again. // - // The goroutines are careful to call runtime.GoSched in between blocking, + // The goroutines are careful to call runtime.Gosched in between blocking, // so there should never be a clean block/unblock on the goroutine unless // the runtime was generating extraneous events. 
const ( @@ -521,17 +522,31 @@ func TestTraceManyStartStop(t *testing.T) { testTraceProg(t, "many-start-stop.go", nil) } +func TestTraceWaitOnPipe(t *testing.T) { + switch runtime.GOOS { + case "dragonfly", "freebsd", "linux", "netbsd", "openbsd", "solaris": + testTraceProg(t, "wait-on-pipe.go", nil) + return + } + t.Skip("no applicable syscall.Pipe on " + runtime.GOOS) +} + func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace, stderr []byte, stress bool)) { testenv.MustHaveGoRun(t) // Check if we're on a builder. onBuilder := testenv.Builder() != "" + onOldBuilder := !strings.Contains(testenv.Builder(), "gotip") && !strings.Contains(testenv.Builder(), "go1") testPath := filepath.Join("./testdata/testprog", progName) testName := progName runTest := func(t *testing.T, stress bool) { // Run the program and capture the trace, which is always written to stdout. - cmd := testenv.Command(t, testenv.GoToolPath(t), "run", testPath) + cmd := testenv.Command(t, testenv.GoToolPath(t), "run") + if race.Enabled { + cmd.Args = append(cmd.Args, "-race") + } + cmd.Args = append(cmd.Args, testPath) cmd.Env = append(os.Environ(), "GOEXPERIMENT=exectracer2") if stress { // Advance a generation constantly. @@ -539,7 +554,7 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace } // Capture stdout and stderr. // - // The protoocol for these programs is that stdout contains the trace data + // The protocol for these programs is that stdout contains the trace data // and stderr is an expectation in string format. var traceBuf, errBuf bytes.Buffer cmd.Stdout = &traceBuf @@ -567,7 +582,18 @@ func testTraceProg(t *testing.T, progName string, extra func(t *testing.T, trace // data is critical for debugging and this is the only way // we can currently make sure it's retained. 
t.Log("found bad trace; dumping to test log...") - t.Log(dumpTraceToText(t, tb)) + s := dumpTraceToText(t, tb) + if onOldBuilder && len(s) > 1<<20+512<<10 { + // The old build infrastructure truncates logs at ~2 MiB. + // Let's assume we're the only failure and give ourselves + // up to 1.5 MiB to dump the trace. + // + // TODO(mknyszek): Remove this when we've migrated off of + // the old infrastructure. + t.Logf("text trace too large to dump (%d bytes)", len(s)) + } else { + t.Log(s) + } } else if t.Failed() || *dumpTraces { // We asked to dump the trace or failed. Write the trace to a file. t.Logf("wrote trace to file: %s", dumpTraceToFile(t, testName, stress, tb)) diff --git a/src/internal/trace/v2/version/version.go b/src/internal/trace/v2/version/version.go index deb8f2b9fc..28189f80db 100644 --- a/src/internal/trace/v2/version/version.go +++ b/src/internal/trace/v2/version/version.go @@ -1,3 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package version import ( @@ -12,7 +16,8 @@ import ( type Version uint32 const ( - Go122 Version = 22 + Go122 Version = 22 + Current = Go122 ) var versions = map[Version][]event.Spec{ diff --git a/src/internal/types/testdata/check/cycles5.go b/src/internal/types/testdata/check/cycles5.go index a6145058bb..de85c03d8a 100644 --- a/src/internal/types/testdata/check/cycles5.go +++ b/src/internal/types/testdata/check/cycles5.go @@ -161,8 +161,8 @@ var a12 = makeArray() func makeArray() (res T12) { return } // issue #20770 -var r /* ERROR "invalid cycle in declaration of r" */ = newReader() -func newReader() r +var r = newReader() +func newReader() r // ERROR "r is not a type" // variations of the theme of #8699 and #20770 var arr /* ERROR "cycle" */ = f() diff --git a/src/internal/types/testdata/check/cycles5a.go b/src/internal/types/testdata/check/cycles5a.go index ed5853e3f2..e10f554e5c 100644 --- a/src/internal/types/testdata/check/cycles5a.go +++ b/src/internal/types/testdata/check/cycles5a.go @@ -161,8 +161,8 @@ var a12 = makeArray() func makeArray() (res T12) { return } // issue #20770 -var r /* ERROR "invalid cycle in declaration of r" */ = newReader() -func newReader() r +var r = newReader() +func newReader() r // ERROR "r is not a type" // variations of the theme of #8699 and #20770 var arr /* ERROR "cycle" */ = f() diff --git a/src/internal/types/testdata/check/decls1.go b/src/internal/types/testdata/check/decls1.go index 06f3b2e6cb..6cdbf27f4c 100644 --- a/src/internal/types/testdata/check/decls1.go +++ b/src/internal/types/testdata/check/decls1.go @@ -63,7 +63,7 @@ var ( t12 complex64 = -(u + *t11) / *&v t13 int = a /* ERROR "shifted operand" */ << d t14 int = i << j - t15 math /* ERROR "not in selector" */ + t15 math /* ERROR "math is not a type" */ t16 math.xxx /* ERROR "undefined" */ t17 math /* ERROR "not a type" */ .Pi t18 float64 = math.Pi * 10.0 diff --git a/src/internal/types/testdata/check/go1_12.go b/src/internal/types/testdata/check/go1_12.go 
index b47d3de147..f1266c23cc 100644 --- a/src/internal/types/testdata/check/go1_12.go +++ b/src/internal/types/testdata/check/go1_12.go @@ -10,18 +10,18 @@ package p // numeric literals const ( - _ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later" - _ = 0b111 // ERROR "binary literals requires go1.13 or later" - _ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later" + _ = 1_000 // ERROR "underscore in numeric literal requires go1.13 or later" + _ = 0b111 // ERROR "binary literal requires go1.13 or later" + _ = 0o567 // ERROR "0o/0O-style octal literal requires go1.13 or later" _ = 0xabc // ok - _ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later" + _ = 0x0p1 // ERROR "hexadecimal floating-point literal requires go1.13 or later" - _ = 0B111 // ERROR "binary" - _ = 0O567 // ERROR "octal" - _ = 0Xabc // ok - _ = 0X0P1 // ERROR "hexadecimal floating-point" + _ = 0b111 // ERROR "binary" + _ = 0o567 // ERROR "octal" + _ = 0xabc // ok + _ = 0x0p1 // ERROR "hexadecimal floating-point" - _ = 1_000i // ERROR "underscores" + _ = 1_000i // ERROR "underscore" _ = 0b111i // ERROR "binary" _ = 0o567i // ERROR "octal" _ = 0xabci // ERROR "hexadecimal floating-point" diff --git a/src/internal/types/testdata/check/go1_8.go b/src/internal/types/testdata/check/go1_8.go index 6a7e639792..d386d5e60b 100644 --- a/src/internal/types/testdata/check/go1_8.go +++ b/src/internal/types/testdata/check/go1_8.go @@ -9,4 +9,4 @@ package p // type alias declarations -type any = /* ERROR "type aliases requires go1.9 or later" */ interface{} +type any = /* ERROR "type alias requires go1.9 or later" */ interface{} diff --git a/src/internal/types/testdata/check/issues0.go b/src/internal/types/testdata/check/issues0.go index 2f4d266b8a..3bf4a31446 100644 --- a/src/internal/types/testdata/check/issues0.go +++ b/src/internal/types/testdata/check/issues0.go @@ -104,7 +104,7 @@ func issue10979() { // issue11347 // These should not 
crash. -var a1, b1 /* ERROR "cycle" */ , c1 /* ERROR "cycle" */ b1 = 0 > 0<<""[""[c1]]>c1 +var a1, b1, c1 /* ERROR "cycle" */ b1 /* ERROR "b1 is not a type" */ = 0 > 0<<""[""[c1]]>c1 var a2, b2 /* ERROR "cycle" */ = 0 /* ERROR "assignment mismatch" */ /* ERROR "assignment mismatch" */ > 0<<""[b2] var a3, b3 /* ERROR "cycle" */ = int /* ERROR "assignment mismatch" */ /* ERROR "assignment mismatch" */ (1<<""[b3]) @@ -137,7 +137,7 @@ func issue10260() { _ = x /* ERROR "impossible type assertion: x.(T1)\n\tT1 does not implement I1 (method foo has pointer receiver)" */ .(T1) T1{}.foo /* ERROR "cannot call pointer method foo on T1" */ () - x.Foo /* ERROR "x.Foo undefined (type I1 has no field or method Foo, but does have foo)" */ () + x.Foo /* ERROR "x.Foo undefined (type I1 has no field or method Foo, but does have method foo)" */ () _ = i2 /* ERROR "impossible type assertion: i2.(*T1)\n\t*T1 does not implement I2 (wrong type for method foo)\n\t\thave foo()\n\t\twant foo(int)" */ .(*T1) diff --git a/src/internal/types/testdata/check/lookup1.go b/src/internal/types/testdata/check/lookup1.go new file mode 100644 index 0000000000..048288db77 --- /dev/null +++ b/src/internal/types/testdata/check/lookup1.go @@ -0,0 +1,73 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package lookup + +import "math/big" // provides big.Float struct with unexported fields and methods + +func _() { + var s struct { + x, aBc int + } + _ = s.x + _ = s /* ERROR "invalid operation: cannot call non-function s.x (variable of type int)" */ .x() + _ = s.X // ERROR "s.X undefined (type struct{x int; aBc int} has no field or method X, but does have field x)" + _ = s.X /* ERROR "s.X undefined (type struct{x int; aBc int} has no field or method X, but does have field x)" */ () + + _ = s.aBc + _ = s.abc // ERROR "s.abc undefined (type struct{x int; aBc int} has no field or method abc, but does have field aBc)" + _ = s.ABC // ERROR "s.ABC undefined (type struct{x int; aBc int} has no field or method ABC, but does have field aBc)" +} + +func _() { + type S struct { + x int + } + var s S + _ = s.x + _ = s /* ERROR "invalid operation: cannot call non-function s.x (variable of type int)" */ .x() + _ = s.X // ERROR "s.X undefined (type S has no field or method X, but does have field x)" + _ = s.X /* ERROR "s.X undefined (type S has no field or method X, but does have field x)" */ () +} + +type S struct { + x int +} + +func (S) m() {} +func (S) aBc() {} + +func _() { + var s S + _ = s.m + s.m() + _ = s.M // ERROR "s.M undefined (type S has no field or method M, but does have method m)" + s.M /* ERROR "s.M undefined (type S has no field or method M, but does have method m)" */ () + + _ = s.aBc + _ = s.abc // ERROR "s.abc undefined (type S has no field or method abc, but does have method aBc)" + _ = s.ABC // ERROR "s.ABC undefined (type S has no field or method ABC, but does have method aBc)" +} + +func _() { + type P *S + var s P + _ = s.m // ERROR "s.m undefined (type P has no field or method m)" + _ = s.M // ERROR "s.M undefined (type P has no field or method M)" + _ = s.x + _ = s.X // ERROR "s.X undefined (type P has no field or method X, but does have field x)" +} + +func _() { + var x big.Float + _ = x.neg // ERROR "x.neg undefined (type big.Float has no field 
or method neg, but does have method Neg)" + _ = x.nEg // ERROR "x.nEg undefined (type big.Float has no field or method nEg)" + _ = x.Neg + _ = x.NEg // ERROR "x.NEg undefined (type big.Float has no field or method NEg, but does have method Neg)" + + _ = x.form // ERROR "x.form undefined (cannot refer to unexported field form)" + _ = x.fOrm // ERROR "x.fOrm undefined (type big.Float has no field or method fOrm)" + _ = x.Form // ERROR "x.Form undefined (type big.Float has no field or method Form, but does have unexported field form)" + _ = x.FOrm // ERROR "x.FOrm undefined (type big.Float has no field or method FOrm)" +} diff --git a/src/internal/types/testdata/check/lookup2.go b/src/internal/types/testdata/check/lookup2.go new file mode 100644 index 0000000000..a274da1ddc --- /dev/null +++ b/src/internal/types/testdata/check/lookup2.go @@ -0,0 +1,94 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +import ( + "go/ast" + "math/big" +) + +// case sel pkg have message (examples for general lookup) +// --------------------------------------------------------------------------------------------------------- +// ok x.Foo == Foo +// misspelled x.Foo == FoO type X has no field or method Foo, but does have field FoO +// misspelled x.Foo == foo type X has no field or method Foo, but does have field foo +// misspelled x.Foo == foO type X has no field or method Foo, but does have field foO +// +// misspelled x.foo == Foo type X has no field or method foo, but does have field Foo +// misspelled x.foo == FoO type X has no field or method foo, but does have field FoO +// ok x.foo == foo +// misspelled x.foo == foO type X has no field or method foo, but does have field foO +// +// ok x.Foo != Foo +// misspelled x.Foo != FoO type X has no field or method Foo, but does have field FoO +// unexported x.Foo != foo type X has no field or method Foo, but does have unexported field foo +// missing x.Foo != foO type X has no field or method Foo +// +// misspelled x.foo != Foo type X has no field or method foo, but does have field Foo +// missing x.foo != FoO type X has no field or method foo +// inaccessible x.foo != foo cannot refer to unexported field foo +// missing x.foo != foO type X has no field or method foo + +type S struct { + Foo1 int + FoO2 int + foo3 int + foO4 int +} + +func _() { + var x S + _ = x.Foo1 // OK + _ = x.Foo2 // ERROR "x.Foo2 undefined (type S has no field or method Foo2, but does have field FoO2)" + _ = x.Foo3 // ERROR "x.Foo3 undefined (type S has no field or method Foo3, but does have field foo3)" + _ = x.Foo4 // ERROR "x.Foo4 undefined (type S has no field or method Foo4, but does have field foO4)" + + _ = x.foo1 // ERROR "x.foo1 undefined (type S has no field or method foo1, but does have field Foo1)" + _ = x.foo2 // ERROR "x.foo2 undefined (type S has no field or method foo2, but does have field FoO2)" + _ = x.foo3 // OK + _ = x.foo4 // 
ERROR "x.foo4 undefined (type S has no field or method foo4, but does have field foO4)" +} + +func _() { + _ = S{Foo1: 0} // OK + _ = S{Foo2 /* ERROR "unknown field Foo2 in struct literal of type S, but does have FoO2" */ : 0} + _ = S{Foo3 /* ERROR "unknown field Foo3 in struct literal of type S, but does have foo3" */ : 0} + _ = S{Foo4 /* ERROR "unknown field Foo4 in struct literal of type S, but does have foO4" */ : 0} + + _ = S{foo1 /* ERROR "unknown field foo1 in struct literal of type S, but does have Foo1" */ : 0} + _ = S{foo2 /* ERROR "unknown field foo2 in struct literal of type S, but does have FoO2" */ : 0} + _ = S{foo3: 0} // OK + _ = S{foo4 /* ERROR "unknown field foo4 in struct literal of type S, but does have foO4" */ : 0} +} + +// The following tests follow the same pattern as above but operate on an imported type instead of S. +// Currently our testing framework doesn't make it easy to define an imported package for testing, so +// instead we use the big.Float and ast.File types as they provide a suitable mix of exported and un- +// exported fields and methods. 
+ +func _() { + var x *big.Float + _ = x.Neg // OK + _ = x.NeG // ERROR "x.NeG undefined (type *big.Float has no field or method NeG, but does have method Neg)" + _ = x.Form // ERROR "x.Form undefined (type *big.Float has no field or method Form, but does have unexported field form)" + _ = x.ForM // ERROR "x.ForM undefined (type *big.Float has no field or method ForM)" + + _ = x.abs // ERROR "x.abs undefined (type *big.Float has no field or method abs, but does have method Abs)" + _ = x.abS // ERROR "x.abS undefined (type *big.Float has no field or method abS)" + _ = x.form // ERROR "x.form undefined (cannot refer to unexported field form)" + _ = x.forM // ERROR "x.forM undefined (type *big.Float has no field or method forM)" +} + +func _() { + _ = ast.File{Name: nil} // OK + _ = ast.File{NamE /* ERROR "unknown field NamE in struct literal of type ast.File, but does have Name" */ : nil} + _ = big.Float{Form /* ERROR "unknown field Form in struct literal of type big.Float, but does have unexported form" */ : 0} + _ = big.Float{ForM /* ERROR "unknown field ForM in struct literal of type big.Float" */ : 0} + + _ = ast.File{name /* ERROR "unknown field name in struct literal of type ast.File, but does have Name" */ : nil} + _ = ast.File{namE /* ERROR "unknown field namE in struct literal of type ast.File" */ : nil} + _ = big.Float{form /* ERROR "cannot refer to unexported field form in struct literal of type big.Float" */ : 0} + _ = big.Float{forM /* ERROR "unknown field forM in struct literal of type big.Float" */ : 0} +} diff --git a/src/internal/types/testdata/check/typeinference.go b/src/internal/types/testdata/check/typeinference.go index 0478d9390f..8dac938ef9 100644 --- a/src/internal/types/testdata/check/typeinference.go +++ b/src/internal/types/testdata/check/typeinference.go @@ -8,8 +8,9 @@ package typeInference // basic inference type Tb[P ~*Q, Q any] int + func _() { - var x Tb /* ERROR "got 1 arguments" */ [*int] + var x Tb /* ERROR "not enough type 
arguments for type Tb: have 1, want 2" */ [*int] var y Tb[*int, int] x = y /* ERRORx `cannot use y .* in assignment` */ _ = x @@ -17,8 +18,9 @@ func _() { // recursive inference type Tr[A any, B *C, C *D, D *A] int + func _() { - var x Tr /* ERROR "got 1 arguments" */ [string] + var x Tr /* ERROR "not enough type arguments for type Tr: have 1, want 4" */ [string] var y Tr[string, ***string, **string, *string] var z Tr[int, ***int, **int, *int] x = y /* ERRORx `cannot use y .* in assignment` */ @@ -28,22 +30,30 @@ func _() { // other patterns of inference type To0[A any, B []A] int -type To1[A any, B struct{a A}] int +type To1[A any, B struct{ a A }] int type To2[A any, B [][]A] int type To3[A any, B [3]*A] int -type To4[A any, B any, C struct{a A; b B}] int +type To4[A any, B any, C struct { + a A + b B +}] int + func _() { - var _ To0 /* ERROR "got 1 arguments" */ [int] - var _ To1 /* ERROR "got 1 arguments" */ [int] - var _ To2 /* ERROR "got 1 arguments" */ [int] - var _ To3 /* ERROR "got 1 arguments" */ [int] - var _ To4 /* ERROR "got 2 arguments" */ [int, string] + var _ To0 /* ERROR "not enough type arguments for type To0: have 1, want 2" */ [int] + var _ To1 /* ERROR "not enough type arguments for type To1: have 1, want 2" */ [int] + var _ To2 /* ERROR "not enough type arguments for type To2: have 1, want 2" */ [int] + var _ To3 /* ERROR "not enough type arguments for type To3: have 1, want 2" */ [int] + var _ To4 /* ERROR "not enough type arguments for type To4: have 2, want 3" */ [int, string] } // failed inference type Tf0[A, B any] int -type Tf1[A any, B ~struct{a A; c C}, C any] int +type Tf1[A any, B ~struct { + a A + c C +}, C any] int + func _() { - var _ Tf0 /* ERROR "got 1 arguments but 2 type parameters" */ [int] - var _ Tf1 /* ERROR "got 1 arguments but 3 type parameters" */ [int] + var _ Tf0 /* ERROR "not enough type arguments for type Tf0: have 1, want 2" */ [int] + var _ Tf1 /* ERROR "not enough type arguments for type Tf1: have 1, want 3" */ 
[int] } diff --git a/src/internal/types/testdata/check/typeinst0.go b/src/internal/types/testdata/check/typeinst0.go index bbcdaec04a..3baeb2214a 100644 --- a/src/internal/types/testdata/check/typeinst0.go +++ b/src/internal/types/testdata/check/typeinst0.go @@ -18,10 +18,6 @@ type T2[P any] struct { type List[P any] []P -// Alias type declarations cannot have type parameters. -// Issue #46477 proposes to change that. -type A1[P any] = /* ERROR "cannot be alias" */ struct{} - // Pending clarification of #46477 we disallow aliases // of generic types. type A2 = List // ERROR "cannot use generic type" @@ -42,7 +38,7 @@ type _ myInt /* ERROR "not a generic type" */ [] // ERROR "expected type argumen // TODO(gri) better error messages type _ T1[] // ERROR "expected type argument list" type _ T1[x /* ERROR "not a type" */ ] -type _ T1 /* ERROR "got 2 arguments but 1 type parameters" */ [int, float32] +type _ T1 /* ERROR "too many type arguments for type T1: have 2, want 1" */ [int, float32] var _ T2[int] = T2[int]{} diff --git a/src/internal/types/testdata/examples/inference2.go b/src/internal/types/testdata/examples/inference2.go index 6097c2b5eb..91f9df1d84 100644 --- a/src/internal/types/testdata/examples/inference2.go +++ b/src/internal/types/testdata/examples/inference2.go @@ -27,9 +27,9 @@ var ( _ func(int) int = f3[int] v6 func(int, int) = f4 - v7 func(int, string) = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)" + v7 func(int, string) = f4 // ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of v7" v8 func(int) []int = f5 - v9 func(string) []int = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P" + v9 func(string) []int = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of v9" _, _ func(int) = f1, f1 _, _ func(int) = 
f1, f2 // ERROR "cannot infer P" @@ -49,9 +49,13 @@ func _() { v5 = f3[int] v6 = f4 - v7 = f4 // ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)" + v7 = f4 // ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of v7" v8 = f5 - v9 = f5 // ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P" + v9 = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of v9" + + // non-trivial LHS + var a [2]func(string) []int + a[0] = f5 // ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of a[0]" } // Return statements @@ -62,11 +66,11 @@ func _() func(int) int { return f3[int] } func _() func(int, int) { return f4 } func _() func(int, string) { - return f4 /* ERROR "type func(int, string) of variable in assignment does not match inferred type func(int, int) for func(P, P)" */ + return f4 /* ERROR "inferred type func(int, int) for func(P, P) does not match type func(int, string) of result variable" */ } func _() func(int) []int { return f5 } func _() func(string) []int { - return f5 /* ERROR "type func(string) []int of variable in assignment does not match inferred type func(string) []string for func(P) []P" */ + return f5 /* ERROR "inferred type func(string) []string for func(P) []P does not match type func(string) []int of result variable" */ } func _() (_, _ func(int)) { return f1, f1 } diff --git a/src/internal/types/testdata/fixedbugs/issue39634.go b/src/internal/types/testdata/fixedbugs/issue39634.go index 591b00e404..6fbc7cd7bc 100644 --- a/src/internal/types/testdata/fixedbugs/issue39634.go +++ b/src/internal/types/testdata/fixedbugs/issue39634.go @@ -2,9 +2,13 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// Examples adjusted to match new [T any] syntax for type parameters. +// Examples from the issue adjusted to match new [T any] syntax for type parameters. // Also, previously permitted empty type parameter lists and instantiations // are now syntax errors. +// +// The primary concern here is that these tests shouldn't crash the type checker. +// The quality of the error messages is secondary as these are all pretty esoteric +// or artificial test cases. package p @@ -39,7 +43,7 @@ type foo9[A any] interface { foo9 /* ERROR "invalid recursive type" */ [A] } func _() { var _ = new(foo9[int]) } // crash 12 -var u /* ERROR "cycle" */ , i [func /* ERROR "used as value" */ /* ERROR "used as value" */ (u, c /* ERROR "undefined" */ /* ERROR "undefined" */ ) {}(0, len /* ERROR "must be called" */ /* ERROR "must be called" */ )]c /* ERROR "undefined" */ /* ERROR "undefined" */ +var u, i [func /* ERROR "used as value" */ /* ERROR "used as value" */ (u /* ERROR "u is not a type" */ /* ERROR "u is not a type" */ , c /* ERROR "undefined" */ /* ERROR "undefined" */ ) {}(0, len /* ERROR "must be called" */ /* ERROR "must be called" */ )]c /* ERROR "undefined" */ /* ERROR "undefined" */ // crash 15 func y15() { var a /* ERROR "declared and not used" */ interface{ p() } = G15[string]{} } diff --git a/src/internal/types/testdata/fixedbugs/issue49541.go b/src/internal/types/testdata/fixedbugs/issue49541.go index da3731195b..665ed1da7c 100644 --- a/src/internal/types/testdata/fixedbugs/issue49541.go +++ b/src/internal/types/testdata/fixedbugs/issue49541.go @@ -13,7 +13,7 @@ func (S[A, B]) m() {} // TODO(gri): with type-type inference enabled we should only report one error // below. See issue #50588. 
-func _[A any](s S /* ERROR "got 1 arguments but 2 type parameters" */ [A]) { +func _[A any](s S /* ERROR "not enough type arguments for type S: have 1, want 2" */ [A]) { // we should see no follow-on errors below s.f = 1 s.m() @@ -22,7 +22,7 @@ func _[A any](s S /* ERROR "got 1 arguments but 2 type parameters" */ [A]) { // another test case from the issue func _() { - X /* ERROR "cannot infer Q" */ (Interface[*F /* ERROR "got 1 arguments but 2 type parameters" */ [string]](Impl{})) + X /* ERROR "cannot infer Q" */ (Interface[*F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [string]](Impl{})) } func X[Q Qer](fs Interface[Q]) { diff --git a/src/internal/types/testdata/fixedbugs/issue49736.go b/src/internal/types/testdata/fixedbugs/issue49736.go new file mode 100644 index 0000000000..83e53a4937 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue49736.go @@ -0,0 +1,17 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +import "math/big" + +// From go.dev/issue/18419 +func _(x *big.Float) { + x.form /* ERROR "x.form undefined (cannot refer to unexported field form)" */ () +} + +// From go.dev/issue/31053 +func _() { + _ = big.Float{form /* ERROR "cannot refer to unexported field form in struct literal of type big.Float" */ : 0} +} diff --git a/src/internal/types/testdata/fixedbugs/issue50729b.go b/src/internal/types/testdata/fixedbugs/issue50729b.go new file mode 100644 index 0000000000..bc1f4406e5 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue50729b.go @@ -0,0 +1,15 @@ +// -gotypesalias=1 + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type d[T any] struct{} +type ( + b d[a] +) + +type a = func(c) +type c struct{ a } diff --git a/src/internal/types/testdata/fixedbugs/issue50929.go b/src/internal/types/testdata/fixedbugs/issue50929.go index 64c7cd664f..a665e229be 100644 --- a/src/internal/types/testdata/fixedbugs/issue50929.go +++ b/src/internal/types/testdata/fixedbugs/issue50929.go @@ -16,7 +16,7 @@ func G[A, B any](F[A, B]) { func _() { // TODO(gri) only report one error below (issue #50932) - var x F /* ERROR "got 1 arguments but 2 type parameters" */ [int] + var x F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [int] G(x /* ERROR "does not match" */) } @@ -46,9 +46,9 @@ func NSG[G any](c RSC[G]) { fmt.Println(c) } -func MMD[Rc RC /* ERROR "got 1 arguments" */ [RG], RG any, G any]() M /* ERROR "got 2 arguments" */ [Rc, RG] { +func MMD[Rc RC /* ERROR "not enough type arguments for type RC: have 1, want 2" */ [RG], RG any, G any]() M /* ERROR "not enough type arguments for type" */ [Rc, RG] { - var nFn NFn /* ERROR "got 2 arguments" */ [Rc, RG] + var nFn NFn /* ERROR "not enough type arguments for type NFn: have 2, want 3" */ [Rc, RG] var empty Rc switch any(empty).(type) { @@ -58,11 +58,11 @@ func MMD[Rc RC /* ERROR "got 1 arguments" */ [RG], RG any, G any]() M /* ERROR " nFn = NSG /* ERROR "cannot use NSG[G]" */ [G] } - return M /* ERROR "got 2 arguments" */ [Rc, RG]{ + return M /* ERROR "not enough type arguments for type M: have 2, want 3" */ [Rc, RG]{ Fn: func(rc Rc) { - NC(nFn /* ERROR "does not match" */ ) + NC(nFn /* ERROR "does not match" */) }, } - return M /* ERROR "got 2 arguments" */ [Rc, RG]{} + return M /* ERROR "not enough type arguments for type M: have 2, want 3" */ [Rc, RG]{} } diff --git a/src/internal/types/testdata/fixedbugs/issue51232.go b/src/internal/types/testdata/fixedbugs/issue51232.go index 27693a3e4d..c5832d2976 100644 --- a/src/internal/types/testdata/fixedbugs/issue51232.go +++ 
b/src/internal/types/testdata/fixedbugs/issue51232.go @@ -11,20 +11,20 @@ type RC[RG any] interface { type Fn[RCT RC[RG], RG any] func(RCT) type F[RCT RC[RG], RG any] interface { - Fn() Fn /* ERROR "got 1 arguments" */ [RCT] + Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] } type concreteF[RCT RC[RG], RG any] struct { - makeFn func() Fn /* ERROR "got 1 arguments" */ [RCT] + makeFn func() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] } -func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] { +func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] { return c.makeFn() } -func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR "got 1 arguments" */ [RCT] { +func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR "not enough type arguments for type F: have 1, want 2" */ [RCT] { // TODO(rfindley): eliminate the duplicate error below. - return & /* ERRORx `cannot use .* as F\[RCT\]` */ concreteF /* ERROR "got 1 arguments" */ [RCT]{ + return & /* ERRORx `cannot use .* as F\[RCT\]` */ concreteF /* ERROR "not enough type arguments for type concreteF: have 1, want 2" */ [RCT]{ makeFn: nil, } } diff --git a/src/internal/types/testdata/fixedbugs/issue51233.go b/src/internal/types/testdata/fixedbugs/issue51233.go index e2f97fc456..d96d3d1aa0 100644 --- a/src/internal/types/testdata/fixedbugs/issue51233.go +++ b/src/internal/types/testdata/fixedbugs/issue51233.go @@ -12,16 +12,16 @@ type RC[RG any] interface { type Fn[RCT RC[RG], RG any] func(RCT) -type FFn[RCT RC[RG], RG any] func() Fn /* ERROR "got 1 arguments" */ [RCT] +type FFn[RCT RC[RG], RG any] func() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] type F[RCT RC[RG], RG any] interface { - Fn() Fn /* ERROR "got 1 arguments" */ [RCT] + Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] } type concreteF[RCT RC[RG], RG any] struct 
{ - makeFn FFn /* ERROR "got 1 arguments" */ [RCT] + makeFn FFn /* ERROR "not enough type arguments for type FFn: have 1, want 2" */ [RCT] } -func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "got 1 arguments" */ [RCT] { +func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR "not enough type arguments for type Fn: have 1, want 2" */ [RCT] { return c.makeFn() } diff --git a/src/internal/types/testdata/fixedbugs/issue51339.go b/src/internal/types/testdata/fixedbugs/issue51339.go index 65c213462b..fd10daa2c2 100644 --- a/src/internal/types/testdata/fixedbugs/issue51339.go +++ b/src/internal/types/testdata/fixedbugs/issue51339.go @@ -9,10 +9,12 @@ package p type T[P any, B *P] struct{} -func (T /* ERROR "cannot use generic type" */ ) m0() {} +func (T /* ERROR "cannot use generic type" */) m0() {} // TODO(rfindley): eliminate the duplicate errors here. -func (/* ERROR "got 1 type parameter, but receiver base type declares 2" */ T /* ERROR "got 1 arguments but 2 type parameters" */ [_]) m1() {} +func ( /* ERROR "got 1 type parameter, but receiver base type declares 2" */ T /* ERROR "not enough type arguments for type" */ [_]) m1() { +} func (T[_, _]) m2() {} + // TODO(gri) this error is unfortunate (issue #51343) -func (T /* ERROR "got 3 arguments but 2 type parameters" */ [_, _, _]) m3() {} +func (T /* ERROR "too many type arguments for type" */ [_, _, _]) m3() {} diff --git a/src/internal/types/testdata/fixedbugs/issue60542.go b/src/internal/types/testdata/fixedbugs/issue60542.go index b536ddb198..b617c2b57e 100644 --- a/src/internal/types/testdata/fixedbugs/issue60542.go +++ b/src/internal/types/testdata/fixedbugs/issue60542.go @@ -9,4 +9,4 @@ func Clip[S ~[]E, E any](s S) S { } var versions func() -var _ = Clip /* ERROR "S (type func()) does not satisfy ~[]E" */ (versions) +var _ = Clip /* ERROR "in call to Clip, S (type func()) does not satisfy ~[]E" */ (versions) diff --git a/src/internal/types/testdata/fixedbugs/issue60688.go 
b/src/internal/types/testdata/fixedbugs/issue60688.go index 38d90ee8cc..61b9f91510 100644 --- a/src/internal/types/testdata/fixedbugs/issue60688.go +++ b/src/internal/types/testdata/fixedbugs/issue60688.go @@ -13,4 +13,4 @@ func g[P any](P, string) {} // be identical to match). // The result is an error from type inference, rather than an // error from an assignment mismatch. -var f func(int, String) = g // ERROR "type func(int, String) of variable in assignment does not match inferred type func(int, string) for func(P, string)" +var f func(int, String) = g // ERROR "inferred type func(int, string) for func(P, string) does not match type func(int, String) of f" diff --git a/src/internal/types/testdata/fixedbugs/issue60747.go b/src/internal/types/testdata/fixedbugs/issue60747.go new file mode 100644 index 0000000000..6587a4e557 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue60747.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func f[P any](P) P { panic(0) } + +var v func(string) int = f // ERROR "inferred type func(string) string for func(P) P does not match type func(string) int of v" + +func _() func(string) int { + return f // ERROR "inferred type func(string) string for func(P) P does not match type func(string) int of result variable" +} diff --git a/src/internal/types/testdata/fixedbugs/issue61685.go b/src/internal/types/testdata/fixedbugs/issue61685.go new file mode 100644 index 0000000000..b88b222eb9 --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue61685.go @@ -0,0 +1,15 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +func _[T any](x any) { + f /* ERROR "T (type I[T]) does not satisfy I[T] (wrong type for method m)" */ (x.(I[T])) +} + +func f[T I[T]](T) {} + +type I[T any] interface { + m(T) +} diff --git a/src/internal/types/testdata/fixedbugs/issue64406.go b/src/internal/types/testdata/fixedbugs/issue64406.go new file mode 100644 index 0000000000..54b959dbba --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue64406.go @@ -0,0 +1,23 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package issue64406 + +import ( + "unsafe" +) + +func sliceData[E any, S ~[]E](s S) *E { + return unsafe.SliceData(s) +} + +func slice[E any, S ~*E](s S) []E { + return unsafe.Slice(s, 0) +} + +func f() { + s := []uint32{0} + _ = sliceData(s) + _ = slice(&s) +} diff --git a/src/internal/types/testdata/fixedbugs/issue64704.go b/src/internal/types/testdata/fixedbugs/issue64704.go new file mode 100644 index 0000000000..c8e9056cdd --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue64704.go @@ -0,0 +1,12 @@ +// -lang=go1.21 + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +func _() { + for range 10 /* ERROR "cannot range over 10 (untyped int constant): requires go1.22 or later" */ { + } +} diff --git a/src/internal/types/testdata/fixedbugs/issue65344.go b/src/internal/types/testdata/fixedbugs/issue65344.go new file mode 100644 index 0000000000..9f8337cf2b --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue65344.go @@ -0,0 +1,19 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type T1 C /* ERROR "C is not a type" */ + +// TODO(gri) try to avoid this follow-on error +const C = T1(0 /* ERROR "cannot convert 0 (untyped int constant) to type T1" */) + +type T2 V /* ERROR "V is not a type" */ + +var V T2 + +func _() { + // don't produce errors here + _ = C + V +} diff --git a/src/internal/types/testdata/fixedbugs/issue65711.go b/src/internal/types/testdata/fixedbugs/issue65711.go new file mode 100644 index 0000000000..2c26a9208b --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue65711.go @@ -0,0 +1,25 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A[P any] [1]P + +type B[P any] A[P] + +type C /* ERROR "invalid recursive type" */ B[C] + +// test case from issue + +type Foo[T any] struct { + baz T +} + +type Bar[T any] struct { + foo Foo[T] +} + +type Baz /* ERROR "invalid recursive type" */ struct { + bar Bar[Baz] +} diff --git a/src/internal/types/testdata/fixedbugs/issue65854.go b/src/internal/types/testdata/fixedbugs/issue65854.go new file mode 100644 index 0000000000..744777a94f --- /dev/null +++ b/src/internal/types/testdata/fixedbugs/issue65854.go @@ -0,0 +1,13 @@ +// -gotypesalias=1 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +type A = int + +type T[P any] *A + +var _ T[int] diff --git a/src/internal/types/testdata/fixedbugs/issue6977.go b/src/internal/types/testdata/fixedbugs/issue6977.go index c455d3a849..ffe4a7464b 100644 --- a/src/internal/types/testdata/fixedbugs/issue6977.go +++ b/src/internal/types/testdata/fixedbugs/issue6977.go @@ -54,29 +54,32 @@ type ( T8 interface { T7; T7 } T9 interface { T8; T8 } - T10 interface { T9; T9 } - T11 interface { T10; T10 } - T12 interface { T11; T11 } - T13 interface { T12; T12 } - T14 interface { T13; T13 } - T15 interface { T14; T14 } - T16 interface { T15; T15 } - T17 interface { T16; T16 } - T18 interface { T17; T17 } - T19 interface { T18; T18 } + // TODO(gri) Enable this longer test once we have found a solution + // for the incorrect optimization in the validType check + // (see TODO in validtype.go). + // T10 interface { T9; T9 } + // T11 interface { T10; T10 } + // T12 interface { T11; T11 } + // T13 interface { T12; T12 } + // T14 interface { T13; T13 } + // T15 interface { T14; T14 } + // T16 interface { T15; T15 } + // T17 interface { T16; T16 } + // T18 interface { T17; T17 } + // T19 interface { T18; T18 } - T20 interface { T19; T19 } - T21 interface { T20; T20 } - T22 interface { T21; T21 } - T23 interface { T22; T22 } - T24 interface { T23; T23 } - T25 interface { T24; T24 } - T26 interface { T25; T25 } - T27 interface { T26; T26 } - T28 interface { T27; T27 } - T29 interface { T28; T28 } + // T20 interface { T19; T19 } + // T21 interface { T20; T20 } + // T22 interface { T21; T21 } + // T23 interface { T22; T22 } + // T24 interface { T23; T23 } + // T25 interface { T24; T24 } + // T26 interface { T25; T25 } + // T27 interface { T26; T26 } + // T28 interface { T27; T27 } + // T29 interface { T28; T28 } ) // Verify that m is present. 
-var x T29 +var x T9 // T29 var _ = x.m diff --git a/src/internal/types/testdata/spec/range.go b/src/internal/types/testdata/spec/range.go index 4ae270d233..07bd6b6769 100644 --- a/src/internal/types/testdata/spec/range.go +++ b/src/internal/types/testdata/spec/range.go @@ -1,5 +1,3 @@ -// -goexperiment=rangefunc - // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/internal/types/testdata/spec/range_int.go b/src/internal/types/testdata/spec/range_int.go index 178f01bae7..7f722e2d99 100644 --- a/src/internal/types/testdata/spec/range_int.go +++ b/src/internal/types/testdata/spec/range_int.go @@ -7,6 +7,12 @@ package p +// test framework assumes 64-bit int/uint sizes by default +const ( + maxInt = 1<<63 - 1 + maxUint = 1<<64 - 1 +) + type MyInt int32 func _() { @@ -38,7 +44,7 @@ func _() { for i, j /* ERROR "range over 10 (untyped int constant) permits only one iteration variable" */ := range 10 { _, _ = i, j } - for i /* ERROR "cannot use i (value of type MyInt) as int value in assignment" */ = range MyInt(10) { + for i = range MyInt /* ERROR "cannot use MyInt(10) (constant 10 of type MyInt) as int value in range clause" */ (10) { _ = i } for mi := range MyInt(10) { @@ -63,3 +69,63 @@ func _[T ~int](x T) { for range x { // ok } } + +func issue65133() { + for range maxInt { + } + for range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 { + } + for range maxUint /* ERROR "cannot use maxUint (untyped int constant 18446744073709551615) as int value in range clause (overflows)" */ { + } + + for i := range maxInt { + _ = i + } + for i := range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 { + _ = i + } + for i := range maxUint /* ERROR "cannot use maxUint (untyped int constant 
18446744073709551615) as int value in range clause (overflows)" */ { + _ = i + } + + var i int + _ = i + for i = range maxInt { + } + for i = range maxInt /* ERROR "cannot use maxInt + 1 (untyped int constant 9223372036854775808) as int value in range clause (overflows)" */ + 1 { + } + for i = range maxUint /* ERROR "cannot use maxUint (untyped int constant 18446744073709551615) as int value in range clause (overflows)" */ { + } + + var j uint + _ = j + for j = range maxInt { + } + for j = range maxInt + 1 { + } + for j = range maxUint { + } + for j = range maxUint /* ERROR "cannot use maxUint + 1 (untyped int constant 18446744073709551616) as uint value in range clause (overflows)" */ + 1 { + } + + for range 256 { + } + for _ = range 256 { + } + for i = range 256 { + } + for i := range 256 { + _ = i + } + + var u8 uint8 + _ = u8 + for u8 = range - /* ERROR "cannot use -1 (untyped int constant) as uint8 value in range clause (overflows)" */ 1 { + } + for u8 = range 0 { + } + for u8 = range 255 { + } + for u8 = range 256 /* ERROR "cannot use 256 (untyped int constant) as uint8 value in range clause (overflows)" */ { + } +} diff --git a/src/internal/types/testdata/spec/typeAliases1.22.go b/src/internal/types/testdata/spec/typeAliases1.22.go new file mode 100644 index 0000000000..4b7beeed49 --- /dev/null +++ b/src/internal/types/testdata/spec/typeAliases1.22.go @@ -0,0 +1,10 @@ +// -lang=go1.22 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ 
+package aliasTypes 
+ 
+type _ = int 
+type _[P /* ERROR "generic type alias requires go1.23 or later" */ any] = int 
diff --git a/src/internal/types/testdata/spec/typeAliases1.23a.go b/src/internal/types/testdata/spec/typeAliases1.23a.go 
new file mode 100644 
index 0000000000..0ea21a4e32 
--- /dev/null 
+++ b/src/internal/types/testdata/spec/typeAliases1.23a.go 
@@ -0,0 +1,10 @@ 
+// -lang=go1.23 -gotypesalias=0 
+ 
+// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style 
+// license that can be found in the LICENSE file. 
+ 
+package aliasTypes 
+ 
+type _ = int 
+type _ /* ERROR "generic type alias requires GODEBUG=gotypesalias=1" */ [P any] = int 
diff --git a/src/internal/types/testdata/spec/typeAliases1.23b.go b/src/internal/types/testdata/spec/typeAliases1.23b.go 
new file mode 100644 
index 0000000000..9dae0ea778 
--- /dev/null 
+++ b/src/internal/types/testdata/spec/typeAliases1.23b.go 
@@ -0,0 +1,41 @@ 
+// -lang=go1.23 -gotypesalias=1 
+ 
+// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style 
+// license that can be found in the LICENSE file. 
+ 
+package aliasTypes 
+ 
+type _ = int 
+type _[P any] = int 
+ 
+// A type alias may have fewer type parameters than its RHS. 
+type RHS[P any, Q ~int] struct { 
+	p P 
+	q Q 
+} 
+ 
+type _[P any] = RHS[P, int] 
+ 
+// Or it may have more type parameters than its RHS. 
+type _[P any, Q ~int, R comparable] = RHS[P, Q] 
+ 
+// The type parameters of a type alias must implement the 
+// corresponding type constraints of the type parameters 
+// on the RHS (if any) 
+type _[P any, Q ~int] = RHS[P, Q] 
+type _[P any, Q int] = RHS[P, Q] 
+type _[P int | float64] = RHS[P, int] 
+type _[P, Q any] = RHS[P, Q /* ERROR "Q does not satisfy ~int" */] 
+ 
+// ---------------------------------------------------------------------------- 
+// NOTE: The code below does not work yet. 
+// TODO: Implement this. 
+ +// A generic type alias may be used like any other generic type. +type A[P any] = RHS[P, int] + +func _(a A /* ERROR "not a generic type" */ [string]) { + a.p = "foo" + a.q = 42 +} diff --git a/src/internal/types/testdata/spec/typeAliases1.8.go b/src/internal/types/testdata/spec/typeAliases1.8.go new file mode 100644 index 0000000000..ecc01bbc34 --- /dev/null +++ b/src/internal/types/testdata/spec/typeAliases1.8.go @@ -0,0 +1,10 @@ +// -lang=go1.8 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package aliasTypes + +type _ = /* ERROR "type alias requires go1.9 or later" */ int +type _[P /* ERROR "generic type alias requires go1.23 or later" */ interface{}] = int diff --git a/src/internal/xcoff/ar.go b/src/internal/xcoff/ar.go index 9cbd50d149..e616f377a3 100644 --- a/src/internal/xcoff/ar.go +++ b/src/internal/xcoff/ar.go @@ -54,7 +54,7 @@ type Archive struct { closer io.Closer } -// MemberHeader holds information about a big archive file header +// ArchiveHeader holds information about a big archive file header type ArchiveHeader struct { magic string } diff --git a/src/internal/xcoff/file.go b/src/internal/xcoff/file.go index 12f78ccb69..9b9627a74a 100644 --- a/src/internal/xcoff/file.go +++ b/src/internal/xcoff/file.go @@ -8,6 +8,7 @@ package xcoff import ( "debug/dwarf" "encoding/binary" + "errors" "fmt" "internal/saferio" "io" @@ -261,7 +262,7 @@ func NewFile(r io.ReaderAt) (*File, error) { } r2 := r if scnptr == 0 { // .bss must have all 0s - r2 = zeroReaderAt{} + r2 = &nobitsSectionReader{} } s.sr = io.NewSectionReader(r2, int64(scnptr), int64(s.Size)) s.ReaderAt = s.sr @@ -451,15 +452,10 @@ func NewFile(r io.ReaderAt) (*File, error) { return f, nil } -// zeroReaderAt is ReaderAt that reads 0s. -type zeroReaderAt struct{} +type nobitsSectionReader struct{} -// ReadAt writes len(p) 0s into p. 
-func (w zeroReaderAt) ReadAt(p []byte, off int64) (n int, err error) { - for i := range p { - p[i] = 0 - } - return len(p), nil +func (*nobitsSectionReader) ReadAt(p []byte, off int64) (n int, err error) { + return 0, errors.New("unexpected read from section with uninitialized data") } // Data reads and returns the contents of the XCOFF section s. diff --git a/src/internal/zstd/fuzz_test.go b/src/internal/zstd/fuzz_test.go index 4b5c9961d8..e945f41241 100644 --- a/src/internal/zstd/fuzz_test.go +++ b/src/internal/zstd/fuzz_test.go @@ -25,6 +25,7 @@ var badStrings = []string{ "(\xb5/\xfd00\xec\x00\x00&@\x05\x05A7002\x02\x00\x02\x00\x02\x0000000000000000", "(\xb5/\xfd00\xec\x00\x00V@\x05\x0517002\x02\x00\x02\x00\x02\x0000000000000000", "\x50\x2a\x4d\x18\x02\x00\x00\x00", + "(\xb5/\xfd\xe40000000\xfa20\x000", } // This is a simple fuzzer to see if the decompressor panics. diff --git a/src/internal/zstd/zstd.go b/src/internal/zstd/zstd.go index 0230076f50..0370f601cb 100644 --- a/src/internal/zstd/zstd.go +++ b/src/internal/zstd/zstd.go @@ -237,7 +237,7 @@ retry: // Figure out the maximum amount of data we need to retain // for backreferences. - var windowSize int + var windowSize uint64 if !singleSegment { // Window descriptor. RFC 3.1.1.1.2. windowDescriptor := r.scratch[0] @@ -246,7 +246,7 @@ retry: windowLog := exponent + 10 windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * mantissa - windowSize = int(windowBase + windowAdd) + windowSize = windowBase + windowAdd // Default zstd sets limits on the window size. if fuzzing && (windowLog > 31 || windowSize > 1<<27) { @@ -288,12 +288,13 @@ retry: // When Single_Segment_Flag is set, Window_Descriptor is not present. // In this case, Window_Size is Frame_Content_Size. if singleSegment { - windowSize = int(r.remainingFrameSize) + windowSize = r.remainingFrameSize } // RFC 8878 3.1.1.1.1.2. permits us to set an 8M max on window size. 
- if windowSize > 8<<20 { - windowSize = 8 << 20 + const maxWindowSize = 8 << 20 + if windowSize > maxWindowSize { + windowSize = maxWindowSize } relativeOffset += headerSize @@ -307,7 +308,7 @@ retry: r.repeatedOffset2 = 4 r.repeatedOffset3 = 8 r.huffmanTableBits = 0 - r.window.reset(windowSize) + r.window.reset(int(windowSize)) r.seqTables[0] = nil r.seqTables[1] = nil r.seqTables[2] = nil diff --git a/src/io/fs/fs.go b/src/io/fs/fs.go index d6c75c4cf4..6891d75a0e 100644 --- a/src/io/fs/fs.go +++ b/src/io/fs/fs.go @@ -5,6 +5,9 @@ // Package fs defines basic interfaces to a file system. // A file system can be provided by the host operating system // but also by other packages. +// +// See the [testing/fstest] package for support with testing +// implementations of file systems. package fs import ( @@ -18,6 +21,9 @@ import ( // The FS interface is the minimum implementation required of the file system. // A file system may implement additional interfaces, // such as [ReadFileFS], to provide additional or optimized functionality. +// +// [testing/fstest.TestFS] may be used to test implementations of an FS for +// correctness. type FS interface { // Open opens the named file. // diff --git a/src/io/fs/readdir_test.go b/src/io/fs/readdir_test.go index a2b2c121ff..4c409ae7a0 100644 --- a/src/io/fs/readdir_test.go +++ b/src/io/fs/readdir_test.go @@ -5,6 +5,7 @@ package fs_test import ( + "errors" . 
"io/fs" "os" "testing" @@ -91,3 +92,20 @@ func TestFileInfoToDirEntry(t *testing.T) { }) } } + +func errorPath(err error) string { + var perr *PathError + if !errors.As(err, &perr) { + return "" + } + return perr.Path +} + +func TestReadDirPath(t *testing.T) { + fsys := os.DirFS(t.TempDir()) + _, err1 := ReadDir(fsys, "non-existent") + _, err2 := ReadDir(struct{ FS }{fsys}, "non-existent") + if s1, s2 := errorPath(err1), errorPath(err2); s1 != s2 { + t.Fatalf("s1: %s != s2: %s", s1, s2) + } +} diff --git a/src/io/fs/readfile_test.go b/src/io/fs/readfile_test.go index 07219c1445..3c521f6142 100644 --- a/src/io/fs/readfile_test.go +++ b/src/io/fs/readfile_test.go @@ -6,6 +6,7 @@ package fs_test import ( . "io/fs" + "os" "testing" "testing/fstest" "time" @@ -57,3 +58,12 @@ func TestReadFile(t *testing.T) { t.Fatalf(`ReadFile(sub(.), "hello.txt") = %q, %v, want %q, nil`, data, err, "hello, world") } } + +func TestReadFilePath(t *testing.T) { + fsys := os.DirFS(t.TempDir()) + _, err1 := ReadFile(fsys, "non-existent") + _, err2 := ReadFile(struct{ FS }{fsys}, "non-existent") + if s1, s2 := errorPath(err1), errorPath(err2); s1 != s2 { + t.Fatalf("s1: %s != s2: %s", s1, s2) + } +} diff --git a/src/io/fs/sub.go b/src/io/fs/sub.go index 9999e63b26..70ac623077 100644 --- a/src/io/fs/sub.go +++ b/src/io/fs/sub.go @@ -33,7 +33,7 @@ type SubFS interface { // chroot-style security mechanism, and Sub does not change that fact. func Sub(fsys FS, dir string) (FS, error) { if !ValidPath(dir) { - return nil, &PathError{Op: "sub", Path: dir, Err: errors.New("invalid name")} + return nil, &PathError{Op: "sub", Path: dir, Err: ErrInvalid} } if dir == "." { return fsys, nil @@ -52,7 +52,7 @@ type subFS struct { // fullName maps name to the fully-qualified name dir/name. 
func (f *subFS) fullName(op string, name string) (string, error) { if !ValidPath(name) { - return "", &PathError{Op: op, Path: name, Err: errors.New("invalid name")} + return "", &PathError{Op: op, Path: name, Err: ErrInvalid} } return path.Join(f.dir, name), nil } diff --git a/src/io/fs/sub_test.go b/src/io/fs/sub_test.go index 451b0efb02..c0bb2fd5b8 100644 --- a/src/io/fs/sub_test.go +++ b/src/io/fs/sub_test.go @@ -5,6 +5,7 @@ package fs_test import ( + "errors" . "io/fs" "testing" ) @@ -54,4 +55,9 @@ func TestSub(t *testing.T) { if pe.Path != "nonexist" { t.Fatalf("Open(nonexist): err.Path = %q, want %q", pe.Path, "nonexist") } + + _, err = sub.Open("./") + if !errors.Is(err, ErrInvalid) { + t.Fatalf("Open(./): error is %v, want %v", err, ErrInvalid) + } } diff --git a/src/io/fs/walk.go b/src/io/fs/walk.go index 48145d4cfc..2e8a8db111 100644 --- a/src/io/fs/walk.go +++ b/src/io/fs/walk.go @@ -9,12 +9,12 @@ import ( "path" ) -// SkipDir is used as a return value from WalkDirFuncs to indicate that +// SkipDir is used as a return value from [WalkDirFunc] to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. var SkipDir = errors.New("skip this directory") -// SkipAll is used as a return value from WalkDirFuncs to indicate that +// SkipAll is used as a return value from [WalkDirFunc] to indicate that // all remaining files and directories are to be skipped. It is not returned // as an error by any function. var SkipAll = errors.New("skip everything and stop the walk") diff --git a/src/iter/iter.go b/src/iter/iter.go new file mode 100644 index 0000000000..40e4770347 --- /dev/null +++ b/src/iter/iter.go @@ -0,0 +1,169 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.rangefunc + +// Package iter provides basic definitions and operations +// related to iteration in Go. +// +// This package is experimental and can only be imported +// when building with GOEXPERIMENT=rangefunc. +package iter + +import ( + "internal/race" + "unsafe" +) + +// Seq is an iterator over sequences of individual values. +// When called as seq(yield), seq calls yield(v) for each value v in the sequence, +// stopping early if yield returns false. +type Seq[V any] func(yield func(V) bool) + +// Seq2 is an iterator over sequences of pairs of values, most commonly key-value pairs. +// When called as seq(yield), seq calls yield(k, v) for each pair (k, v) in the sequence, +// stopping early if yield returns false. +type Seq2[K, V any] func(yield func(K, V) bool) + +type coro struct{} + +//go:linkname newcoro runtime.newcoro +func newcoro(func(*coro)) *coro + +//go:linkname coroswitch runtime.coroswitch +func coroswitch(*coro) + +// Pull converts the “push-style” iterator sequence seq +// into a “pull-style” iterator accessed by the two functions +// next and stop. +// +// Next returns the next value in the sequence +// and a boolean indicating whether the value is valid. +// When the sequence is over, next returns the zero V and false. +// It is valid to call next after reaching the end of the sequence +// or after calling stop. These calls will continue +// to return the zero V and false. +// +// Stop ends the iteration. It must be called when the caller is +// no longer interested in next values and next has not yet +// signaled that the sequence is over (with a false boolean return). +// It is valid to call stop multiple times and when next has +// already returned false. +// +// It is an error to call next or stop from multiple goroutines +// simultaneously. 
+func Pull[V any](seq Seq[V]) (next func() (V, bool), stop func()) { + var ( + v V + ok bool + done bool + racer int + ) + c := newcoro(func(c *coro) { + race.Acquire(unsafe.Pointer(&racer)) + yield := func(v1 V) bool { + if done { + return false + } + v, ok = v1, true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return !done + } + seq(yield) + var v0 V + v, ok = v0, false + done = true + race.Release(unsafe.Pointer(&racer)) + }) + next = func() (v1 V, ok1 bool) { + race.Write(unsafe.Pointer(&racer)) // detect races + if done { + return + } + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return v, ok + } + stop = func() { + race.Write(unsafe.Pointer(&racer)) // detect races + if !done { + done = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + } + } + return next, stop +} + +// Pull2 converts the “push-style” iterator sequence seq +// into a “pull-style” iterator accessed by the two functions +// next and stop. +// +// Next returns the next pair in the sequence +// and a boolean indicating whether the pair is valid. +// When the sequence is over, next returns a pair of zero values and false. +// It is valid to call next after reaching the end of the sequence +// or after calling stop. These calls will continue +// to return a pair of zero values and false. +// +// Stop ends the iteration. It must be called when the caller is +// no longer interested in next values and next has not yet +// signaled that the sequence is over (with a false boolean return). +// It is valid to call stop multiple times and when next has +// already returned false. +// +// It is an error to call next or stop from multiple goroutines +// simultaneously. 
+func Pull2[K, V any](seq Seq2[K, V]) (next func() (K, V, bool), stop func()) { + var ( + k K + v V + ok bool + done bool + racer int + ) + c := newcoro(func(c *coro) { + race.Acquire(unsafe.Pointer(&racer)) + yield := func(k1 K, v1 V) bool { + if done { + return false + } + k, v, ok = k1, v1, true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return !done + } + seq(yield) + var k0 K + var v0 V + k, v, ok = k0, v0, false + done = true + race.Release(unsafe.Pointer(&racer)) + }) + next = func() (k1 K, v1 V, ok1 bool) { + race.Write(unsafe.Pointer(&racer)) // detect races + if done { + return + } + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + return k, v, ok + } + stop = func() { + race.Write(unsafe.Pointer(&racer)) // detect races + if !done { + done = true + race.Release(unsafe.Pointer(&racer)) + coroswitch(c) + race.Acquire(unsafe.Pointer(&racer)) + } + } + return next, stop +} diff --git a/src/iter/pull_test.go b/src/iter/pull_test.go new file mode 100644 index 0000000000..38e0ee993a --- /dev/null +++ b/src/iter/pull_test.go @@ -0,0 +1,118 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build goexperiment.rangefunc + +package iter + +import ( + "fmt" + "runtime" + "testing" +) + +func count(n int) Seq[int] { + return func(yield func(int) bool) { + for i := range n { + if !yield(i) { + break + } + } + } +} + +func squares(n int) Seq2[int, int64] { + return func(yield func(int, int64) bool) { + for i := range n { + if !yield(i, int64(i)*int64(i)) { + break + } + } + } +} + +func TestPull(t *testing.T) { + + for end := 0; end <= 3; end++ { + t.Run(fmt.Sprint(end), func(t *testing.T) { + ng := runtime.NumGoroutine() + wantNG := func(want int) { + if xg := runtime.NumGoroutine() - ng; xg != want { + t.Helper() + t.Errorf("have %d extra goroutines, want %d", xg, want) + } + } + wantNG(0) + next, stop := Pull(count(3)) + wantNG(1) + for i := range end { + v, ok := next() + if v != i || ok != true { + t.Fatalf("next() = %d, %v, want %d, %v", v, ok, i, true) + } + wantNG(1) + } + wantNG(1) + if end < 3 { + stop() + wantNG(0) + } + for range 2 { + v, ok := next() + if v != 0 || ok != false { + t.Fatalf("next() = %d, %v, want %d, %v", v, ok, 0, false) + } + wantNG(0) + } + wantNG(0) + + stop() + stop() + stop() + wantNG(0) + }) + } +} + +func TestPull2(t *testing.T) { + for end := 0; end <= 3; end++ { + t.Run(fmt.Sprint(end), func(t *testing.T) { + ng := runtime.NumGoroutine() + wantNG := func(want int) { + if xg := runtime.NumGoroutine() - ng; xg != want { + t.Helper() + t.Errorf("have %d extra goroutines, want %d", xg, want) + } + } + wantNG(0) + next, stop := Pull2(squares(3)) + wantNG(1) + for i := range end { + k, v, ok := next() + if k != i || v != int64(i*i) || ok != true { + t.Fatalf("next() = %d, %d, %v, want %d, %d, %v", k, v, ok, i, i*i, true) + } + wantNG(1) + } + wantNG(1) + if end < 3 { + stop() + wantNG(0) + } + for range 2 { + k, v, ok := next() + if v != 0 || ok != false { + t.Fatalf("next() = %d, %d, %v, want %d, %d, %v", k, v, ok, 0, 0, false) + } + wantNG(0) + } + wantNG(0) + + stop() + stop() + stop() + wantNG(0) + }) + } +} diff 
--git a/src/log/slog/doc.go b/src/log/slog/doc.go index 001559326b..cc034ca4b9 100644 --- a/src/log/slog/doc.go +++ b/src/log/slog/doc.go @@ -310,8 +310,10 @@ Then use a value of that type in log calls: Now computeExpensiveValue will only be called when the line is enabled. The built-in handlers acquire a lock before calling [io.Writer.Write] -to ensure that each record is written in one piece. User-defined -handlers are responsible for their own locking. +to ensure that exactly one [Record] is written at a time in its entirety. +Although each log record has a timestamp, +the built-in handlers do not use that time to sort the written records. +User-defined handlers are responsible for their own locking and sorting. # Writing a handler diff --git a/src/log/slog/handler.go b/src/log/slog/handler.go index 2182bfb609..2ff85b582e 100644 --- a/src/log/slog/handler.go +++ b/src/log/slog/handler.go @@ -76,11 +76,11 @@ type Handler interface { // A Handler should treat WithGroup as starting a Group of Attrs that ends // at the end of the log event. That is, // - // logger.WithGroup("s").LogAttrs(level, msg, slog.Int("a", 1), slog.Int("b", 2)) + // logger.WithGroup("s").LogAttrs(ctx, level, msg, slog.Int("a", 1), slog.Int("b", 2)) // // should behave like // - // logger.LogAttrs(level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) + // logger.LogAttrs(ctx, level, msg, slog.Group("s", slog.Int("a", 1), slog.Int("b", 2))) // // If the name is empty, WithGroup returns the receiver. WithGroup(name string) Handler diff --git a/src/log/slog/internal/benchmarks/handlers_test.go b/src/log/slog/internal/benchmarks/handlers_test.go index 6c00c80286..0e0fbf169f 100644 --- a/src/log/slog/internal/benchmarks/handlers_test.go +++ b/src/log/slog/internal/benchmarks/handlers_test.go @@ -1,3 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package benchmarks import ( diff --git a/src/log/slog/level_test.go b/src/log/slog/level_test.go index 0b28e71e4c..217a0d7204 100644 --- a/src/log/slog/level_test.go +++ b/src/log/slog/level_test.go @@ -5,6 +5,7 @@ package slog import ( + "bytes" "flag" "strings" "testing" @@ -50,12 +51,16 @@ func TestLevelVar(t *testing.T) { } -func TestMarshalJSON(t *testing.T) { +func TestLevelMarshalJSON(t *testing.T) { want := LevelWarn - 3 + wantData := []byte(`"INFO+1"`) data, err := want.MarshalJSON() if err != nil { t.Fatal(err) } + if !bytes.Equal(data, wantData) { + t.Errorf("got %s, want %s", string(data), string(wantData)) + } var got Level if err := got.UnmarshalJSON(data); err != nil { t.Fatal(err) @@ -67,10 +72,14 @@ func TestMarshalJSON(t *testing.T) { func TestLevelMarshalText(t *testing.T) { want := LevelWarn - 3 + wantData := []byte("INFO+1") data, err := want.MarshalText() if err != nil { t.Fatal(err) } + if !bytes.Equal(data, wantData) { + t.Errorf("got %s, want %s", string(data), string(wantData)) + } var got Level if err := got.UnmarshalText(data); err != nil { t.Fatal(err) diff --git a/src/log/slog/logger.go b/src/log/slog/logger.go index f03aeec295..10aa6a2b31 100644 --- a/src/log/slog/logger.go +++ b/src/log/slog/logger.go @@ -53,7 +53,8 @@ func init() { // Default returns the default [Logger]. func Default() *Logger { return defaultLogger.Load() } -// SetDefault makes l the default [Logger]. +// SetDefault makes l the default [Logger], which is used by +// the top-level functions [Info], [Debug] and so on. // After this call, output from the log package's default Logger // (as with [log.Print], etc.) will be logged using l's Handler, // at a level controlled by [SetLogLoggerLevel]. @@ -145,7 +146,6 @@ func (l *Logger) WithGroup(name string) *Logger { c := l.clone() c.handler = l.handler.WithGroup(name) return c - } // New creates a new Logger with the given non-nil Handler. 
diff --git a/src/log/slog/value_test.go b/src/log/slog/value_test.go index 923a4e0ccc..033f945407 100644 --- a/src/log/slog/value_test.go +++ b/src/log/slog/value_test.go @@ -30,9 +30,16 @@ func TestValueEqual(t *testing.T) { BoolValue(true), BoolValue(false), TimeValue(testTime), + TimeValue(time.Date(2001, 1, 2, 3, 4, 5, 0, time.UTC)), AnyValue(&x), AnyValue(&y), GroupValue(Bool("b", true), Int("i", 3)), + GroupValue(Bool("b", true), Int("i", 4)), + GroupValue(Bool("b", true), Int("j", 4)), + DurationValue(3 * time.Second), + DurationValue(2 * time.Second), + StringValue("foo"), + StringValue("fuu"), } for i, v1 := range vals { for j, v2 := range vals { @@ -164,6 +171,7 @@ func TestValueAny(t *testing.T) { time.Minute, time.Time{}, 3.14, + "foo", } { v := AnyValue(want) got := v.Any() diff --git a/src/make.bash b/src/make.bash index 76ad51624a..933573dd9d 100755 --- a/src/make.bash +++ b/src/make.bash @@ -7,9 +7,6 @@ # Environment variables that control make.bash: # -# GOROOT_FINAL: The expected final Go root, baked into binaries. -# The default is the location of the Go tree during the build. -# # GOHOSTARCH: The architecture for host tools (compilers and # binaries). Binaries of this type must be executable on the current # system, so the only common reason to set this is to set diff --git a/src/make.bat b/src/make.bat index d9f9b6cb6a..53122cbaef 100644 --- a/src/make.bat +++ b/src/make.bat @@ -4,9 +4,6 @@ :: Environment variables that control make.bat: :: -:: GOROOT_FINAL: The expected final Go root, baked into binaries. -:: The default is the location of the Go tree during the build. -:: :: GOHOSTARCH: The architecture for host tools (compilers and :: binaries). 
Binaries of this type must be executable on the current :: system, so the only common reason to set this is to set diff --git a/src/make.rc b/src/make.rc index b10be7dbd1..607e9360dc 100755 --- a/src/make.rc +++ b/src/make.rc @@ -7,9 +7,6 @@ # Environment variables that control make.rc: # -# GOROOT_FINAL: The expected final Go root, baked into binaries. -# The default is the location of the Go tree during the build. -# # GOHOSTARCH: The architecture for host tools (compilers and # binaries). Binaries of this type must be executable on the current # system, so the only common reason to set this is to set diff --git a/src/maps/maps.go b/src/maps/maps.go index befde18c9c..c92106710d 100644 --- a/src/maps/maps.go +++ b/src/maps/maps.go @@ -5,6 +5,10 @@ // Package maps defines various functions useful with maps of any type. package maps +import ( + _ "unsafe" +) + // Equal reports whether two maps contain the same key/value pairs. // Values are compared using ==. func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { @@ -34,6 +38,8 @@ func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M } // clone is implemented in the runtime package. +// +//go:linkname clone maps.clone func clone(m any) any // Clone returns a copy of m. This is a shallow clone: diff --git a/src/maps/maps_test.go b/src/maps/maps_test.go index 5e3f9ca03b..fa30fe8c2b 100644 --- a/src/maps/maps_test.go +++ b/src/maps/maps_test.go @@ -182,3 +182,61 @@ func TestCloneWithMapAssign(t *testing.T) { } } } + +func TestCloneLarge(t *testing.T) { + // See issue 64474. + type K [17]float64 // > 128 bytes + type V [17]float64 + + var zero float64 + negZero := -zero + + for tst := 0; tst < 3; tst++ { + // Initialize m with a key and value. 
+ m := map[K]V{} + var k1 K + var v1 V + m[k1] = v1 + + switch tst { + case 0: // nothing, just a 1-entry map + case 1: + // Add more entries to make it 2 buckets + // 1 entry already + // 7 more fill up 1 bucket + // 1 more to grow to 2 buckets + for i := 0; i < 7+1; i++ { + m[K{float64(i) + 1}] = V{} + } + case 2: + // Capture the map mid-grow + // 1 entry already + // 7 more fill up 1 bucket + // 5 more (13 total) fill up 2 buckets + // 13 more (26 total) fill up 4 buckets + // 1 more to start the 4->8 bucket grow + for i := 0; i < 7+5+13+1; i++ { + m[K{float64(i) + 1}] = V{} + } + } + + // Clone m, which should freeze the map's contents. + c := Clone(m) + + // Update m with new key and value. + k2, v2 := k1, v1 + k2[0] = negZero + v2[0] = 1.0 + m[k2] = v2 + + // Make sure c still has its old key and value. + for k, v := range c { + if math.Signbit(k[0]) { + t.Errorf("tst%d: sign bit of key changed; got %v want %v", tst, k, k1) + } + if v != v1 { + t.Errorf("tst%d: value changed; got %v want %v", tst, v, v1) + } + } + } +} diff --git a/src/math/big/arith_ppc64x.s b/src/math/big/arith_ppc64x.s index 9512a12270..c483e252ab 100644 --- a/src/math/big/arith_ppc64x.s +++ b/src/math/big/arith_ppc64x.s @@ -599,33 +599,80 @@ done: // func addMulVVW(z, x []Word, y Word) (c Word) TEXT ·addMulVVW(SB), NOSPLIT, $0 - MOVD z+0(FP), R10 // R10 = z[] - MOVD x+24(FP), R8 // R8 = x[] - MOVD y+48(FP), R9 // R9 = y - MOVD z_len+8(FP), R22 // R22 = z_len + MOVD z+0(FP), R3 // R3 = z[] + MOVD x+24(FP), R4 // R4 = x[] + MOVD y+48(FP), R5 // R5 = y + MOVD z_len+8(FP), R6 // R6 = z_len - MOVD R0, R3 // R3 will be the index register - CMP R0, R22 - MOVD R0, R4 // R4 = c = 0 - MOVD R22, CTR // Initialize loop counter - BEQ done - PCALIGN $16 + CMP R6, $4 + MOVD R0, R9 // R9 = c = 0 + BLT tail + SRD $2, R6, R7 + MOVD R7, CTR // Initialize loop counter + PCALIGN $16 loop: - MOVD (R8)(R3), R20 // Load x[i] - MOVD (R10)(R3), R21 // Load z[i] - MULLD R9, R20, R6 // R6 = Low-order(x[i]*y) - 
MULHDU R9, R20, R7 // R7 = High-order(x[i]*y) - ADDC R21, R6 // R6 = z0 - ADDZE R7 // R7 = z1 - ADDC R4, R6 // R6 = z0 + c + 0 - ADDZE R7, R4 // c += z1 - MOVD R6, (R10)(R3) // Store z[i] - ADD $8, R3 - BC 16, 0, loop // bdnz + MOVD 0(R4), R14 // x[i] + MOVD 8(R4), R16 // x[i+1] + MOVD 16(R4), R18 // x[i+2] + MOVD 24(R4), R20 // x[i+3] + MOVD 0(R3), R15 // z[i] + MOVD 8(R3), R17 // z[i+1] + MOVD 16(R3), R19 // z[i+2] + MOVD 24(R3), R21 // z[i+3] + MULLD R5, R14, R10 // low x[i]*y + MULHDU R5, R14, R11 // high x[i]*y + ADDC R15, R10 + ADDZE R11 + ADDC R9, R10 + ADDZE R11, R9 + MULLD R5, R16, R14 // low x[i+1]*y + MULHDU R5, R16, R15 // high x[i+1]*y + ADDC R17, R14 + ADDZE R15 + ADDC R9, R14 + ADDZE R15, R9 + MULLD R5, R18, R16 // low x[i+2]*y + MULHDU R5, R18, R17 // high x[i+2]*y + ADDC R19, R16 + ADDZE R17 + ADDC R9, R16 + ADDZE R17, R9 + MULLD R5, R20, R18 // low x[i+3]*y + MULHDU R5, R20, R19 // high x[i+3]*y + ADDC R21, R18 + ADDZE R19 + ADDC R9, R18 + ADDZE R19, R9 + MOVD R10, 0(R3) // z[i] + MOVD R14, 8(R3) // z[i+1] + MOVD R16, 16(R3) // z[i+2] + MOVD R18, 24(R3) // z[i+3] + ADD $32, R3 + ADD $32, R4 + BDNZ loop + + ANDCC $3, R6 +tail: + CMP R0, R6 + BEQ done + MOVD R6, CTR + PCALIGN $16 +tailloop: + MOVD 0(R4), R14 + MOVD 0(R3), R15 + MULLD R5, R14, R10 + MULHDU R5, R14, R11 + ADDC R15, R10 + ADDZE R11 + ADDC R9, R10 + ADDZE R11, R9 + MOVD R10, 0(R3) + ADD $8, R3 + ADD $8, R4 + BDNZ tailloop done: - MOVD R4, c+56(FP) + MOVD R9, c+56(FP) RET - diff --git a/src/math/big/float_test.go b/src/math/big/float_test.go index 7d6bf034df..bb045a0b48 100644 --- a/src/math/big/float_test.go +++ b/src/math/big/float_test.go @@ -194,13 +194,11 @@ func alike(x, y *Float) bool { func alike32(x, y float32) bool { // we can ignore NaNs return x == y && math.Signbit(float64(x)) == math.Signbit(float64(y)) - } func alike64(x, y float64) bool { // we can ignore NaNs return x == y && math.Signbit(x) == math.Signbit(y) - } func TestFloatMantExp(t *testing.T) { diff --git 
a/src/math/big/int_test.go b/src/math/big/int_test.go index cb964a43cd..088bce09f9 100644 --- a/src/math/big/int_test.go +++ b/src/math/big/int_test.go @@ -200,12 +200,22 @@ var mulRangesZ = []struct { "638952175999932299156089414639761565182862536979208272237582" + "511852109168640000000000000000000000", // -99! }, + + // overflow situations + {math.MaxInt64 - 0, math.MaxInt64, "9223372036854775807"}, + {math.MaxInt64 - 1, math.MaxInt64, "85070591730234615838173535747377725442"}, + {math.MaxInt64 - 2, math.MaxInt64, "784637716923335094969050127519550606919189611815754530810"}, + {math.MaxInt64 - 3, math.MaxInt64, "7237005577332262206126809393809643289012107973151163787181513908099760521240"}, } func TestMulRangeZ(t *testing.T) { var tmp Int // test entirely positive ranges for i, r := range mulRangesN { + // skip mulRangesN entries that overflow int64 + if int64(r.a) < 0 || int64(r.b) < 0 { + continue + } prod := tmp.MulRange(int64(r.a), int64(r.b)).String() if prod != r.prod { t.Errorf("#%da: got %s; want %s", i, prod, r.prod) diff --git a/src/math/big/nat.go b/src/math/big/nat.go index b9f4026a04..ecb7d363d4 100644 --- a/src/math/big/nat.go +++ b/src/math/big/nat.go @@ -624,7 +624,7 @@ func (z nat) mulRange(a, b uint64) nat { case a+1 == b: return z.mul(nat(nil).setUint64(a), nat(nil).setUint64(b)) } - m := (a + b) / 2 + m := a + (b-a)/2 // avoid overflow return z.mul(nat(nil).mulRange(a, m), nat(nil).mulRange(m+1, b)) } diff --git a/src/math/big/nat_test.go b/src/math/big/nat_test.go index b84a7be5bc..4722548fa9 100644 --- a/src/math/big/nat_test.go +++ b/src/math/big/nat_test.go @@ -6,6 +6,7 @@ package big import ( "fmt" + "math" "runtime" "strings" "testing" @@ -155,6 +156,10 @@ var mulRangesN = []struct { "638952175999932299156089414639761565182862536979208272237582" + "51185210916864000000000000000000000000", // 100! 
}, + {math.MaxUint64 - 0, math.MaxUint64, "18446744073709551615"}, + {math.MaxUint64 - 1, math.MaxUint64, "340282366920938463408034375210639556610"}, + {math.MaxUint64 - 2, math.MaxUint64, "6277101735386680761794095221682035635525021984684230311930"}, + {math.MaxUint64 - 3, math.MaxUint64, "115792089237316195360799967654821100226821973275796746098729803619699194331160"}, } func TestMulRangeN(t *testing.T) { diff --git a/src/math/floor_asm.go b/src/math/floor_asm.go index fb419d6da2..5cb45f5a7e 100644 --- a/src/math/floor_asm.go +++ b/src/math/floor_asm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build 386 || amd64 || arm64 || ppc64 || ppc64le || s390x || wasm +//go:build 386 || amd64 || arm64 || ppc64 || ppc64le || riscv64 || s390x || wasm package math diff --git a/src/math/floor_noasm.go b/src/math/floor_noasm.go index 5641c7ea0a..6754ca8fc8 100644 --- a/src/math/floor_noasm.go +++ b/src/math/floor_noasm.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !386 && !amd64 && !arm64 && !ppc64 && !ppc64le && !s390x && !wasm +//go:build !386 && !amd64 && !arm64 && !ppc64 && !ppc64le && !riscv64 && !s390x && !wasm package math diff --git a/src/math/floor_riscv64.s b/src/math/floor_riscv64.s new file mode 100644 index 0000000000..62ce963781 --- /dev/null +++ b/src/math/floor_riscv64.s @@ -0,0 +1,41 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +#include "textflag.h" + +#define PosInf 0x7FF0000000000000 + +// The rounding mode of RISC-V is different from Go spec. 
+ +#define ROUNDFN(NAME, MODE) \ +TEXT NAME(SB),NOSPLIT,$0; \ + MOVD x+0(FP), F0; \ + /* whether x is NaN */; \ + FEQD F0, F0, X6; \ + BNEZ X6, 3(PC); \ + /* return NaN if x is NaN */; \ + MOVD F0, ret+8(FP); \ + RET; \ + MOV $PosInf, X6; \ + FMVDX X6, F1; \ + FABSD F0, F2; \ + /* if abs(x) > +Inf, return Inf instead of round(x) */; \ + FLTD F1, F2, X6; \ + /* Inf should keep same signed with x then return */; \ + BEQZ X6, 3(PC); \ + FCVTLD.MODE F0, X6; \ + FCVTDL X6, F1; \ + /* rounding will drop signed bit in RISCV, restore it */; \ + FSGNJD F0, F1, F0; \ + MOVD F0, ret+8(FP); \ + RET + +// func archFloor(x float64) float64 +ROUNDFN(·archFloor, RDN) + +// func archCeil(x float64) float64 +ROUNDFN(·archCeil, RUP) + +// func archTrunc(x float64) float64 +ROUNDFN(·archTrunc, RTZ) diff --git a/src/math/rand/rand.go b/src/math/rand/rand.go index 78e176e78f..a8ed9c0cb7 100644 --- a/src/math/rand/rand.go +++ b/src/math/rand/rand.go @@ -273,7 +273,7 @@ func (r *Rand) Read(p []byte) (n int, err error) { switch src := r.src.(type) { case *lockedSource: return src.read(p, &r.readVal, &r.readPos) - case *fastSource: + case *runtimeSource: return src.read(p, &r.readVal, &r.readPos) } return read(p, r.src, &r.readVal, &r.readPos) @@ -328,8 +328,8 @@ func globalRand() *Rand { r.Seed(1) } else { r = &Rand{ - src: &fastSource{}, - s64: &fastSource{}, + src: &runtimeSource{}, + s64: &runtimeSource{}, } } @@ -346,29 +346,29 @@ func globalRand() *Rand { return r } -//go:linkname fastrand64 -func fastrand64() uint64 +//go:linkname runtime_rand runtime.rand +func runtime_rand() uint64 -// fastSource is an implementation of Source64 that uses the runtime +// runtimeSource is an implementation of Source64 that uses the runtime // fastrand functions. -type fastSource struct { +type runtimeSource struct { // The mutex is used to avoid race conditions in Read. 
mu sync.Mutex } -func (*fastSource) Int63() int64 { - return int64(fastrand64() & rngMask) +func (*runtimeSource) Int63() int64 { + return int64(runtime_rand() & rngMask) } -func (*fastSource) Seed(int64) { - panic("internal error: call to fastSource.Seed") +func (*runtimeSource) Seed(int64) { + panic("internal error: call to runtimeSource.Seed") } -func (*fastSource) Uint64() uint64 { - return fastrand64() +func (*runtimeSource) Uint64() uint64 { + return runtime_rand() } -func (fs *fastSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) { +func (fs *runtimeSource) read(p []byte, readVal *int64, readPos *int8) (n int, err error) { fs.mu.Lock() n, err = read(p, fs, readVal, readPos) fs.mu.Unlock() @@ -405,7 +405,7 @@ func Seed(seed int64) { // Otherwise either // 1) orig == nil, which is the normal case when Seed is the first // top-level function to be called, or - // 2) orig is already a fastSource, in which case we need to change + // 2) orig is already a runtimeSource, in which case we need to change // to a lockedSource. // Either way we do the same thing. diff --git a/src/math/rand/rand_test.go b/src/math/rand/rand_test.go index 4ad2ae2230..016cc69920 100644 --- a/src/math/rand/rand_test.go +++ b/src/math/rand/rand_test.go @@ -14,6 +14,7 @@ import ( . "math/rand" "os" "runtime" + "strings" "sync" "testing" "testing/iotest" @@ -45,14 +46,14 @@ var testSeeds = []int64{1, 1754801282, 1698661970, 1550503961} // checkSimilarDistribution returns success if the mean and stddev of the // two statsResults are similar. 
-func (this *statsResults) checkSimilarDistribution(expected *statsResults) error { - if !nearEqual(this.mean, expected.mean, expected.closeEnough, expected.maxError) { - s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", this.mean, expected.mean, expected.closeEnough, expected.maxError) +func (sr *statsResults) checkSimilarDistribution(expected *statsResults) error { + if !nearEqual(sr.mean, expected.mean, expected.closeEnough, expected.maxError) { + s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", sr.mean, expected.mean, expected.closeEnough, expected.maxError) fmt.Println(s) return errors.New(s) } - if !nearEqual(this.stddev, expected.stddev, expected.closeEnough, expected.maxError) { - s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", this.stddev, expected.stddev, expected.closeEnough, expected.maxError) + if !nearEqual(sr.stddev, expected.stddev, expected.closeEnough, expected.maxError) { + s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", sr.stddev, expected.stddev, expected.closeEnough, expected.maxError) fmt.Println(s) return errors.New(s) } @@ -331,7 +332,7 @@ func TestExpTables(t *testing.T) { func hasSlowFloatingPoint() bool { switch runtime.GOARCH { case "arm": - return os.Getenv("GOARM") == "5" + return os.Getenv("GOARM") == "5" || strings.HasSuffix(os.Getenv("GOARM"), ",softfloat") case "mips", "mipsle", "mips64", "mips64le": // Be conservative and assume that all mips boards // have emulated floating point. diff --git a/src/math/rand/v2/chacha8.go b/src/math/rand/v2/chacha8.go new file mode 100644 index 0000000000..6b9aa72782 --- /dev/null +++ b/src/math/rand/v2/chacha8.go @@ -0,0 +1,46 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand + +import "internal/chacha8rand" + +// A ChaCha8 is a ChaCha8-based cryptographically strong +// random number generator. 
+type ChaCha8 struct { + state chacha8rand.State +} + +// NewChaCha8 returns a new ChaCha8 seeded with the given seed. +func NewChaCha8(seed [32]byte) *ChaCha8 { + c := new(ChaCha8) + c.state.Init(seed) + return c +} + +// Seed resets the ChaCha8 to behave the same way as NewChaCha8(seed). +func (c *ChaCha8) Seed(seed [32]byte) { + c.state.Init(seed) +} + +// Uint64 returns a uniformly distributed random uint64 value. +func (c *ChaCha8) Uint64() uint64 { + for { + x, ok := c.state.Next() + if ok { + return x + } + c.state.Refill() + } +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +func (c *ChaCha8) UnmarshalBinary(data []byte) error { + return chacha8rand.Unmarshal(&c.state, data) +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (c *ChaCha8) MarshalBinary() ([]byte, error) { + return chacha8rand.Marshal(&c.state), nil +} diff --git a/src/math/rand/v2/chacha8_test.go b/src/math/rand/v2/chacha8_test.go new file mode 100644 index 0000000000..2c55b479b2 --- /dev/null +++ b/src/math/rand/v2/chacha8_test.go @@ -0,0 +1,531 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package rand_test + +import ( + . 
"math/rand/v2" + "testing" +) + +func TestChaCha8(t *testing.T) { + p := NewChaCha8(chacha8seed) + for i, x := range chacha8output { + if u := p.Uint64(); u != x { + t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x) + } + } + + p.Seed(chacha8seed) + for i, x := range chacha8output { + if u := p.Uint64(); u != x { + t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x) + } + } +} + +func TestChaCha8Marshal(t *testing.T) { + p := NewChaCha8(chacha8seed) + for i, x := range chacha8output { + enc, err := p.MarshalBinary() + if err != nil { + t.Fatalf("#%d: MarshalBinary: %v", i, err) + } + if string(enc) != chacha8marshal[i] { + t.Fatalf("#%d: MarshalBinary=%q, want %q", i, enc, chacha8marshal[i]) + } + *p = ChaCha8{} + if err := p.UnmarshalBinary(enc); err != nil { + t.Fatalf("#%d: UnmarshalBinary: %v", i, err) + } + if u := p.Uint64(); u != x { + t.Errorf("ChaCha8 #%d = %#x, want %#x", i, u, x) + } + } +} + +func BenchmarkChaCha8(b *testing.B) { + p := NewChaCha8([32]byte{1, 2, 3, 4, 5}) + var t uint64 + for n := b.N; n > 0; n-- { + t += p.Uint64() + } + Sink = t +} + +// Golden output test to make sure algorithm never changes, +// so that its use in math/rand/v2 stays stable. 
+ +var chacha8seed = [32]byte([]byte("ABCDEFGHIJKLMNOPQRSTUVWXYZ123456")) + +var chacha8output = []uint64{ + 0xb773b6063d4616a5, 0x1160af22a66abc3c, 0x8c2599d9418d287c, 0x7ee07e037edc5cd6, + 0xcfaa9ee02d1c16ad, 0x0e090eef8febea79, 0x3c82d271128b5b3e, 0x9c5addc11252a34f, + 0xdf79bb617d6ceea6, 0x36d553591f9d736a, 0xeef0d14e181ee01f, 0x089bfc760ae58436, + 0xd9e52b59cc2ad268, 0xeb2fb4444b1b8aba, 0x4f95c8a692c46661, 0xc3c6323217cae62c, + 0x91ebb4367f4e2e7e, 0x784cf2c6a0ec9bc6, 0x5c34ec5c34eabe20, 0x4f0a8f515570daa8, + 0xfc35dcb4113d6bf2, 0x5b0da44c645554bc, 0x6d963da3db21d9e1, 0xeeaefc3150e500f3, + 0x2d37923dda3750a5, 0x380d7a626d4bc8b0, 0xeeaf68ede3d7ee49, 0xf4356695883b717c, + 0x846a9021392495a4, 0x8e8510549630a61b, 0x18dc02545dbae493, 0x0f8f9ff0a65a3d43, + 0xccf065f7190ff080, 0xfd76d1aa39673330, 0x95d232936cba6433, 0x6c7456d1070cbd17, + 0x462acfdaff8c6562, 0x5bafab866d34fc6a, 0x0c862f78030a2988, 0xd39a83e407c3163d, + 0xc00a2b7b45f22ebf, 0x564307c62466b1a9, 0x257e0424b0c072d4, 0x6fb55e99496c28fe, + 0xae9873a88f5cd4e0, 0x4657362ac60d3773, 0x1c83f91ecdf23e8e, 0x6fdc0792c15387c0, + 0x36dad2a30dfd2b5c, 0xa4b593290595bdb7, 0x4de18934e4cc02c5, 0xcdc0d604f015e3a7, + 0xfba0dbf69ad80321, 0x60e8bea3d139de87, 0xd18a4d851ef48756, 0x6366447c2215f34a, + 0x05682e97d3d007ee, 0x4c0e8978c6d54ab2, 0xcf1e9f6a6712edc2, 0x061439414c80cfd3, + 0xd1a8b6e2745c0ead, 0x31a7918d45c410e8, 0xabcc61ad90216eec, 0x4040d92d2032a71a, + 0x3cd2f66ffb40cd68, 0xdcd051c07295857a, 0xeab55cbcd9ab527e, 0x18471dce781bdaac, + 0xf7f08cd144dc7252, 0x5804e0b13d7f40d1, 0x5cb1a446e4b2d35b, 0xe6d4a728d2138a06, + 0x05223e40ca60dad8, 0x2d61ec3206ac6a68, 0xab692356874c17b8, 0xc30954417676de1c, + 0x4f1ace3732225624, 0xfba9510813988338, 0x997f200f52752e11, 0x1116aaafe86221fa, + 0x07ce3b5cb2a13519, 0x2956bc72bc458314, 0x4188b7926140eb78, 0x56ca6dbfd4adea4d, + 0x7fe3c22349340ce5, 0x35c08f9c37675f8a, 0x11e1c7fbef5ed521, 0x98adc8464ec1bc75, + 0xd163b2c73d1203f8, 0x8c761ee043a2f3f3, 0x24b99d6accecd7b7, 0x793e31aa112f0370, + 
0x8e87dc2a19285139, 0x4247ae04f7096e25, 0x514f3122926fe20f, 0xdc6fb3f045d2a7e9, + 0x15cb30cecdd18eba, 0xcbc7fdecf6900274, 0x3fb5c696dc8ba021, 0xd1664417c8d274e6, + 0x05f7e445ea457278, 0xf920bbca1b9db657, 0x0c1950b4da22cb99, 0xf875baf1af09e292, + 0xbed3d7b84250f838, 0xf198e8080fd74160, 0xc9eda51d9b7ea703, 0xf709ef55439bf8f6, + 0xd20c74feebf116fc, 0x305668eb146d7546, 0x829af3ec10d89787, 0x15b8f9697b551dbc, + 0xfc823c6c8e64b8c9, 0x345585e8183b40bc, 0x674b4171d6581368, 0x1234d81cd670e9f7, + 0x0e505210d8a55e19, 0xe8258d69eeeca0dc, 0x05d4c452e8baf67e, 0xe8dbe30116a45599, + 0x1cf08ce1b1176f00, 0xccf7d0a4b81ecb49, 0x303fea136b2c430e, 0x861d6c139c06c871, + 0x5f41df72e05e0487, 0x25bd7e1e1ae26b1d, 0xbe9f4004d662a41d, 0x65bf58d483188546, + 0xd1b27cff69db13cc, 0x01a6663372c1bb36, 0x578dd7577b727f4d, 0x19c78f066c083cf6, + 0xdbe014d4f9c391bb, 0x97fbb2dd1d13ffb3, 0x31c91e0af9ef8d4f, 0x094dfc98402a43ba, + 0x069bd61bea37b752, 0x5b72d762e8d986ca, 0x72ee31865904bc85, 0xd1f5fdc5cd36c33e, + 0xba9b4980a8947cad, 0xece8f05eac49ab43, 0x65fe1184abae38e7, 0x2d7cb9dea5d31452, + 0xcc71489476e467e3, 0x4c03a258a578c68c, 0x00efdf9ecb0fd8fc, 0x9924cad471e2666d, + 0x87f8668318f765e9, 0xcb4dc57c1b55f5d8, 0xd373835a86604859, 0xe526568b5540e482, + 0x1f39040f08586fec, 0xb764f3f00293f8e6, 0x049443a2f6bd50a8, 0x76fec88697d3941a, + 0x3efb70d039bae7a2, 0xe2f4611368eca8a8, 0x7c007a96e01d2425, 0xbbcce5768e69c5bf, + 0x784fb4985c42aac3, 0xf72b5091aa223874, 0x3630333fb1e62e07, 0x8e7319ebdebbb8de, + 0x2a3982bca959fa00, 0xb2b98b9f964ba9b3, 0xf7e31014adb71951, 0xebd0fca3703acc82, + 0xec654e2a2fe6419a, 0xb326132d55a52e2c, 0x2248c57f44502978, 0x32710c2f342daf16, + 0x0517b47b5acb2bec, 0x4c7a718fca270937, 0xd69142bed0bcc541, 0xe40ebcb8ff52ce88, + 0x3e44a2dbc9f828d4, 0xc74c2f4f8f873f58, 0x3dbf648eb799e45b, 0x33f22475ee0e86f8, + 0x1eb4f9ee16d47f65, 0x40f8d2b8712744e3, 0xb886b4da3cb14572, 0x2086326fbdd6f64d, + 0xcc3de5907dd882b9, 0xa2e8b49a5ee909df, 0xdbfb8e7823964c10, 0x70dd6089ef0df8d5, + 0x30141663cdd9c99f, 
0x04b805325c240365, 0x7483d80314ac12d6, 0x2b271cb91aa7f5f9, + 0x97e2245362abddf0, 0x5a84f614232a9fab, 0xf71125fcda4b7fa2, 0x1ca5a61d74b27267, + 0x38cc6a9b3adbcb45, 0xdde1bb85dc653e39, 0xe9d0c8fa64f89fd4, 0x02c5fb1ecd2b4188, + 0xf2bd137bca5756e5, 0xadefe25d121be155, 0x56cd1c3c5d893a8e, 0x4c50d337beb65bb9, + 0x918c5151675cf567, 0xaba649ffcfb56a1e, 0x20c74ab26a2247cd, 0x71166bac853c08da, + 0xb07befe2e584fc5d, 0xda45ff2a588dbf32, 0xdb98b03c4d75095e, 0x60285ae1aaa65a4c, + 0xf93b686a263140b8, 0xde469752ee1c180e, 0xcec232dc04129aae, 0xeb916baa1835ea04, + 0xd49c21c8b64388ff, 0x72a82d9658864888, 0x003348ef7eac66a8, 0x7f6f67e655b209eb, + 0x532ffb0b7a941b25, 0xd940ade6128deede, 0xdf24f2a1af89fe23, 0x95aa3b4988195ae0, + 0x3da649404f94be4a, 0x692dad132c3f7e27, 0x40aee76ecaaa9eb8, 0x1294a01e09655024, + 0x6df797abdba4e4f5, 0xea2fb6024c1d7032, 0x5f4e0492295489fc, 0x57972914ea22e06a, + 0x9a8137d133aad473, 0xa2e6dd6ae7cdf2f3, 0x9f42644f18086647, 0x16d03301c170bd3e, + 0x908c416fa546656d, 0xe081503be22e123e, 0x077cf09116c4cc72, 0xcbd25cd264b7f229, + 0x3db2f468ec594031, 0x46c00e734c9badd5, 0xd0ec0ac72075d861, 0x3037cb3cf80b7630, + 0x574c3d7b3a2721c6, 0xae99906a0076824b, 0xb175a5418b532e70, 0xd8b3e251ee231ddd, + 0xb433eec25dca1966, 0x530f30dc5cff9a93, 0x9ff03d98b53cd335, 0xafc4225076558cdf, + 0xef81d3a28284402a, 0x110bdbf51c110a28, 0x9ae1b255d027e8f6, 0x7de3e0aa24688332, + 0xe483c3ecd2067ee2, 0xf829328b276137e6, 0xa413ccad57562cad, 0xe6118e8b496acb1f, + 0x8288dca6da5ec01f, 0xa53777dc88c17255, 0x8a00f1e0d5716eda, 0x618e6f47b7a720a8, + 0x9e3907b0c692a841, 0x978b42ca963f34f3, 0x75e4b0cd98a7d7ef, 0xde4dbd6e0b5f4752, + 0x0252e4153f34493f, 0x50f0e7d803734ef9, 0x237766a38ed167ee, 0x4124414001ee39a0, + 0xd08df643e535bb21, 0x34f575b5a9a80b74, 0x2c343af87297f755, 0xcd8b6d99d821f7cb, + 0xe376fd7256fc48ae, 0xe1b06e7334352885, 0xfa87b26f86c169eb, 0x36c1604665a971de, + 0xdba147c2239c8e80, 0x6b208e69fc7f0e24, 0x8795395b6f2b60c3, 0x05dabee9194907f4, + 0xb98175142f5ed902, 0x5e1701e2021ddc81, 
0x0875aba2755eed08, 0x778d83289251de95, + 0x3bfbe46a039ecb31, 0xb24704fce4cbd7f9, 0x6985ffe9a7c91e3d, 0xc8efb13df249dabb, + 0xb1037e64b0f4c9f6, 0x55f69fd197d6b7c3, 0x672589d71d68a90c, 0xbebdb8224f50a77e, + 0x3f589f80007374a7, 0xd307f4635954182a, 0xcff5850c10d4fd90, 0xc6da02dfb6408e15, + 0x93daeef1e2b1a485, 0x65d833208aeea625, 0xe2b13fa13ed3b5fa, 0x67053538130fb68e, + 0xc1042f6598218fa9, 0xee5badca749b8a2e, 0x6d22a3f947dae37d, 0xb62c6d1657f4dbaf, + 0x6e007de69704c20b, 0x1af2b913fc3841d8, 0xdc0e47348e2e8e22, 0x9b1ddef1cf958b22, + 0x632ed6b0233066b8, 0xddd02d3311bed8f2, 0xf147cfe1834656e9, 0x399aaa49d511597a, + 0x6b14886979ec0309, 0x64fc4ac36b5afb97, 0xb82f78e07f7cf081, 0x10925c9a323d0e1b, + 0xf451c79ee13c63f6, 0x7c2fc180317876c7, 0x35a12bd9eecb7d22, 0x335654a539621f90, + 0xcc32a3f35db581f0, 0xc60748a80b2369cb, 0x7c4dd3b08591156b, 0xac1ced4b6de22291, + 0xa32cfa2df134def5, 0x627108918dea2a53, 0x0555b1608fcb4ff4, 0x143ee7ac43aaa33c, + 0xdae90ce7cf4fc218, 0x4d68fc2582bcf4b5, 0x37094e1849135d71, 0xf7857e09f3d49fd8, + 0x007538c503768be7, 0xedf648ba2f6be601, 0xaa347664dd72513e, 0xbe63893c6ef23b86, + 0x130b85710605af97, 0xdd765c6b1ef6ab56, 0xf3249a629a97dc6b, 0x2a114f9020fab8e5, + 0x5a69e027cfc6ad08, 0x3c4ccb36f1a5e050, 0x2e9e7d596834f0a5, 0x2430be6858fce789, + 0xe90b862f2466e597, 0x895e2884f159a9ec, 0x26ab8fa4902fcb57, 0xa6efff5c54e1fa50, + 0x333ac4e5811a8255, 0xa58d515f02498611, 0xfe5a09dcb25c6ef4, 0x03898988ab5f5818, + 0x289ff6242af6c617, 0x3d9dd59fd381ea23, 0x52d7d93d8a8aae51, 0xc76a123d511f786f, + 0xf68901edaf00c46c, 0x8c630871b590de80, 0x05209c308991e091, 0x1f809f99b4788177, + 0x11170c2eb6c19fd8, 0x44433c779062ba58, 0xc0acb51af1874c45, 0x9f2e134284809fa1, + 0xedb523bd15c619fa, 0x02d97fd53ecc23c0, 0xacaf05a34462374c, 0xddd9c6d34bffa11f, +} + +var chacha8marshal = []string{ + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x01ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x02ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x03ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x04ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x05ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x06ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\aABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\bABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\tABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\nABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\vABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\fABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\rABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0eABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0fABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x10ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x11ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x12ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x13ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x14ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x15ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x16ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x17ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x18ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x19ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1aABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x1bABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1cABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1dABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1eABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1fABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00 ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00!ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\"ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00#ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00$ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00%ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00&ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00'ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00(ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00)ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00*ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00+ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00,ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00-ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00.ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00/ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x000ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x001ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x002ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x003ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x004ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x005ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x006ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x007ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x008ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x009ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00:ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00;ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00?ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00@ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00AABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00BABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00CABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00DABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00EABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00FABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00GABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00HABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00IABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00JABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00KABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00LABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00MABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00NABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00OABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00PABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00QABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00RABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00SABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00TABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00UABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00VABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00WABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00XABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00YABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00ZABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00[ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\\ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00]ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00^ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00_ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00`ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00aABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00bABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00cABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00dABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00eABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00fABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00gABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00hABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00iABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00jABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00kABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00lABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00mABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00nABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00oABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00pABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00qABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00rABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00sABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00tABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00uABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00vABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00wABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00xABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00yABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00zABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00{ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00|ABCDEFGHIJKLMNOPQRSTUVWXYZ123456", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x01>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x02>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x03>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x04>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x05>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x06>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\t>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\n>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\v>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\r>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x10>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x11>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x12>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x13>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x14>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x15>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x16>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x17>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x18>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x19>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1c>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1d>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00 >\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00!>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\">\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00#>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00$>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00%>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00&>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00'>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00(>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00)>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00*>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00+>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00,>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00->\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00.>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00/>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x000>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x001>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x002>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x003>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x004>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x005>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x006>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x007>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x008>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x009>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00:>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00;>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00<>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00=>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00>>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00?>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00@>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00A>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00B>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00C>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00D>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00E>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00F>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00G>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00H>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00I>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00J>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00K>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00L>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00M>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00N>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00O>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00P>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00Q>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00R>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00S>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00T>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00U>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00V>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00W>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00X>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00Y>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00Z>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00[>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\\>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00]>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00^>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00_>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00`>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00a>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00b>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00c>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00d>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00e>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00f>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00g>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00h>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00i>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00j>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00k>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00l>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00m>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00n>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00o>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00p>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00q>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00r>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00s>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00t>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00u>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00v>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00w>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00x>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00y>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00z>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00{>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + "chacha8:\x00\x00\x00\x00\x00\x00\x00|>\x15\x0e\xacHk4O\x11a\xa8R\xcd5\x9atr\x8cXO\x9c]\x10\xdf\xf61\xea\x11\x18\x06\x8a\xaa", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x01K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x02K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x03K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x04K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x05K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x06K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\tK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\nK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\vK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\rK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x0fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x10K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00\x11K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x12K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x13K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x14K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x15K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x16K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x17K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x18K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x19K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1cK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1dK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\x1fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00 K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00!K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\"K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00#K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00$K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00%K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00&K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00'K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00(K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00)K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00*K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00+K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00,K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00-K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00.K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00/K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x000K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x001K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x002K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x003K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x004K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x005K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x006K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x007K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x008K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x009K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00:K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00;K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00?K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00@K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00AK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00BK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00CK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00DK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00EK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00FK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00GK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00HK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00IK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00JK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00KK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00LK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00MK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00NK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00OK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00PK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00QK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00RK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00SK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00TK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00UK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00VK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00WK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00XK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00YK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00ZK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00[K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00\\K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00]K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00^K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00_K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00`K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00aK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00bK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00cK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00dK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00eK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00fK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00gK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00hK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00iK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00jK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00kK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00lK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00mK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00nK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00oK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00pK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00qK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00rK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + 
"chacha8:\x00\x00\x00\x00\x00\x00\x00sK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00tK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00uK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00vK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00wK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00xK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00yK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00zK3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", + "chacha8:\x00\x00\x00\x00\x00\x00\x00{K3\x9bB!,\x94\x9d\x975\xce'O_t\xee|\xb21\x87\xbb\xbb\xfd)\x8f\xe52\x01\vP\fk", +} diff --git a/src/math/rand/v2/rand.go b/src/math/rand/v2/rand.go index 5382f809e0..17e55830c8 100644 --- a/src/math/rand/v2/rand.go +++ b/src/math/rand/v2/rand.go @@ -14,7 +14,7 @@ // // This package's outputs might be easily predictable regardless of how it's // seeded. For random numbers suitable for security-sensitive work, see the -// crypto/rand package. +// [crypto/rand] package. package rand import ( @@ -250,20 +250,16 @@ func (r *Rand) Shuffle(n int, swap func(i, j int)) { // globalRand is the source of random numbers for the top-level // convenience functions. -var globalRand = &Rand{src: &fastSource{}} +var globalRand = &Rand{src: &runtimeSource{}} -//go:linkname fastrand64 -func fastrand64() uint64 +//go:linkname runtime_rand runtime.rand +func runtime_rand() uint64 -// fastSource is a Source that uses the runtime fastrand functions. 
-type fastSource struct{} +// runtimeSource is a Source that uses the runtime fastrand functions. +type runtimeSource struct{} -func (*fastSource) Int64() int64 { - return int64(fastrand64() << 1 >> 1) -} - -func (*fastSource) Uint64() uint64 { - return fastrand64() +func (*runtimeSource) Uint64() uint64 { + return runtime_rand() } // Int64 returns a non-negative pseudo-random 63-bit integer as an int64 diff --git a/src/math/rand/v2/rand_test.go b/src/math/rand/v2/rand_test.go index c4b53fa93a..d223180fb6 100644 --- a/src/math/rand/v2/rand_test.go +++ b/src/math/rand/v2/rand_test.go @@ -50,14 +50,14 @@ var testSeeds = []uint64{1, 1754801282, 1698661970, 1550503961} // checkSimilarDistribution returns success if the mean and stddev of the // two statsResults are similar. -func (this *statsResults) checkSimilarDistribution(expected *statsResults) error { - if !nearEqual(this.mean, expected.mean, expected.closeEnough, expected.maxError) { - s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", this.mean, expected.mean, expected.closeEnough, expected.maxError) +func (sr *statsResults) checkSimilarDistribution(expected *statsResults) error { + if !nearEqual(sr.mean, expected.mean, expected.closeEnough, expected.maxError) { + s := fmt.Sprintf("mean %v != %v (allowed error %v, %v)", sr.mean, expected.mean, expected.closeEnough, expected.maxError) fmt.Println(s) return errors.New(s) } - if !nearEqual(this.stddev, expected.stddev, expected.closeEnough, expected.maxError) { - s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", this.stddev, expected.stddev, expected.closeEnough, expected.maxError) + if !nearEqual(sr.stddev, expected.stddev, expected.closeEnough, expected.maxError) { + s := fmt.Sprintf("stddev %v != %v (allowed error %v, %v)", sr.stddev, expected.stddev, expected.closeEnough, expected.maxError) fmt.Println(s) return errors.New(s) } diff --git a/src/mime/multipart/formdata_test.go b/src/mime/multipart/formdata_test.go index d422729c96..bfa9f68382 
100644 --- a/src/mime/multipart/formdata_test.go +++ b/src/mime/multipart/formdata_test.go @@ -452,6 +452,48 @@ func TestReadFormLimits(t *testing.T) { } } +func TestReadFormEndlessHeaderLine(t *testing.T) { + for _, test := range []struct { + name string + prefix string + }{{ + name: "name", + prefix: "X-", + }, { + name: "value", + prefix: "X-Header: ", + }, { + name: "continuation", + prefix: "X-Header: foo\r\n ", + }} { + t.Run(test.name, func(t *testing.T) { + const eol = "\r\n" + s := `--boundary` + eol + s += `Content-Disposition: form-data; name="a"` + eol + s += `Content-Type: text/plain` + eol + s += test.prefix + fr := io.MultiReader( + strings.NewReader(s), + neverendingReader('X'), + ) + r := NewReader(fr, "boundary") + _, err := r.ReadForm(1 << 20) + if err != ErrMessageTooLarge { + t.Fatalf("ReadForm(1 << 20): %v, want ErrMessageTooLarge", err) + } + }) + } +} + +type neverendingReader byte + +func (r neverendingReader) Read(p []byte) (n int, err error) { + for i := range p { + p[i] = byte(r) + } + return len(p), nil +} + func BenchmarkReadForm(b *testing.B) { for _, test := range []struct { name string diff --git a/src/net/cgo_unix.go b/src/net/cgo_unix.go index 7ed5daad73..0f62fdeb11 100644 --- a/src/net/cgo_unix.go +++ b/src/net/cgo_unix.go @@ -14,6 +14,7 @@ package net import ( "context" "errors" + "internal/bytealg" "net/netip" "syscall" "unsafe" @@ -40,8 +41,20 @@ func (eai addrinfoErrno) isAddrinfoErrno() {} // doBlockingWithCtx executes a blocking function in a separate goroutine when the provided // context is cancellable. It is intended for use with calls that don't support context // cancellation (cgo, syscalls). blocking func may still be running after this function finishes. 
-func doBlockingWithCtx[T any](ctx context.Context, blocking func() (T, error)) (T, error) { +// For the duration of the execution of the blocking function, the thread is 'acquired' using [acquireThread], +// blocking might not be executed when the context gets cancelled early. +func doBlockingWithCtx[T any](ctx context.Context, lookupName string, blocking func() (T, error)) (T, error) { + if err := acquireThread(ctx); err != nil { + var zero T + return zero, &DNSError{ + Name: lookupName, + Err: mapErr(err).Error(), + IsTimeout: err == context.DeadlineExceeded, + } + } + if ctx.Done() == nil { + defer releaseThread() return blocking() } @@ -52,6 +65,7 @@ func doBlockingWithCtx[T any](ctx context.Context, blocking func() (T, error)) ( res := make(chan result, 1) go func() { + defer releaseThread() var r result r.res, r.err = blocking() res <- r @@ -62,7 +76,11 @@ func doBlockingWithCtx[T any](ctx context.Context, blocking func() (T, error)) ( return r.res, r.err case <-ctx.Done(): var zero T - return zero, mapErr(ctx.Err()) + return zero, &DNSError{ + Name: lookupName, + Err: mapErr(ctx.Err()).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } } } @@ -97,7 +115,7 @@ func cgoLookupPort(ctx context.Context, network, service string) (port int, err *_C_ai_family(&hints) = _C_AF_INET6 } - return doBlockingWithCtx(ctx, func() (int, error) { + return doBlockingWithCtx(ctx, network+"/"+service, func() (int, error) { return cgoLookupServicePort(&hints, network, service) }) } @@ -146,9 +164,6 @@ func cgoLookupServicePort(hints *_C_struct_addrinfo, network, service string) (p } func cgoLookupHostIP(network, name string) (addrs []IPAddr, err error) { - acquireThread() - defer releaseThread() - var hints _C_struct_addrinfo *_C_ai_flags(&hints) = cgoAddrInfoFlags *_C_ai_socktype(&hints) = _C_SOCK_STREAM @@ -213,7 +228,7 @@ func cgoLookupHostIP(network, name string) (addrs []IPAddr, err error) { } func cgoLookupIP(ctx context.Context, network, name string) (addrs 
[]IPAddr, err error) { - return doBlockingWithCtx(ctx, func() ([]IPAddr, error) { + return doBlockingWithCtx(ctx, name, func() ([]IPAddr, error) { return cgoLookupHostIP(network, name) }) } @@ -241,15 +256,12 @@ func cgoLookupPTR(ctx context.Context, addr string) (names []string, err error) return nil, &DNSError{Err: "invalid address " + ip.String(), Name: addr} } - return doBlockingWithCtx(ctx, func() ([]string, error) { + return doBlockingWithCtx(ctx, addr, func() ([]string, error) { return cgoLookupAddrPTR(addr, sa, salen) }) } func cgoLookupAddrPTR(addr string, sa *_C_struct_sockaddr, salen _C_socklen_t) (names []string, err error) { - acquireThread() - defer releaseThread() - var gerrno int var b []byte for l := nameinfoLen; l <= maxNameinfoLen; l *= 2 { @@ -276,11 +288,8 @@ func cgoLookupAddrPTR(addr string, sa *_C_struct_sockaddr, salen _C_socklen_t) ( } return nil, &DNSError{Err: err.Error(), Name: addr, IsTemporary: isTemporary, IsNotFound: isErrorNoSuchHost} } - for i := 0; i < len(b); i++ { - if b[i] == 0 { - b = b[:i] - break - } + if i := bytealg.IndexByte(b, 0); i != -1 { + b = b[:i] } return []string{absDomainName(string(b))}, nil } @@ -310,15 +319,12 @@ func cgoLookupCNAME(ctx context.Context, name string) (cname string, err error, // resSearch will make a call to the 'res_nsearch' routine in the C library // and parse the output as a slice of DNS resources. 
func resSearch(ctx context.Context, hostname string, rtype, class int) ([]dnsmessage.Resource, error) { - return doBlockingWithCtx(ctx, func() ([]dnsmessage.Resource, error) { + return doBlockingWithCtx(ctx, hostname, func() ([]dnsmessage.Resource, error) { return cgoResSearch(hostname, rtype, class) }) } func cgoResSearch(hostname string, rtype, class int) ([]dnsmessage.Resource, error) { - acquireThread() - defer releaseThread() - resStateSize := unsafe.Sizeof(_C_struct___res_state{}) var state *_C_struct___res_state if resStateSize > 0 { @@ -352,7 +358,7 @@ func cgoResSearch(hostname string, rtype, class int) ([]dnsmessage.Resource, err var size int for { - size, _ = _C_res_nsearch(state, (*_C_char)(unsafe.Pointer(s)), class, rtype, buf, bufSize) + size := _C_res_nsearch(state, (*_C_char)(unsafe.Pointer(s)), class, rtype, buf, bufSize) if size <= 0 || size > 0xffff { return nil, errors.New("res_nsearch failure") } diff --git a/src/net/cgo_unix_cgo_res.go b/src/net/cgo_unix_cgo_res.go index 37bbc9a762..c5f30238a1 100644 --- a/src/net/cgo_unix_cgo_res.go +++ b/src/net/cgo_unix_cgo_res.go @@ -32,7 +32,7 @@ func _C_res_nclose(state *_C_struct___res_state) { return } -func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_uchar, anslen int) (int, error) { - x, err := C.res_search(dname, C.int(class), C.int(typ), ans, C.int(anslen)) - return int(x), err +func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_uchar, anslen int) int { + x := C.res_search(dname, C.int(class), C.int(typ), ans, C.int(anslen)) + return int(x) } diff --git a/src/net/cgo_unix_cgo_resn.go b/src/net/cgo_unix_cgo_resn.go index 4a5ff165df..4fc747b5a3 100644 --- a/src/net/cgo_unix_cgo_resn.go +++ b/src/net/cgo_unix_cgo_resn.go @@ -33,7 +33,7 @@ func _C_res_nclose(state *_C_struct___res_state) { C.res_nclose(state) } -func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_uchar, anslen int) 
(int, error) { - x, err := C.res_nsearch(state, dname, C.int(class), C.int(typ), ans, C.int(anslen)) - return int(x), err +func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_uchar, anslen int) int { + x := C.res_nsearch(state, dname, C.int(class), C.int(typ), ans, C.int(anslen)) + return int(x) } diff --git a/src/net/cgo_unix_syscall.go b/src/net/cgo_unix_syscall.go index ac9aaa78fe..735dcdfe36 100644 --- a/src/net/cgo_unix_syscall.go +++ b/src/net/cgo_unix_syscall.go @@ -73,8 +73,9 @@ func _C_res_ninit(state *_C_struct___res_state) error { return nil } -func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_char, anslen int) (int, error) { - return unix.ResNsearch(state, dname, class, typ, ans, anslen) +func _C_res_nsearch(state *_C_struct___res_state, dname *_C_char, class, typ int, ans *_C_char, anslen int) int { + x, _ := unix.ResNsearch(state, dname, class, typ, ans, anslen) + return x } func _C_res_nclose(state *_C_struct___res_state) { diff --git a/src/net/conf.go b/src/net/conf.go index 649ebcfb18..6fe1e4725a 100644 --- a/src/net/conf.go +++ b/src/net/conf.go @@ -338,13 +338,6 @@ func (c *conf) lookupOrder(r *Resolver, hostname string) (ret hostLookupOrder, d if stringsHasSuffix(hostname, ".") { hostname = hostname[:len(hostname)-1] } - if canUseCgo && stringsHasSuffixFold(hostname, ".local") { - // Per RFC 6762, the ".local" TLD is special. And - // because Go's native resolver doesn't do mDNS or - // similar local resolution mechanisms, assume that - // libc might (via Avahi, etc) and use cgo. - return hostLookupCgo, dnsConf - } nss := getSystemNSS() srcs := nss.sources["hosts"] @@ -404,9 +397,13 @@ func (c *conf) lookupOrder(r *Resolver, hostname string) (ret hostLookupOrder, d } continue case hostname != "" && stringsHasPrefix(src.source, "mdns"): - // e.g. "mdns4", "mdns4_minimal" - // We already returned true before if it was *.local. 
- // libc wouldn't have found a hit on this anyway. + if stringsHasSuffixFold(hostname, ".local") { + // Per RFC 6762, the ".local" TLD is special. And + // because Go's native resolver doesn't do mDNS or + // similar local resolution mechanisms, assume that + // libc might (via Avahi, etc) and use cgo. + return hostLookupCgo, dnsConf + } // We don't parse mdns.allow files. They're rare. If one // exists, it might list other TLDs (besides .local) or even @@ -522,7 +519,7 @@ func isGateway(h string) bool { return stringsEqualFold(h, "_gateway") } -// isOutbound reports whether h should be considered a "outbound" +// isOutbound reports whether h should be considered an "outbound" // name for the myhostname NSS module. func isOutbound(h string) bool { return stringsEqualFold(h, "_outbound") diff --git a/src/net/conf_test.go b/src/net/conf_test.go index 0f324b245a..6ebd6be635 100644 --- a/src/net/conf_test.go +++ b/src/net/conf_test.go @@ -257,7 +257,7 @@ func TestConfHostLookupOrder(t *testing.T) { hostTests: []nssHostTest{ {"x.com", "myhostname", hostLookupFilesDNS}, {"x", "myhostname", hostLookupFilesDNS}, - {"x.local", "myhostname", hostLookupCgo}, + {"x.local", "myhostname", hostLookupFilesDNS}, }, }, { @@ -268,7 +268,7 @@ func TestConfHostLookupOrder(t *testing.T) { hostTests: []nssHostTest{ {"x.com", "myhostname", hostLookupDNSFiles}, {"x", "myhostname", hostLookupDNSFiles}, - {"x.local", "myhostname", hostLookupCgo}, + {"x.local", "myhostname", hostLookupDNSFiles}, }, }, { diff --git a/src/net/dial.go b/src/net/dial.go index 7ca9b4a468..28f346a372 100644 --- a/src/net/dial.go +++ b/src/net/dial.go @@ -14,9 +14,16 @@ import ( ) const ( - // defaultTCPKeepAlive is a default constant value for TCPKeepAlive times - // See go.dev/issue/31510 - defaultTCPKeepAlive = 15 * time.Second + // defaultTCPKeepAliveIdle is a default constant value for TCP_KEEPIDLE. + // See go.dev/issue/31510 for details. 
+ defaultTCPKeepAliveIdle = 15 * time.Second + + // defaultTCPKeepAliveInterval is a default constant value for TCP_KEEPINTVL. + // It is the same as defaultTCPKeepAliveIdle, see go.dev/issue/31510 for details. + defaultTCPKeepAliveInterval = 15 * time.Second + + // defaultTCPKeepAliveCount is a default constant value for TCP_KEEPCNT. + defaultTCPKeepAliveCount = 9 // For the moment, MultiPath TCP is not used by default // See go.dev/issue/56539 @@ -65,7 +72,7 @@ func (m *mptcpStatus) set(use bool) { // // The zero value for each field is equivalent to dialing // without that option. Dialing with the zero value of Dialer -// is therefore equivalent to just calling the Dial function. +// is therefore equivalent to just calling the [Dial] function. // // It is safe to call Dialer's methods concurrently. type Dialer struct { @@ -116,13 +123,25 @@ type Dialer struct { // KeepAlive specifies the interval between keep-alive // probes for an active network connection. + // + // KeepAlive is ignored if KeepAliveConfig.Enable is true. + // // If zero, keep-alive probes are sent with a default value // (currently 15 seconds), if supported by the protocol and operating // system. Network protocols or operating systems that do - // not support keep-alives ignore this field. + // not support keep-alive ignore this field. // If negative, keep-alive probes are disabled. KeepAlive time.Duration + // KeepAliveConfig specifies the keep-alive probe configuration + // for an active network connection, when supported by the + // protocol and operating system. + // + // If KeepAliveConfig.Enable is true, keep-alive probes are enabled. + // If KeepAliveConfig.Enable is false and KeepAlive is negative, + // keep-alive probes are disabled. + KeepAliveConfig KeepAliveConfig + // Resolver optionally specifies an alternate resolver to use. 
Resolver *Resolver @@ -338,7 +357,7 @@ func (d *Dialer) MultipathTCP() bool { return d.mptcpStatus.get() } -// SetMultipathTCP directs the Dial methods to use, or not use, MPTCP, +// SetMultipathTCP directs the [Dial] methods to use, or not use, MPTCP, // if supported by the operating system. This method overrides the // system default and the GODEBUG=multipathtcp=... setting if any. // @@ -363,7 +382,7 @@ func (d *Dialer) SetMultipathTCP(use bool) { // brackets, as in "[2001:db8::1]:80" or "[fe80::1%zone]:80". // The zone specifies the scope of the literal IPv6 address as defined // in RFC 4007. -// The functions JoinHostPort and SplitHostPort manipulate a pair of +// The functions [JoinHostPort] and [SplitHostPort] manipulate a pair of // host and port in this form. // When using TCP, and the host resolves to multiple IP addresses, // Dial will try each IP address in order until one succeeds. @@ -401,7 +420,7 @@ func Dial(network, address string) (Conn, error) { return d.Dial(network, address) } -// DialTimeout acts like Dial but takes a timeout. +// DialTimeout acts like [Dial] but takes a timeout. // // The timeout includes name resolution, if required. // When using TCP, and the host in the address parameter resolves to @@ -428,8 +447,8 @@ type sysDialer struct { // See func Dial for a description of the network and address // parameters. // -// Dial uses context.Background internally; to specify the context, use -// DialContext. +// Dial uses [context.Background] internally; to specify the context, use +// [Dialer.DialContext]. func (d *Dialer) Dial(network, address string) (Conn, error) { return d.DialContext(context.Background(), network, address) } @@ -450,7 +469,7 @@ func (d *Dialer) Dial(network, address string) (Conn, error) { // the connect to each single address will be given 15 seconds to complete // before trying the next one. 
// -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. func (d *Dialer) DialContext(ctx context.Context, network, address string) (Conn, error) { if ctx == nil { @@ -680,12 +699,24 @@ type ListenConfig struct { // KeepAlive specifies the keep-alive period for network // connections accepted by this listener. - // If zero, keep-alives are enabled if supported by the protocol + // + // KeepAlive is ignored if KeepAliveConfig.Enable is true. + // + // If zero, keep-alive are enabled if supported by the protocol // and operating system. Network protocols or operating systems - // that do not support keep-alives ignore this field. - // If negative, keep-alives are disabled. + // that do not support keep-alive ignore this field. + // If negative, keep-alive are disabled. KeepAlive time.Duration + // KeepAliveConfig specifies the keep-alive probe configuration + // for an active network connection, when supported by the + // protocol and operating system. + // + // If KeepAliveConfig.Enable is true, keep-alive probes are enabled. + // If KeepAliveConfig.Enable is false and KeepAlive is negative, + // keep-alive probes are disabled. + KeepAliveConfig KeepAliveConfig + // If mptcpStatus is set to a value allowing Multipath TCP (MPTCP) to be // used, any call to Listen with "tcp(4|6)" as network will use MPTCP if // supported by the operating system. @@ -700,7 +731,7 @@ func (lc *ListenConfig) MultipathTCP() bool { return lc.mptcpStatus.get() } -// SetMultipathTCP directs the Listen method to use, or not use, MPTCP, +// SetMultipathTCP directs the [Listen] method to use, or not use, MPTCP, // if supported by the operating system. This method overrides the // system default and the GODEBUG=multipathtcp=... setting if any. // @@ -795,14 +826,14 @@ type sysListener struct { // addresses. 
// If the port in the address parameter is empty or "0", as in // "127.0.0.1:" or "[::1]:0", a port number is automatically chosen. -// The Addr method of Listener can be used to discover the chosen +// The [Addr] method of [Listener] can be used to discover the chosen // port. // -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. // // Listen uses context.Background internally; to specify the context, use -// ListenConfig.Listen. +// [ListenConfig.Listen]. func Listen(network, address string) (Listener, error) { var lc ListenConfig return lc.Listen(context.Background(), network, address) @@ -825,14 +856,14 @@ func Listen(network, address string) (Listener, error) { // addresses. // If the port in the address parameter is empty or "0", as in // "127.0.0.1:" or "[::1]:0", a port number is automatically chosen. -// The LocalAddr method of PacketConn can be used to discover the +// The LocalAddr method of [PacketConn] can be used to discover the // chosen port. // -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. // // ListenPacket uses context.Background internally; to specify the context, use -// ListenConfig.ListenPacket. +// [ListenConfig.ListenPacket]. 
func ListenPacket(network, address string) (PacketConn, error) { var lc ListenConfig return lc.ListenPacket(context.Background(), network, address) diff --git a/src/net/dial_test.go b/src/net/dial_test.go index 1d0832e46e..b3bedb2fa2 100644 --- a/src/net/dial_test.go +++ b/src/net/dial_test.go @@ -690,6 +690,10 @@ func TestDialerDualStack(t *testing.T) { } func TestDialerKeepAlive(t *testing.T) { + t.Cleanup(func() { + testHookSetKeepAlive = func(KeepAliveConfig) {} + }) + handler := func(ls *localServer, ln Listener) { for { c, err := ln.Accept() @@ -699,26 +703,30 @@ func TestDialerKeepAlive(t *testing.T) { c.Close() } } - ls := newLocalServer(t, "tcp") + ln := newLocalListener(t, "tcp", &ListenConfig{ + KeepAlive: -1, // prevent calling hook from accepting + }) + ls := (&streamListener{Listener: ln}).newLocalServer() defer ls.teardown() if err := ls.buildup(handler); err != nil { t.Fatal(err) } - defer func() { testHookSetKeepAlive = func(time.Duration) {} }() tests := []struct { ka time.Duration expected time.Duration }{ {-1, -1}, - {0, 15 * time.Second}, + {0, 0}, {5 * time.Second, 5 * time.Second}, {30 * time.Second, 30 * time.Second}, } + var got time.Duration = -1 + testHookSetKeepAlive = func(cfg KeepAliveConfig) { got = cfg.Idle } + for _, test := range tests { - var got time.Duration = -1 - testHookSetKeepAlive = func(d time.Duration) { got = d } + got = -1 d := Dialer{KeepAlive: test.ka} c, err := d.Dial("tcp", ls.Listener.Addr().String()) if err != nil { diff --git a/src/net/dnsclient.go b/src/net/dnsclient.go index b609dbd468..2b4cb4f3ba 100644 --- a/src/net/dnsclient.go +++ b/src/net/dnsclient.go @@ -8,15 +8,18 @@ import ( "internal/bytealg" "internal/itoa" "sort" + _ "unsafe" // for go:linkname "golang.org/x/net/dns/dnsmessage" ) // provided by runtime -func fastrandu() uint +// +//go:linkname runtime_rand runtime.rand +func runtime_rand() uint64 func randInt() int { - return int(fastrandu() >> 1) // clear sign bit + return int(uint(runtime_rand()) 
>> 1) // clear sign bit } func randIntn(n int) int { diff --git a/src/net/dnsclient_unix.go b/src/net/dnsclient_unix.go index c291d5eb4f..e9e7e4df15 100644 --- a/src/net/dnsclient_unix.go +++ b/src/net/dnsclient_unix.go @@ -194,7 +194,14 @@ func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Que if err := p.SkipQuestion(); err != dnsmessage.ErrSectionDone { return dnsmessage.Parser{}, dnsmessage.Header{}, errInvalidDNSResponse } - if h.Truncated { // see RFC 5966 + // RFC 5966 indicates that when a client receives a UDP response with + // the TC flag set, it should take the TC flag as an indication that it + // should retry over TCP instead. + // The case when the TC flag is set in a TCP response is not well specified, + // so this implements the glibc resolver behavior, returning the existing + // dns response instead of returning a "errNoAnswerFromDNSServer" error. + // See go.dev/issue/64896 + if h.Truncated && network == "udp" { continue } return p, h, nil @@ -204,7 +211,7 @@ func (r *Resolver) exchange(ctx context.Context, server string, q dnsmessage.Que // checkHeader performs basic sanity checks on the header. func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header) error { - rcode := extractExtendedRCode(*p, h) + rcode, hasAdd := extractExtendedRCode(*p, h) if rcode == dnsmessage.RCodeNameError { return errNoSuchHost @@ -217,7 +224,7 @@ func checkHeader(p *dnsmessage.Parser, h dnsmessage.Header) error { // libresolv continues to the next server when it receives // an invalid referral response. See golang.org/issue/15434. 
- if rcode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone { + if rcode == dnsmessage.RCodeSuccess && !h.Authoritative && !h.RecursionAvailable && err == dnsmessage.ErrSectionDone && !hasAdd { return errLameReferral } @@ -256,16 +263,19 @@ func skipToAnswer(p *dnsmessage.Parser, qtype dnsmessage.Type) error { // extractExtendedRCode extracts the extended RCode from the OPT resource (EDNS(0)) // If an OPT record is not found, the RCode from the hdr is returned. -func extractExtendedRCode(p dnsmessage.Parser, hdr dnsmessage.Header) dnsmessage.RCode { +// Another return value indicates whether an additional resource was found. +func extractExtendedRCode(p dnsmessage.Parser, hdr dnsmessage.Header) (dnsmessage.RCode, bool) { p.SkipAllAnswers() p.SkipAllAuthorities() + hasAdd := false for { ahdr, err := p.AdditionalHeader() if err != nil { - return hdr.RCode + return hdr.RCode, hasAdd } + hasAdd = true if ahdr.Type == dnsmessage.TypeOPT { - return ahdr.ExtendedRCode(hdr.RCode) + return ahdr.ExtendedRCode(hdr.RCode), hasAdd } p.SkipAdditional() } diff --git a/src/net/dnsclient_unix_test.go b/src/net/dnsclient_unix_test.go index 0da36303cc..0fad9e94ba 100644 --- a/src/net/dnsclient_unix_test.go +++ b/src/net/dnsclient_unix_test.go @@ -94,6 +94,61 @@ func TestDNSTransportFallback(t *testing.T) { } } +func TestDNSTransportNoFallbackOnTCP(t *testing.T) { + fake := fakeDNSServer{ + rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + r := dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.Header.ID, + Response: true, + RCode: dnsmessage.RCodeSuccess, + Truncated: true, + }, + Questions: q.Questions, + } + if n == "tcp" { + r.Answers = []dnsmessage.Resource{ + { + Header: dnsmessage.ResourceHeader{ + Name: q.Questions[0].Name, + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + Length: 4, + }, + Body: &dnsmessage.AResource{ + A: TestAddr, + }, + }, + } + } + return r, 
nil + }, + } + r := Resolver{PreferGo: true, Dial: fake.DialContext} + for _, tt := range dnsTransportFallbackTests { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + p, h, err := r.exchange(ctx, tt.server, tt.question, time.Second, useUDPOrTCP, false) + if err != nil { + t.Error(err) + continue + } + if h.RCode != tt.rcode { + t.Errorf("got %v from %v; want %v", h.RCode, tt.server, tt.rcode) + continue + } + a, err := p.AllAnswers() + if err != nil { + t.Errorf("unexpected error %v getting all answers from %v", err, tt.server) + continue + } + if len(a) != 1 { + t.Errorf("got %d answers from %v; want 1", len(a), tt.server) + continue + } + } +} + // See RFC 6761 for further information about the reserved, pseudo // domain names. var specialDomainNameTests = []struct { @@ -748,13 +803,25 @@ func TestIgnoreLameReferrals(t *testing.T) { }, } } + } else if s == "192.0.2.1:53" { + if q.Questions[0].Type == dnsmessage.TypeA && strings.HasPrefix(q.Questions[0].Name.String(), "empty.com.") { + var edns0Hdr dnsmessage.ResourceHeader + edns0Hdr.SetEDNS0(maxDNSPacketSize, dnsmessage.RCodeSuccess, false) + + r.Additionals = []dnsmessage.Resource{ + { + Header: edns0Hdr, + Body: &dnsmessage.OPTResource{}, + }, + } + } } return r, nil }} r := Resolver{PreferGo: true, Dial: fake.DialContext} - addrs, err := r.LookupIPAddr(context.Background(), "www.golang.org") + addrs, err := r.LookupIP(context.Background(), "ip4", "www.golang.org") if err != nil { t.Fatal(err) } @@ -766,6 +833,15 @@ func TestIgnoreLameReferrals(t *testing.T) { if got, want := addrs[0].String(), "192.0.2.1"; got != want { t.Fatalf("got address %v, want %v", got, want) } + + _, err = r.LookupIP(context.Background(), "ip4", "empty.com") + de, ok := err.(*DNSError) + if !ok { + t.Fatalf("err = %#v; wanted a *net.DNSError", err) + } + if de.Err != errNoSuchHost.Error() { + t.Fatalf("Err = %#v; wanted %q", de.Err, errNoSuchHost.Error()) + } } func BenchmarkGoLookupIP(b *testing.B) { @@ 
-1775,6 +1851,53 @@ func TestDNSUseTCP(t *testing.T) { } } +func TestDNSUseTCPTruncated(t *testing.T) { + fake := fakeDNSServer{ + rh: func(n, _ string, q dnsmessage.Message, _ time.Time) (dnsmessage.Message, error) { + r := dnsmessage.Message{ + Header: dnsmessage.Header{ + ID: q.Header.ID, + Response: true, + RCode: dnsmessage.RCodeSuccess, + Truncated: true, + }, + Questions: q.Questions, + Answers: []dnsmessage.Resource{ + { + Header: dnsmessage.ResourceHeader{ + Name: q.Questions[0].Name, + Type: dnsmessage.TypeA, + Class: dnsmessage.ClassINET, + Length: 4, + }, + Body: &dnsmessage.AResource{ + A: TestAddr, + }, + }, + }, + } + if n == "udp" { + t.Fatal("udp protocol was used instead of tcp") + } + return r, nil + }, + } + r := Resolver{PreferGo: true, Dial: fake.DialContext} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + p, _, err := r.exchange(ctx, "0.0.0.0", mustQuestion("com.", dnsmessage.TypeALL, dnsmessage.ClassINET), time.Second, useTCPOnly, false) + if err != nil { + t.Fatal("exchange failed:", err) + } + a, err := p.AllAnswers() + if err != nil { + t.Fatalf("unexpected error %v getting all answers", err) + } + if len(a) != 1 { + t.Fatalf("got %d answers; want 1", len(a)) + } +} + // Issue 34660: PTR response with non-PTR answers should ignore non-PTR func TestPTRandNonPTR(t *testing.T) { fake := fakeDNSServer{ diff --git a/src/net/fd_fake.go b/src/net/fd_fake.go index b9361a3c4e..ae567acc69 100644 --- a/src/net/fd_fake.go +++ b/src/net/fd_fake.go @@ -30,7 +30,7 @@ type netFD struct { raddr Addr // The only networking available in WASI preview 1 is the ability to - // sock_accept on an pre-opened socket, and then fd_read, fd_write, + // sock_accept on a pre-opened socket, and then fd_read, fd_write, // fd_close, and sock_shutdown on the resulting connection. We // intercept applicable netFD calls on this instance, and then pass // the remainder of the netFD calls to fakeNetFD. 
diff --git a/src/net/fd_windows.go b/src/net/fd_windows.go index 45a10cf1eb..254a5d491e 100644 --- a/src/net/fd_windows.go +++ b/src/net/fd_windows.go @@ -23,6 +23,10 @@ const ( writeMsgSyscallName = "wsasendmsg" ) +func init() { + poll.InitWSA() +} + // canUseConnectEx reports whether we can use the ConnectEx Windows API call // for the given network type. func canUseConnectEx(net string) bool { @@ -212,6 +216,6 @@ func (fd *netFD) accept() (*netFD, error) { // Unimplemented functions. func (fd *netFD) dup() (*os.File, error) { - // TODO: Implement this + // TODO: Implement this, perhaps using internal/poll.DupCloseOnExec. return nil, syscall.EWINDOWS } diff --git a/src/net/file_plan9.go b/src/net/file_plan9.go index 64aabf93ee..6c2151c409 100644 --- a/src/net/file_plan9.go +++ b/src/net/file_plan9.go @@ -100,7 +100,7 @@ func fileConn(f *os.File) (Conn, error) { switch fd.laddr.(type) { case *TCPAddr: - return newTCPConn(fd, defaultTCPKeepAlive, testHookSetKeepAlive), nil + return newTCPConn(fd, defaultTCPKeepAliveIdle, KeepAliveConfig{}, testPreHookSetKeepAlive, testHookSetKeepAlive), nil case *UDPAddr: return newUDPConn(fd), nil } diff --git a/src/net/file_unix.go b/src/net/file_unix.go index 8b9fc38916..c0212cef65 100644 --- a/src/net/file_unix.go +++ b/src/net/file_unix.go @@ -74,7 +74,7 @@ func fileConn(f *os.File) (Conn, error) { } switch fd.laddr.(type) { case *TCPAddr: - return newTCPConn(fd, defaultTCPKeepAlive, testHookSetKeepAlive), nil + return newTCPConn(fd, defaultTCPKeepAliveIdle, KeepAliveConfig{}, testPreHookSetKeepAlive, testHookSetKeepAlive), nil case *UDPAddr: return newUDPConn(fd), nil case *IPAddr: diff --git a/src/net/hook.go b/src/net/hook.go index eded34d48a..08d1aa8934 100644 --- a/src/net/hook.go +++ b/src/net/hook.go @@ -6,7 +6,6 @@ package net import ( "context" - "time" ) var ( @@ -21,7 +20,8 @@ var ( ) ([]IPAddr, error) { return fn(ctx, network, host) } - testHookSetKeepAlive = func(time.Duration) {} + testPreHookSetKeepAlive = 
func(*netFD) {} + testHookSetKeepAlive = func(KeepAliveConfig) {} // testHookStepTime sleeps until time has moved forward by a nonzero amount. // This helps to avoid flakes in timeout tests by ensuring that an implausibly diff --git a/src/net/http/cgi/child.go b/src/net/http/cgi/child.go index 1411f0b8e8..e29fe20d7d 100644 --- a/src/net/http/cgi/child.go +++ b/src/net/http/cgi/child.go @@ -46,7 +46,7 @@ func envMap(env []string) map[string]string { return m } -// RequestFromMap creates an http.Request from CGI variables. +// RequestFromMap creates an [http.Request] from CGI variables. // The returned Request's Body field is not populated. func RequestFromMap(params map[string]string) (*http.Request, error) { r := new(http.Request) @@ -138,10 +138,10 @@ func RequestFromMap(params map[string]string) (*http.Request, error) { return r, nil } -// Serve executes the provided Handler on the currently active CGI +// Serve executes the provided [Handler] on the currently active CGI // request, if any. If there's no current CGI environment // an error is returned. The provided handler may be nil to use -// http.DefaultServeMux. +// [http.DefaultServeMux]. func Serve(handler http.Handler) error { req, err := Request() if err != nil { diff --git a/src/net/http/cgi/host_test.go b/src/net/http/cgi/host_test.go index 78e05d592a..7fe0e6257d 100644 --- a/src/net/http/cgi/host_test.go +++ b/src/net/http/cgi/host_test.go @@ -17,8 +17,8 @@ import ( "os" "path/filepath" "reflect" + "regexp" "runtime" - "strconv" "strings" "testing" "time" @@ -363,11 +363,12 @@ func TestInternalRedirect(t *testing.T) { // TestCopyError tests that we kill the process if there's an error copying // its output. (for example, from the client having gone away) +// +// If we fail to do so, the test will time out (and dump its goroutines) with a +// call to [Handler.ServeHTTP] blocked on a deferred call to [exec.Cmd.Wait]. 
func TestCopyError(t *testing.T) { testenv.MustHaveExec(t) - if runtime.GOOS == "windows" { - t.Skipf("skipping test on %q", runtime.GOOS) - } + h := &Handler{ Path: os.Args[0], Root: "/test.cgi", @@ -384,43 +385,47 @@ func TestCopyError(t *testing.T) { if err != nil { t.Fatalf("Write: %v", err) } - res, err := http.ReadResponse(bufio.NewReader(conn), req) if err != nil { t.Fatalf("ReadResponse: %v", err) } - - pidstr := res.Header.Get("X-CGI-Pid") - if pidstr == "" { - t.Fatalf("expected an X-CGI-Pid header in response") - } - pid, err := strconv.Atoi(pidstr) - if err != nil { - t.Fatalf("invalid X-CGI-Pid value") - } - + defer res.Body.Close() var buf [5000]byte n, err := io.ReadFull(res.Body, buf[:]) if err != nil { t.Fatalf("ReadFull: %d bytes, %v", n, err) } - childRunning := func() bool { - return isProcessRunning(pid) - } - - if !childRunning() { - t.Fatalf("pre-conn.Close, expected child to be running") + if !handlerRunning() { + t.Fatalf("pre-conn.Close, expected handler to still be running") } conn.Close() + closed := time.Now() - tries := 0 - for tries < 25 && childRunning() { - time.Sleep(50 * time.Millisecond * time.Duration(tries)) - tries++ + nextSleep := 1 * time.Millisecond + for { + time.Sleep(nextSleep) + nextSleep *= 2 + if !handlerRunning() { + break + } + t.Logf("handler still running %v after conn.Close", time.Since(closed)) } - if childRunning() { - t.Fatalf("post-conn.Close, expected child to be gone") +} + +// handlerRunning reports whether any goroutine is currently running +// [Handler.ServeHTTP]. +func handlerRunning() bool { + r := regexp.MustCompile(`net/http/cgi\.\(\*Handler\)\.ServeHTTP`) + buf := make([]byte, 64<<10) + for { + n := runtime.Stack(buf, true) + if n < len(buf) { + return r.Match(buf[:n]) + } + // Buffer wasn't large enough for a full goroutine dump. + // Resize it and try again. 
+ buf = make([]byte, 2*len(buf)) } } diff --git a/src/net/http/cgi/plan9_test.go b/src/net/http/cgi/plan9_test.go deleted file mode 100644 index b7ace3f81c..0000000000 --- a/src/net/http/cgi/plan9_test.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 - -package cgi - -import ( - "os" - "strconv" -) - -func isProcessRunning(pid int) bool { - _, err := os.Stat("/proc/" + strconv.Itoa(pid)) - return err == nil -} diff --git a/src/net/http/cgi/posix_test.go b/src/net/http/cgi/posix_test.go deleted file mode 100644 index 49b9470d4a..0000000000 --- a/src/net/http/cgi/posix_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !plan9 - -package cgi - -import ( - "os" - "syscall" -) - -func isProcessRunning(pid int) bool { - p, err := os.FindProcess(pid) - if err != nil { - return false - } - return p.Signal(syscall.Signal(0)) == nil -} diff --git a/src/net/http/client.go b/src/net/http/client.go index 5fd86a1ec8..0f29dbb2c5 100644 --- a/src/net/http/client.go +++ b/src/net/http/client.go @@ -27,19 +27,19 @@ import ( "time" ) -// A Client is an HTTP client. Its zero value (DefaultClient) is a -// usable client that uses DefaultTransport. +// A Client is an HTTP client. Its zero value ([DefaultClient]) is a +// usable client that uses [DefaultTransport]. // -// The Client's Transport typically has internal state (cached TCP +// The [Client.Transport] typically has internal state (cached TCP // connections), so Clients should be reused instead of created as // needed. Clients are safe for concurrent use by multiple goroutines. 
// -// A Client is higher-level than a RoundTripper (such as Transport) +// A Client is higher-level than a [RoundTripper] (such as [Transport]) // and additionally handles HTTP details such as cookies and // redirects. // // When following redirects, the Client will forward all headers set on the -// initial Request except: +// initial [Request] except: // // - when forwarding sensitive headers like "Authorization", // "WWW-Authenticate", and "Cookie" to untrusted targets. @@ -105,11 +105,11 @@ type Client struct { Timeout time.Duration } -// DefaultClient is the default Client and is used by Get, Head, and Post. +// DefaultClient is the default [Client] and is used by [Get], [Head], and [Post]. var DefaultClient = &Client{} // RoundTripper is an interface representing the ability to execute a -// single HTTP transaction, obtaining the Response for a given Request. +// single HTTP transaction, obtaining the [Response] for a given [Request]. // // A RoundTripper must be safe for concurrent use by multiple // goroutines. @@ -439,7 +439,7 @@ func basicAuth(username, password string) string { // // An error is returned if there were too many redirects or if there // was an HTTP protocol error. A non-2xx response doesn't cause an -// error. Any returned error will be of type *url.Error. The url.Error +// error. Any returned error will be of type [*url.Error]. The url.Error // value's Timeout method will report true if the request timed out. // // When err is nil, resp always contains a non-nil resp.Body. @@ -447,10 +447,10 @@ func basicAuth(username, password string) string { // // Get is a wrapper around DefaultClient.Get. // -// To make a request with custom headers, use NewRequest and +// To make a request with custom headers, use [NewRequest] and // DefaultClient.Do. // -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified context.Context, use [NewRequestWithContext] // and DefaultClient.Do. 
func Get(url string) (resp *Response, err error) { return DefaultClient.Get(url) @@ -458,7 +458,7 @@ func Get(url string) (resp *Response, err error) { // Get issues a GET to the specified URL. If the response is one of the // following redirect codes, Get follows the redirect after calling the -// Client's CheckRedirect function: +// [Client.CheckRedirect] function: // // 301 (Moved Permanently) // 302 (Found) @@ -466,18 +466,18 @@ func Get(url string) (resp *Response, err error) { // 307 (Temporary Redirect) // 308 (Permanent Redirect) // -// An error is returned if the Client's CheckRedirect function fails +// An error is returned if the [Client.CheckRedirect] function fails // or if there was an HTTP protocol error. A non-2xx response doesn't -// cause an error. Any returned error will be of type *url.Error. The +// cause an error. Any returned error will be of type [*url.Error]. The // url.Error value's Timeout method will report true if the request // timed out. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // -// To make a request with custom headers, use NewRequest and Client.Do. +// To make a request with custom headers, use [NewRequest] and [Client.Do]. // -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified context.Context, use [NewRequestWithContext] // and Client.Do. func (c *Client) Get(url string) (resp *Response, err error) { req, err := NewRequest("GET", url, nil) @@ -558,10 +558,10 @@ func urlErrorOp(method string) string { // connectivity problem). A non-2xx status code doesn't cause an // error. // -// If the returned error is nil, the Response will contain a non-nil +// If the returned error is nil, the [Response] will contain a non-nil // Body which the user is expected to close. 
If the Body is not both -// read to EOF and closed, the Client's underlying RoundTripper -// (typically Transport) may not be able to re-use a persistent TCP +// read to EOF and closed, the [Client]'s underlying [RoundTripper] +// (typically [Transport]) may not be able to re-use a persistent TCP // connection to the server for a subsequent "keep-alive" request. // // The request Body, if non-nil, will be closed by the underlying @@ -570,9 +570,9 @@ func urlErrorOp(method string) string { // // On error, any Response can be ignored. A non-nil Response with a // non-nil error only occurs when CheckRedirect fails, and even then -// the returned Response.Body is already closed. +// the returned [Response.Body] is already closed. // -// Generally Get, Post, or PostForm will be used instead of Do. +// Generally [Get], [Post], or [PostForm] will be used instead of Do. // // If the server replies with a redirect, the Client first uses the // CheckRedirect function to determine whether the redirect should be @@ -580,11 +580,11 @@ func urlErrorOp(method string) string { // subsequent requests to use HTTP method GET // (or HEAD if the original request was HEAD), with no body. // A 307 or 308 redirect preserves the original HTTP method and body, -// provided that the Request.GetBody function is defined. -// The NewRequest function automatically sets GetBody for common +// provided that the [Request.GetBody] function is defined. +// The [NewRequest] function automatically sets GetBody for common // standard library body types. // -// Any returned error will be of type *url.Error. The url.Error +// Any returned error will be of type [*url.Error]. The url.Error // value's Timeout method will report true if the request timed out. 
func (c *Client) Do(req *Request) (*Response, error) { return c.do(req) @@ -725,10 +725,7 @@ func (c *Client) do(req *Request) (retres *Response, reterr error) { // c.send() always closes req.Body reqBodyClosed = true if !deadline.IsZero() && didTimeout() { - err = &httpError{ - err: err.Error() + " (Client.Timeout exceeded while awaiting headers)", - timeout: true, - } + err = &timeoutError{err.Error() + " (Client.Timeout exceeded while awaiting headers)"} } return nil, uerr(err) } @@ -818,17 +815,17 @@ func defaultCheckRedirect(req *Request, via []*Request) error { // // Caller should close resp.Body when done reading from it. // -// If the provided body is an io.Closer, it is closed after the +// If the provided body is an [io.Closer], it is closed after the // request. // // Post is a wrapper around DefaultClient.Post. // -// To set custom headers, use NewRequest and DefaultClient.Do. +// To set custom headers, use [NewRequest] and DefaultClient.Do. // -// See the Client.Do method documentation for details on how redirects +// See the [Client.Do] method documentation for details on how redirects // are handled. // -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified context.Context, use [NewRequestWithContext] // and DefaultClient.Do. func Post(url, contentType string, body io.Reader) (resp *Response, err error) { return DefaultClient.Post(url, contentType, body) @@ -838,13 +835,13 @@ func Post(url, contentType string, body io.Reader) (resp *Response, err error) { // // Caller should close resp.Body when done reading from it. // -// If the provided body is an io.Closer, it is closed after the +// If the provided body is an [io.Closer], it is closed after the // request. // -// To set custom headers, use NewRequest and Client.Do. +// To set custom headers, use [NewRequest] and [Client.Do]. // -// To make a request with a specified context.Context, use NewRequestWithContext -// and Client.Do. 
+// To make a request with a specified context.Context, use [NewRequestWithContext] +// and [Client.Do]. // // See the Client.Do method documentation for details on how redirects // are handled. @@ -861,17 +858,17 @@ func (c *Client) Post(url, contentType string, body io.Reader) (resp *Response, // values URL-encoded as the request body. // // The Content-Type header is set to application/x-www-form-urlencoded. -// To set other headers, use NewRequest and DefaultClient.Do. +// To set other headers, use [NewRequest] and DefaultClient.Do. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. // // PostForm is a wrapper around DefaultClient.PostForm. // -// See the Client.Do method documentation for details on how redirects +// See the [Client.Do] method documentation for details on how redirects // are handled. // -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified [context.Context], use [NewRequestWithContext] // and DefaultClient.Do. func PostForm(url string, data url.Values) (resp *Response, err error) { return DefaultClient.PostForm(url, data) @@ -881,7 +878,7 @@ func PostForm(url string, data url.Values) (resp *Response, err error) { // with data's keys and values URL-encoded as the request body. // // The Content-Type header is set to application/x-www-form-urlencoded. -// To set other headers, use NewRequest and Client.Do. +// To set other headers, use [NewRequest] and [Client.Do]. // // When err is nil, resp always contains a non-nil resp.Body. // Caller should close resp.Body when done reading from it. @@ -889,7 +886,7 @@ func PostForm(url string, data url.Values) (resp *Response, err error) { // See the Client.Do method documentation for details on how redirects // are handled. 
// -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified context.Context, use [NewRequestWithContext] // and Client.Do. func (c *Client) PostForm(url string, data url.Values) (resp *Response, err error) { return c.Post(url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) @@ -907,7 +904,7 @@ func (c *Client) PostForm(url string, data url.Values) (resp *Response, err erro // // Head is a wrapper around DefaultClient.Head. // -// To make a request with a specified context.Context, use NewRequestWithContext +// To make a request with a specified [context.Context], use [NewRequestWithContext] // and DefaultClient.Do. func Head(url string) (resp *Response, err error) { return DefaultClient.Head(url) @@ -915,7 +912,7 @@ func Head(url string) (resp *Response, err error) { // Head issues a HEAD to the specified URL. If the response is one of the // following redirect codes, Head follows the redirect after calling the -// Client's CheckRedirect function: +// [Client.CheckRedirect] function: // // 301 (Moved Permanently) // 302 (Found) @@ -923,8 +920,8 @@ func Head(url string) (resp *Response, err error) { // 307 (Temporary Redirect) // 308 (Permanent Redirect) // -// To make a request with a specified context.Context, use NewRequestWithContext -// and Client.Do. +// To make a request with a specified [context.Context], use [NewRequestWithContext] +// and [Client.Do]. func (c *Client) Head(url string) (resp *Response, err error) { req, err := NewRequest("HEAD", url, nil) if err != nil { @@ -933,12 +930,12 @@ func (c *Client) Head(url string) (resp *Response, err error) { return c.Do(req) } -// CloseIdleConnections closes any connections on its Transport which +// CloseIdleConnections closes any connections on its [Transport] which // were previously connected from previous requests but are now // sitting idle in a "keep-alive" state. 
It does not interrupt any // connections currently in use. // -// If the Client's Transport does not have a CloseIdleConnections method +// If [Client.Transport] does not have a [Transport.CloseIdleConnections] method // then this method does nothing. func (c *Client) CloseIdleConnections() { type closeIdler interface { @@ -968,10 +965,7 @@ func (b *cancelTimerBody) Read(p []byte) (n int, err error) { return n, err } if b.reqDidTimeout() { - err = &httpError{ - err: err.Error() + " (Client.Timeout or context cancellation while reading body)", - timeout: true, - } + err = &timeoutError{err.Error() + " (Client.Timeout or context cancellation while reading body)"} } return n, err } @@ -1014,6 +1008,12 @@ func isDomainOrSubdomain(sub, parent string) bool { if sub == parent { return true } + // If sub contains a :, it's probably an IPv6 address (and is definitely not a hostname). + // Don't check the suffix in this case, to avoid matching the contents of an IPv6 zone. + // For example, "::1%.www.example.com" is not a subdomain of "www.example.com". + if strings.ContainsAny(sub, ":%") { + return false + } // If sub is "foo.example.com" and parent is "example.com", // that means sub must end in "."+parent. // Do it without allocating. 
diff --git a/src/net/http/client_test.go b/src/net/http/client_test.go index 7459b9cb6e..569b58ca62 100644 --- a/src/net/http/client_test.go +++ b/src/net/http/client_test.go @@ -1249,6 +1249,9 @@ func testClientTimeout(t *testing.T, mode testMode) { } else if !ne.Timeout() { t.Errorf("net.Error.Timeout = false; want true") } + if !errors.Is(err, context.DeadlineExceeded) { + t.Errorf("ReadAll error = %q; expected some context.DeadlineExceeded", err) + } if got := ne.Error(); !strings.Contains(got, "(Client.Timeout") { if runtime.GOOS == "windows" && strings.HasPrefix(runtime.GOARCH, "arm") { testenv.SkipFlaky(t, 43120) @@ -1292,6 +1295,9 @@ func testClientTimeout_Headers(t *testing.T, mode testMode) { if !ne.Timeout() { t.Error("net.Error.Timeout = false; want true") } + if !errors.Is(err, context.DeadlineExceeded) { + t.Errorf("ReadAll error = %q; expected some context.DeadlineExceeded", err) + } if got := ne.Error(); !strings.Contains(got, "Client.Timeout exceeded") { if runtime.GOOS == "windows" && strings.HasPrefix(runtime.GOARCH, "arm") { testenv.SkipFlaky(t, 43120) @@ -1711,6 +1717,7 @@ func TestShouldCopyHeaderOnRedirect(t *testing.T) { {"authorization", "http://foo.com/", "https://foo.com/", true}, {"authorization", "http://foo.com:1234/", "http://foo.com:4321/", true}, {"www-authenticate", "http://foo.com/", "http://bar.com/", false}, + {"authorization", "http://foo.com/", "http://[::1%25.foo.com]/", false}, // But subdomains should work: {"www-authenticate", "http://foo.com/", "http://foo.com/", true}, @@ -1992,6 +1999,9 @@ func testClientDoCanceledVsTimeout(t *testing.T, mode testMode) { if g, w := ue.Err, wantErr; g != w { t.Errorf("url.Error.Err = %v; want %v", g, w) } + if got := errors.Is(err, context.DeadlineExceeded); got != wantIsTimeout { + t.Errorf("errors.Is(err, context.DeadlineExceeded) = %v, want %v", got, wantIsTimeout) + } }) } } diff --git a/src/net/http/cookie.go b/src/net/http/cookie.go index 912fde6b95..c22897f3f9 100644 --- 
a/src/net/http/cookie.go +++ b/src/net/http/cookie.go @@ -163,7 +163,7 @@ func readSetCookies(h Header) []*Cookie { return cookies } -// SetCookie adds a Set-Cookie header to the provided ResponseWriter's headers. +// SetCookie adds a Set-Cookie header to the provided [ResponseWriter]'s headers. // The provided cookie must have a valid Name. Invalid cookies may be // silently dropped. func SetCookie(w ResponseWriter, cookie *Cookie) { @@ -172,7 +172,7 @@ func SetCookie(w ResponseWriter, cookie *Cookie) { } } -// String returns the serialization of the cookie for use in a Cookie +// String returns the serialization of the cookie for use in a [Cookie] // header (if only Name and Value are set) or a Set-Cookie response // header (if other fields are set). // If c is nil or c.Name is invalid, the empty string is returned. diff --git a/src/net/http/cookiejar/jar.go b/src/net/http/cookiejar/jar.go index 46d1193951..e7f5ddd4d0 100644 --- a/src/net/http/cookiejar/jar.go +++ b/src/net/http/cookiejar/jar.go @@ -73,7 +73,7 @@ type Jar struct { nextSeqNum uint64 } -// New returns a new cookie jar. A nil *Options is equivalent to a zero +// New returns a new cookie jar. A nil [*Options] is equivalent to a zero // Options. func New(o *Options) (*Jar, error) { jar := &Jar{ @@ -151,7 +151,7 @@ func hasDotSuffix(s, suffix string) bool { return len(s) > len(suffix) && s[len(s)-len(suffix)-1] == '.' && s[len(s)-len(suffix):] == suffix } -// Cookies implements the Cookies method of the http.CookieJar interface. +// Cookies implements the Cookies method of the [http.CookieJar] interface. // // It returns an empty slice if the URL's scheme is not HTTP or HTTPS. func (j *Jar) Cookies(u *url.URL) (cookies []*http.Cookie) { @@ -226,7 +226,7 @@ func (j *Jar) cookies(u *url.URL, now time.Time) (cookies []*http.Cookie) { return cookies } -// SetCookies implements the SetCookies method of the http.CookieJar interface. 
+// SetCookies implements the SetCookies method of the [http.CookieJar] interface. // // It does nothing if the URL's scheme is not HTTP or HTTPS. func (j *Jar) SetCookies(u *url.URL, cookies []*http.Cookie) { @@ -362,6 +362,13 @@ func jarKey(host string, psl PublicSuffixList) string { // isIP reports whether host is an IP address. func isIP(host string) bool { + if strings.ContainsAny(host, ":%") { + // Probable IPv6 address. + // Hostnames can't contain : or %, so this is definitely not a valid host. + // Treating it as an IP is the more conservative option, and avoids the risk + // of interpreting ::1%.www.example.com as a subdomain of www.example.com. + return true + } return net.ParseIP(host) != nil } diff --git a/src/net/http/cookiejar/jar_test.go b/src/net/http/cookiejar/jar_test.go index 56d0695a66..251f7c1617 100644 --- a/src/net/http/cookiejar/jar_test.go +++ b/src/net/http/cookiejar/jar_test.go @@ -252,6 +252,7 @@ var isIPTests = map[string]bool{ "127.0.0.1": true, "1.2.3.4": true, "2001:4860:0:2001::68": true, + "::1%zone": true, "example.com": false, "1.1.1.300": false, "www.foo.bar.net": false, @@ -629,6 +630,15 @@ var basicsTests = [...]jarTest{ {"http://www.host.test:1234/", "a=1"}, }, }, + { + "IPv6 zone is not treated as a host.", + "https://example.com/", + []string{"a=1"}, + "a=1", + []query{ + {"https://[::1%25.example.com]:80/", ""}, + }, + }, } func TestBasics(t *testing.T) { diff --git a/src/net/http/doc.go b/src/net/http/doc.go index d9e6aafb4e..f7ad3ae762 100644 --- a/src/net/http/doc.go +++ b/src/net/http/doc.go @@ -5,7 +5,7 @@ /* Package http provides HTTP client and server implementations. -Get, Head, Post, and PostForm make HTTP (or HTTPS) requests: +[Get], [Head], [Post], and [PostForm] make HTTP (or HTTPS) requests: resp, err := http.Get("http://example.com/") ... 
@@ -27,7 +27,7 @@ The caller must close the response body when finished with it: # Clients and Transports For control over HTTP client headers, redirect policy, and other -settings, create a Client: +settings, create a [Client]: client := &http.Client{ CheckRedirect: redirectPolicyFunc, @@ -43,7 +43,7 @@ settings, create a Client: // ... For control over proxies, TLS configuration, keep-alives, -compression, and other settings, create a Transport: +compression, and other settings, create a [Transport]: tr := &http.Transport{ MaxIdleConns: 10, @@ -59,8 +59,8 @@ goroutines and for efficiency should only be created once and re-used. # Servers ListenAndServe starts an HTTP server with a given address and handler. -The handler is usually nil, which means to use DefaultServeMux. -Handle and HandleFunc add handlers to DefaultServeMux: +The handler is usually nil, which means to use [DefaultServeMux]. +[Handle] and [HandleFunc] add handlers to [DefaultServeMux]: http.Handle("/foo", fooHandler) @@ -86,8 +86,8 @@ custom Server: Starting with Go 1.6, the http package has transparent support for the HTTP/2 protocol when using HTTPS. Programs that must disable HTTP/2 -can do so by setting Transport.TLSNextProto (for clients) or -Server.TLSNextProto (for servers) to a non-nil, empty +can do so by setting [Transport.TLSNextProto] (for clients) or +[Server.TLSNextProto] (for servers) to a non-nil, empty map. Alternatively, the following GODEBUG settings are currently supported: @@ -98,7 +98,7 @@ currently supported: Please report any issues before disabling HTTP/2 support: https://golang.org/s/http2bug -The http package's Transport and Server both automatically enable +The http package's [Transport] and [Server] both automatically enable HTTP/2 support for simple configurations. 
To enable HTTP/2 for more complex configurations, to use lower-level HTTP/2 features, or to use a newer version of Go's http2 package, import "golang.org/x/net/http2" diff --git a/src/net/http/fcgi/child.go b/src/net/http/fcgi/child.go index dc82bf7c3a..7665e7d252 100644 --- a/src/net/http/fcgi/child.go +++ b/src/net/http/fcgi/child.go @@ -335,7 +335,7 @@ func (c *child) cleanUp() { // goroutine for each. The goroutine reads requests and then calls handler // to reply to them. // If l is nil, Serve accepts connections from os.Stdin. -// If handler is nil, http.DefaultServeMux is used. +// If handler is nil, [http.DefaultServeMux] is used. func Serve(l net.Listener, handler http.Handler) error { if l == nil { var err error diff --git a/src/net/http/filetransport.go b/src/net/http/filetransport.go index 2a9e9b02ba..7384b22fbe 100644 --- a/src/net/http/filetransport.go +++ b/src/net/http/filetransport.go @@ -15,13 +15,13 @@ type fileTransport struct { fh fileHandler } -// NewFileTransport returns a new RoundTripper, serving the provided -// FileSystem. The returned RoundTripper ignores the URL host in its +// NewFileTransport returns a new [RoundTripper], serving the provided +// [FileSystem]. The returned RoundTripper ignores the URL host in its // incoming requests, as well as most other properties of the // request. // // The typical use case for NewFileTransport is to register the "file" -// protocol with a Transport, as in: +// protocol with a [Transport], as in: // // t := &http.Transport{} // t.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) @@ -32,13 +32,13 @@ func NewFileTransport(fs FileSystem) RoundTripper { return fileTransport{fileHandler{fs}} } -// NewFileTransportFS returns a new RoundTripper, serving the provided +// NewFileTransportFS returns a new [RoundTripper], serving the provided // file system fsys. The returned RoundTripper ignores the URL host in its // incoming requests, as well as most other properties of the // request. 
// // The typical use case for NewFileTransportFS is to register the "file" -// protocol with a Transport, as in: +// protocol with a [Transport], as in: // // fsys := os.DirFS("/") // t := &http.Transport{} diff --git a/src/net/http/fs.go b/src/net/http/fs.go index ace74a7b80..45cf16eed1 100644 --- a/src/net/http/fs.go +++ b/src/net/http/fs.go @@ -9,7 +9,6 @@ package http import ( "errors" "fmt" - "internal/safefilepath" "io" "io/fs" "mime" @@ -25,12 +24,12 @@ import ( "time" ) -// A Dir implements FileSystem using the native file system restricted to a +// A Dir implements [FileSystem] using the native file system restricted to a // specific directory tree. // -// While the FileSystem.Open method takes '/'-separated paths, a Dir's string -// value is a filename on the native file system, not a URL, so it is separated -// by filepath.Separator, which isn't necessarily '/'. +// While the [FileSystem.Open] method takes '/'-separated paths, a Dir's string +// value is a directory path on the native file system, not a URL, so it is separated +// by [filepath.Separator], which isn't necessarily '/'. // // Note that Dir could expose sensitive files and directories. Dir will follow // symlinks pointing out of the directory tree, which can be especially dangerous @@ -67,10 +66,14 @@ func mapOpenError(originalErr error, name string, sep rune, stat func(string) (f return originalErr } -// Open implements FileSystem using os.Open, opening files for reading rooted +// Open implements [FileSystem] using [os.Open], opening files for reading rooted // and relative to the directory d. func (d Dir) Open(name string) (File, error) { - path, err := safefilepath.FromFS(path.Clean("/" + name)) + path := path.Clean("/" + name)[1:] + if path == "" { + path = "." 
+ } + path, err := filepath.Localize(path) if err != nil { return nil, errors.New("http: invalid or unsafe file path") } @@ -89,18 +92,18 @@ func (d Dir) Open(name string) (File, error) { // A FileSystem implements access to a collection of named files. // The elements in a file path are separated by slash ('/', U+002F) // characters, regardless of host operating system convention. -// See the FileServer function to convert a FileSystem to a Handler. +// See the [FileServer] function to convert a FileSystem to a [Handler]. // -// This interface predates the fs.FS interface, which can be used instead: -// the FS adapter function converts an fs.FS to a FileSystem. +// This interface predates the [fs.FS] interface, which can be used instead: +// the [FS] adapter function converts an fs.FS to a FileSystem. type FileSystem interface { Open(name string) (File, error) } -// A File is returned by a FileSystem's Open method and can be -// served by the FileServer implementation. +// A File is returned by a [FileSystem]'s Open method and can be +// served by the [FileServer] implementation. // -// The methods should behave the same as those on an *os.File. +// The methods should behave the same as those on an [*os.File]. type File interface { io.Closer io.Reader @@ -151,6 +154,8 @@ func dirList(w ResponseWriter, r *Request, f File) { sort.Slice(dirs, func(i, j int) bool { return dirs.name(i) < dirs.name(j) }) w.Header().Set("Content-Type", "text/html; charset=utf-8") + fmt.Fprintf(w, "\n") + fmt.Fprintf(w, "\n") fmt.Fprintf(w, "
    \n")
     	for i, n := 0, dirs.len(); i < n; i++ {
     		name := dirs.name(i)
    @@ -167,7 +172,7 @@ func dirList(w ResponseWriter, r *Request, f File) {
     }
     
     // ServeContent replies to the request using the content in the
    -// provided ReadSeeker. The main benefit of ServeContent over io.Copy
    +// provided ReadSeeker. The main benefit of ServeContent over [io.Copy]
     // is that it handles Range requests properly, sets the MIME type, and
     // handles If-Match, If-Unmodified-Since, If-None-Match, If-Modified-Since,
     // and If-Range requests.
    @@ -175,7 +180,7 @@ func dirList(w ResponseWriter, r *Request, f File) {
     // If the response's Content-Type header is not set, ServeContent
     // first tries to deduce the type from name's file extension and,
     // if that fails, falls back to reading the first block of the content
    -// and passing it to DetectContentType.
    +// and passing it to [DetectContentType].
     // The name is otherwise unused; in particular it can be empty and is
     // never sent in the response.
     //
    @@ -190,7 +195,7 @@ func dirList(w ResponseWriter, r *Request, f File) {
     // If the caller has set w's ETag header formatted per RFC 7232, section 2.3,
     // ServeContent uses it to handle requests using If-Match, If-None-Match, or If-Range.
     //
    -// Note that *os.File implements the io.ReadSeeker interface.
    +// Note that [*os.File] implements the [io.ReadSeeker] interface.
     func ServeContent(w ResponseWriter, req *Request, name string, modtime time.Time, content io.ReadSeeker) {
     	sizeFunc := func() (int64, error) {
     		size, err := content.Seek(0, io.SeekEnd)
    @@ -660,11 +665,16 @@ func serveFile(w ResponseWriter, r *Request, fs FileSystem, name string, redirec
     				localRedirect(w, r, path.Base(url)+"/")
     				return
     			}
    -		} else {
    -			if url[len(url)-1] == '/' {
    -				localRedirect(w, r, "../"+path.Base(url))
    +		} else if url[len(url)-1] == '/' {
    +			base := path.Base(url)
    +			if base == "/" || base == "." {
    +				// The FileSystem maps a path like "/" or "/./" to a file instead of a directory.
    +				msg := "http: attempting to traverse a non-directory"
    +				Error(w, msg, StatusInternalServerError)
     				return
     			}
    +			localRedirect(w, r, "../"+base)
    +			return
     		}
     	}
     
    @@ -741,13 +751,13 @@ func localRedirect(w ResponseWriter, r *Request, newPath string) {
     //
     // As a precaution, ServeFile will reject requests where r.URL.Path
     // contains a ".." path element; this protects against callers who
    -// might unsafely use filepath.Join on r.URL.Path without sanitizing
    +// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
     // it and then use that filepath.Join result as the name argument.
     //
     // As another special case, ServeFile redirects any request where r.URL.Path
     // ends in "/index.html" to the same path, without the final
     // "index.html". To avoid such redirects either modify the path or
    -// use ServeContent.
    +// use [ServeContent].
     //
     // Outside of those two special cases, ServeFile does not use
     // r.URL.Path for selecting the file or directory to serve; only the
    @@ -772,11 +782,11 @@ func ServeFile(w ResponseWriter, r *Request, name string) {
     // If the provided file or directory name is a relative path, it is
     // interpreted relative to the current directory and may ascend to
     // parent directories. If the provided name is constructed from user
    -// input, it should be sanitized before calling ServeFile.
    +// input, it should be sanitized before calling [ServeFile].
     //
     // As a precaution, ServeFile will reject requests where r.URL.Path
     // contains a ".." path element; this protects against callers who
    -// might unsafely use filepath.Join on r.URL.Path without sanitizing
    +// might unsafely use [filepath.Join] on r.URL.Path without sanitizing
     // it and then use that filepath.Join result as the name argument.
     //
     // As another special case, ServeFile redirects any request where r.URL.Path
    @@ -890,9 +900,9 @@ func (f ioFile) Readdir(count int) ([]fs.FileInfo, error) {
     	return list, nil
     }
     
    -// FS converts fsys to a FileSystem implementation,
    -// for use with FileServer and NewFileTransport.
    -// The files provided by fsys must implement io.Seeker.
    +// FS converts fsys to a [FileSystem] implementation,
    +// for use with [FileServer] and [NewFileTransport].
    +// The files provided by fsys must implement [io.Seeker].
     func FS(fsys fs.FS) FileSystem {
     	return ioFS{fsys}
     }
    @@ -905,11 +915,11 @@ func FS(fsys fs.FS) FileSystem {
     // "index.html".
     //
     // To use the operating system's file system implementation,
    -// use http.Dir:
    +// use [http.Dir]:
     //
     //	http.Handle("/", http.FileServer(http.Dir("/tmp")))
     //
    -// To use an fs.FS implementation, use http.FileServerFS instead.
    +// To use an [fs.FS] implementation, use [http.FileServerFS] instead.
     func FileServer(root FileSystem) Handler {
     	return &fileHandler{root}
     }
    diff --git a/src/net/http/fs_test.go b/src/net/http/fs_test.go
    index 861e70caf2..4891c28768 100644
    --- a/src/net/http/fs_test.go
    +++ b/src/net/http/fs_test.go
    @@ -27,6 +27,7 @@ import (
     	"reflect"
     	"regexp"
     	"runtime"
    +	"strconv"
     	"strings"
     	"testing"
     	"testing/fstest"
    @@ -325,7 +326,7 @@ func TestFileServerCleans(t *testing.T) {
     
     func TestFileServerEscapesNames(t *testing.T) { run(t, testFileServerEscapesNames) }
     func testFileServerEscapesNames(t *testing.T, mode testMode) {
    -	const dirListPrefix = "
    \n"
    +	const dirListPrefix = "\n\n
    \n"
     	const dirListSuffix = "\n
    \n" tests := []struct { name, escaped string @@ -1668,3 +1669,90 @@ func (grw gzipResponseWriter) Flush() { fw.Flush() } } + +// Issue 63769 +func TestFileServerDirWithRootFile(t *testing.T) { run(t, testFileServerDirWithRootFile) } +func testFileServerDirWithRootFile(t *testing.T, mode testMode) { + testDirFile := func(t *testing.T, h Handler) { + ts := newClientServerTest(t, mode, h).ts + defer ts.Close() + + res, err := ts.Client().Get(ts.URL) + if err != nil { + t.Fatal(err) + } + if g, w := res.StatusCode, StatusInternalServerError; g != w { + t.Errorf("StatusCode mismatch: got %d, want: %d", g, w) + } + res.Body.Close() + } + + t.Run("FileServer", func(t *testing.T) { + testDirFile(t, FileServer(Dir("testdata/index.html"))) + }) + + t.Run("FileServerFS", func(t *testing.T) { + testDirFile(t, FileServerFS(os.DirFS("testdata/index.html"))) + }) +} + +func TestServeContentHeadersWithError(t *testing.T) { + contents := []byte("content") + ts := newClientServerTest(t, http1Mode, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Type", "application/octet-stream") + w.Header().Set("Content-Length", strconv.Itoa(len(contents))) + w.Header().Set("Content-Encoding", "gzip") + w.Header().Set("Etag", `"abcdefgh"`) + w.Header().Set("Last-Modified", "Wed, 21 Oct 2015 07:28:00 GMT") + w.Header().Set("Cache-Control", "immutable") + w.Header().Set("Other-Header", "test") + ServeContent(w, r, "", time.Time{}, bytes.NewReader(contents)) + })).ts + defer ts.Close() + + req, err := NewRequest("GET", ts.URL, nil) + if err != nil { + t.Fatal(err) + } + req.Header.Set("Range", "bytes=100-10000") + + c := ts.Client() + res, err := c.Do(req) + if err != nil { + t.Fatal(err) + } + + out, _ := io.ReadAll(res.Body) + res.Body.Close() + + if g, e := res.StatusCode, 416; g != e { + t.Errorf("got status = %d; want %d", g, e) + } + if g, e := string(out), "invalid range: failed to overlap\n"; g != e { + t.Errorf("got body = %q; want %q", g, e) + } + if g, e := 
res.Header.Get("Content-Type"), "text/plain; charset=utf-8"; g != e { + t.Errorf("got content-type = %q, want %q", g, e) + } + if g, e := res.Header.Get("Content-Length"), strconv.Itoa(len(out)); g != e { + t.Errorf("got content-length = %q, want %q", g, e) + } + if g, e := res.Header.Get("Content-Encoding"), ""; g != e { + t.Errorf("got content-encoding = %q, want %q", g, e) + } + if g, e := res.Header.Get("Etag"), ""; g != e { + t.Errorf("got etag = %q, want %q", g, e) + } + if g, e := res.Header.Get("Last-Modified"), ""; g != e { + t.Errorf("got last-modified = %q, want %q", g, e) + } + if g, e := res.Header.Get("Cache-Control"), "no-cache"; g != e { + t.Errorf("got cache-control = %q, want %q", g, e) + } + if g, e := res.Header.Get("Content-Range"), "bytes */7"; g != e { + t.Errorf("got content-range = %q, want %q", g, e) + } + if g, e := res.Header.Get("Other-Header"), "test"; g != e { + t.Errorf("got other-header = %q, want %q", g, e) + } +} diff --git a/src/net/http/h2_bundle.go b/src/net/http/h2_bundle.go index fea33276d8..2fc8ace61e 100644 --- a/src/net/http/h2_bundle.go +++ b/src/net/http/h2_bundle.go @@ -1041,41 +1041,44 @@ func http2shouldRetryDial(call *http2dialCall, req *Request) bool { // TODO: Benchmark to determine if the pools are necessary. 
The GC may have // improved enough that we can instead allocate chunks like this: // make([]byte, max(16<<10, expectedBytesRemaining)) -var ( - http2dataChunkSizeClasses = []int{ - 1 << 10, - 2 << 10, - 4 << 10, - 8 << 10, - 16 << 10, - } - http2dataChunkPools = [...]sync.Pool{ - {New: func() interface{} { return make([]byte, 1<<10) }}, - {New: func() interface{} { return make([]byte, 2<<10) }}, - {New: func() interface{} { return make([]byte, 4<<10) }}, - {New: func() interface{} { return make([]byte, 8<<10) }}, - {New: func() interface{} { return make([]byte, 16<<10) }}, - } -) +var http2dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return new([1 << 10]byte) }}, + {New: func() interface{} { return new([2 << 10]byte) }}, + {New: func() interface{} { return new([4 << 10]byte) }}, + {New: func() interface{} { return new([8 << 10]byte) }}, + {New: func() interface{} { return new([16 << 10]byte) }}, +} func http2getDataBufferChunk(size int64) []byte { - i := 0 - for ; i < len(http2dataChunkSizeClasses)-1; i++ { - if size <= int64(http2dataChunkSizeClasses[i]) { - break - } + switch { + case size <= 1<<10: + return http2dataChunkPools[0].Get().(*[1 << 10]byte)[:] + case size <= 2<<10: + return http2dataChunkPools[1].Get().(*[2 << 10]byte)[:] + case size <= 4<<10: + return http2dataChunkPools[2].Get().(*[4 << 10]byte)[:] + case size <= 8<<10: + return http2dataChunkPools[3].Get().(*[8 << 10]byte)[:] + default: + return http2dataChunkPools[4].Get().(*[16 << 10]byte)[:] } - return http2dataChunkPools[i].Get().([]byte) } func http2putDataBufferChunk(p []byte) { - for i, n := range http2dataChunkSizeClasses { - if len(p) == n { - http2dataChunkPools[i].Put(p) - return - } + switch len(p) { + case 1 << 10: + http2dataChunkPools[0].Put((*[1 << 10]byte)(p)) + case 2 << 10: + http2dataChunkPools[1].Put((*[2 << 10]byte)(p)) + case 4 << 10: + http2dataChunkPools[2].Put((*[4 << 10]byte)(p)) + case 8 << 10: + http2dataChunkPools[3].Put((*[8 << 10]byte)(p)) + case 16 
<< 10: + http2dataChunkPools[4].Put((*[16 << 10]byte)(p)) + default: + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } - panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) } // dataBuffer is an io.ReadWriter backed by a list of data chunks. @@ -2911,13 +2914,12 @@ func (mh *http2MetaHeadersFrame) checkPseudos() error { } func (fr *http2Framer) maxHeaderStringLen() int { - v := fr.maxHeaderListSize() - if uint32(int(v)) == v { - return int(v) + v := int(fr.maxHeaderListSize()) + if v < 0 { + // If maxHeaderListSize overflows an int, use no limit (0). + return 0 } - // They had a crazy big number for MaxHeaderBytes anyway, - // so give them unlimited header lengths: - return 0 + return v } // readMetaFrame returns 0 or more CONTINUATION frames from fr and @@ -3058,41 +3060,6 @@ func http2summarizeFrame(f http2Frame) string { return buf.String() } -func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { - return trace != nil && trace.WroteHeaderField != nil -} - -func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { - if trace != nil && trace.WroteHeaderField != nil { - trace.WroteHeaderField(k, []string{v}) - } -} - -func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { - if trace != nil { - return trace.Got1xxResponse - } - return nil -} - -// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS -// connection. 
-func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { - dialer := &tls.Dialer{ - Config: cfg, - } - cn, err := dialer.DialContext(ctx, network, addr) - if err != nil { - return nil, err - } - tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed - return tlsCn, nil -} - -func http2tlsUnderlyingConn(tc *tls.Conn) net.Conn { - return tc.NetConn() -} - var http2DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" type http2goroutineLock uint64 @@ -6366,7 +6333,6 @@ type http2responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished - dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -6486,7 +6452,6 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { - rws.dirty = true return 0, err } if endStream { @@ -6507,7 +6472,6 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. 
if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { - rws.dirty = true return 0, err } } @@ -6519,9 +6483,6 @@ func (rws *http2responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) - if err != nil { - rws.dirty = true - } return len(p), err } return len(p), nil @@ -6737,14 +6698,12 @@ func (rws *http2responseWriterState) writeHeader(code int) { h.Del("Transfer-Encoding") } - if rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{ + rws.conn.writeHeaders(rws.stream, &http2writeResHeaders{ streamID: rws.stream.id, httpResCode: code, h: h, endStream: rws.handlerDone && !rws.hasTrailers(), - }) != nil { - rws.dirty = true - } + }) return } @@ -6809,19 +6768,10 @@ func (w *http2responseWriter) write(lenData int, dataB []byte, dataS string) (n func (w *http2responseWriter) handlerDone() { rws := w.rws - dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - if !dirty { - // Only recycle the pool if all prior Write calls to - // the serverConn goroutine completed successfully. If - // they returned earlier due to resets from the peer - // there might still be write goroutines outstanding - // from the serverConn referencing the rws memory. See - // issue 20704. - http2responseWriterStatePool.Put(rws) - } + http2responseWriterStatePool.Put(rws) } // Push errors. 
@@ -8094,7 +8044,7 @@ func (cc *http2ClientConn) forceCloseConn() { if !ok { return } - if nc := http2tlsUnderlyingConn(tc); nc != nil { + if nc := tc.NetConn(); nc != nil { nc.Close() } } @@ -10282,6 +10232,37 @@ func http2traceFirstResponseByte(trace *httptrace.ClientTrace) { } } +func http2traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func http2traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func http2traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} + +// dialTLSWithContext uses tls.Dialer, added in Go 1.15, to open a TLS +// connection. +func (t *http2Transport) dialTLSWithContext(ctx context.Context, network, addr string, cfg *tls.Config) (*tls.Conn, error) { + dialer := &tls.Dialer{ + Config: cfg, + } + cn, err := dialer.DialContext(ctx, network, addr) + if err != nil { + return nil, err + } + tlsCn := cn.(*tls.Conn) // DialContext comment promises this will always succeed + return tlsCn, nil +} + // writeFramer is implemented by any type that is used to write frames. type http2writeFramer interface { writeFrame(http2writeContext) error diff --git a/src/net/http/header.go b/src/net/http/header.go index e0b342c63c..9d0f3a125d 100644 --- a/src/net/http/header.go +++ b/src/net/http/header.go @@ -20,13 +20,13 @@ import ( // A Header represents the key-value pairs in an HTTP header. // // The keys should be in canonical form, as returned by -// CanonicalHeaderKey. +// [CanonicalHeaderKey]. type Header map[string][]string // Add adds the key, value pair to the header. // It appends to any existing values associated with key. // The key is case insensitive; it is canonicalized by -// CanonicalHeaderKey. +// [CanonicalHeaderKey]. 
func (h Header) Add(key, value string) { textproto.MIMEHeader(h).Add(key, value) } @@ -34,7 +34,7 @@ func (h Header) Add(key, value string) { // Set sets the header entries associated with key to the // single element value. It replaces any existing values // associated with key. The key is case insensitive; it is -// canonicalized by textproto.CanonicalMIMEHeaderKey. +// canonicalized by [textproto.CanonicalMIMEHeaderKey]. // To use non-canonical keys, assign to the map directly. func (h Header) Set(key, value string) { textproto.MIMEHeader(h).Set(key, value) @@ -42,7 +42,7 @@ func (h Header) Set(key, value string) { // Get gets the first value associated with the given key. If // there are no values associated with the key, Get returns "". -// It is case insensitive; textproto.CanonicalMIMEHeaderKey is +// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is // used to canonicalize the provided key. Get assumes that all // keys are stored in canonical form. To use non-canonical keys, // access the map directly. @@ -51,7 +51,7 @@ func (h Header) Get(key string) string { } // Values returns all values associated with the given key. -// It is case insensitive; textproto.CanonicalMIMEHeaderKey is +// It is case insensitive; [textproto.CanonicalMIMEHeaderKey] is // used to canonicalize the provided key. To use non-canonical // keys, access the map directly. // The returned slice is not a copy. @@ -76,7 +76,7 @@ func (h Header) has(key string) bool { // Del deletes the values associated with key. // The key is case insensitive; it is canonicalized by -// CanonicalHeaderKey. +// [CanonicalHeaderKey]. func (h Header) Del(key string) { textproto.MIMEHeader(h).Del(key) } @@ -125,7 +125,7 @@ var timeFormats = []string{ // ParseTime parses a time header (such as the Date: header), // trying each of the three formats allowed by HTTP/1.1: -// TimeFormat, time.RFC850, and time.ANSIC. +// [TimeFormat], [time.RFC850], and [time.ANSIC]. 
func ParseTime(text string) (t time.Time, err error) { for _, layout := range timeFormats { t, err = time.Parse(layout, text) diff --git a/src/net/http/http.go b/src/net/http/http.go index 9b81654fcc..6e2259adbf 100644 --- a/src/net/http/http.go +++ b/src/net/http/http.go @@ -103,10 +103,10 @@ func hexEscapeNonASCII(s string) string { return string(b) } -// NoBody is an io.ReadCloser with no bytes. Read always returns EOF +// NoBody is an [io.ReadCloser] with no bytes. Read always returns EOF // and Close always returns nil. It can be used in an outgoing client // request to explicitly signal that a request has zero bytes. -// An alternative, however, is to simply set Request.Body to nil. +// An alternative, however, is to simply set [Request.Body] to nil. var NoBody = noBody{} type noBody struct{} @@ -121,7 +121,7 @@ var ( _ io.ReadCloser = NoBody ) -// PushOptions describes options for Pusher.Push. +// PushOptions describes options for [Pusher.Push]. type PushOptions struct { // Method specifies the HTTP method for the promised request. // If set, it must be "GET" or "HEAD". Empty means "GET". diff --git a/src/net/http/httptest/httptest.go b/src/net/http/httptest/httptest.go index 9bedefd2bc..f0ca64362d 100644 --- a/src/net/http/httptest/httptest.go +++ b/src/net/http/httptest/httptest.go @@ -15,7 +15,7 @@ import ( ) // NewRequest returns a new incoming server Request, suitable -// for passing to an http.Handler for testing. +// for passing to an [http.Handler] for testing. // // The target is the RFC 7230 "request-target": it may be either a // path or an absolute URL. 
If target is an absolute URL, the host name diff --git a/src/net/http/httptest/recorder.go b/src/net/http/httptest/recorder.go index 1c1d880155..dd51901b0d 100644 --- a/src/net/http/httptest/recorder.go +++ b/src/net/http/httptest/recorder.go @@ -16,7 +16,7 @@ import ( "golang.org/x/net/http/httpguts" ) -// ResponseRecorder is an implementation of http.ResponseWriter that +// ResponseRecorder is an implementation of [http.ResponseWriter] that // records its mutations for later inspection in tests. type ResponseRecorder struct { // Code is the HTTP response code set by WriteHeader. @@ -47,7 +47,7 @@ type ResponseRecorder struct { wroteHeader bool } -// NewRecorder returns an initialized ResponseRecorder. +// NewRecorder returns an initialized [ResponseRecorder]. func NewRecorder() *ResponseRecorder { return &ResponseRecorder{ HeaderMap: make(http.Header), @@ -57,12 +57,12 @@ func NewRecorder() *ResponseRecorder { } // DefaultRemoteAddr is the default remote address to return in RemoteAddr if -// an explicit DefaultRemoteAddr isn't set on ResponseRecorder. +// an explicit DefaultRemoteAddr isn't set on [ResponseRecorder]. const DefaultRemoteAddr = "1.2.3.4" -// Header implements http.ResponseWriter. It returns the response +// Header implements [http.ResponseWriter]. It returns the response // headers to mutate within a handler. To test the headers that were -// written after a handler completes, use the Result method and see +// written after a handler completes, use the [ResponseRecorder.Result] method and see // the returned Response value's Header. func (rw *ResponseRecorder) Header() http.Header { m := rw.HeaderMap @@ -112,7 +112,7 @@ func (rw *ResponseRecorder) Write(buf []byte) (int, error) { return len(buf), nil } -// WriteString implements io.StringWriter. The data in str is written +// WriteString implements [io.StringWriter]. The data in str is written // to rw.Body, if not nil. 
func (rw *ResponseRecorder) WriteString(str string) (int, error) { rw.writeHeader(nil, str) @@ -139,7 +139,7 @@ func checkWriteHeaderCode(code int) { } } -// WriteHeader implements http.ResponseWriter. +// WriteHeader implements [http.ResponseWriter]. func (rw *ResponseRecorder) WriteHeader(code int) { if rw.wroteHeader { return @@ -154,7 +154,7 @@ func (rw *ResponseRecorder) WriteHeader(code int) { rw.snapHeader = rw.HeaderMap.Clone() } -// Flush implements http.Flusher. To test whether Flush was +// Flush implements [http.Flusher]. To test whether Flush was // called, see rw.Flushed. func (rw *ResponseRecorder) Flush() { if !rw.wroteHeader { @@ -175,7 +175,7 @@ func (rw *ResponseRecorder) Flush() { // did a write. // // The Response.Body is guaranteed to be non-nil and Body.Read call is -// guaranteed to not return any error other than io.EOF. +// guaranteed to not return any error other than [io.EOF]. // // Result must only be called after the handler has finished running. func (rw *ResponseRecorder) Result() *http.Response { diff --git a/src/net/http/httptest/server.go b/src/net/http/httptest/server.go index c962749e85..5095b438ec 100644 --- a/src/net/http/httptest/server.go +++ b/src/net/http/httptest/server.go @@ -100,7 +100,7 @@ func strSliceContainsPrefix(v []string, pre string) bool { return false } -// NewServer starts and returns a new Server. +// NewServer starts and returns a new [Server]. // The caller should call Close when finished, to shut it down. func NewServer(handler http.Handler) *Server { ts := NewUnstartedServer(handler) @@ -108,7 +108,7 @@ func NewServer(handler http.Handler) *Server { return ts } -// NewUnstartedServer returns a new Server but doesn't start it. +// NewUnstartedServer returns a new [Server] but doesn't start it. // // After changing its configuration, the caller should call Start or // StartTLS. @@ -185,7 +185,7 @@ func (s *Server) StartTLS() { s.goServe() } -// NewTLSServer starts and returns a new Server using TLS. 
+// NewTLSServer starts and returns a new [Server] using TLS. // The caller should call Close when finished, to shut it down. func NewTLSServer(handler http.Handler) *Server { ts := NewUnstartedServer(handler) @@ -298,7 +298,7 @@ func (s *Server) Certificate() *x509.Certificate { // Client returns an HTTP client configured for making requests to the server. // It is configured to trust the server's TLS test certificate and will -// close its idle connections on Server.Close. +// close its idle connections on [Server.Close]. func (s *Server) Client() *http.Client { return s.client } diff --git a/src/net/http/httptrace/trace.go b/src/net/http/httptrace/trace.go index 6af30f78d1..706a432957 100644 --- a/src/net/http/httptrace/trace.go +++ b/src/net/http/httptrace/trace.go @@ -19,7 +19,7 @@ import ( // unique type to prevent assignment. type clientEventContextKey struct{} -// ContextClientTrace returns the ClientTrace associated with the +// ContextClientTrace returns the [ClientTrace] associated with the // provided context. If none, it returns nil. func ContextClientTrace(ctx context.Context) *ClientTrace { trace, _ := ctx.Value(clientEventContextKey{}).(*ClientTrace) @@ -233,7 +233,7 @@ func (t *ClientTrace) hasNetHooks() bool { return t.DNSStart != nil || t.DNSDone != nil || t.ConnectStart != nil || t.ConnectDone != nil } -// GotConnInfo is the argument to the ClientTrace.GotConn function and +// GotConnInfo is the argument to the [ClientTrace.GotConn] function and // contains information about the obtained connection. type GotConnInfo struct { // Conn is the connection that was obtained. It is owned by diff --git a/src/net/http/httputil/dump.go b/src/net/http/httputil/dump.go index 7affe5e61a..2edb9bc98d 100644 --- a/src/net/http/httputil/dump.go +++ b/src/net/http/httputil/dump.go @@ -71,8 +71,8 @@ func outgoingLength(req *http.Request) int64 { return -1 } -// DumpRequestOut is like DumpRequest but for outgoing client requests. 
It -// includes any headers that the standard http.Transport adds, such as +// DumpRequestOut is like [DumpRequest] but for outgoing client requests. It +// includes any headers that the standard [http.Transport] adds, such as // User-Agent. func DumpRequestOut(req *http.Request, body bool) ([]byte, error) { save := req.Body @@ -203,17 +203,17 @@ var reqWriteExcludeHeaderDump = map[string]bool{ // representation. It should only be used by servers to debug client // requests. The returned representation is an approximation only; // some details of the initial request are lost while parsing it into -// an http.Request. In particular, the order and case of header field +// an [http.Request]. In particular, the order and case of header field // names are lost. The order of values in multi-valued headers is kept // intact. HTTP/2 requests are dumped in HTTP/1.x form, not in their // original binary representations. // // If body is true, DumpRequest also returns the body. To do so, it -// consumes req.Body and then replaces it with a new io.ReadCloser +// consumes req.Body and then replaces it with a new [io.ReadCloser] // that yields the same bytes. If DumpRequest returns an error, // the state of req is undefined. // -// The documentation for http.Request.Write details which fields +// The documentation for [http.Request.Write] details which fields // of req are included in the dump. func DumpRequest(req *http.Request, body bool) ([]byte, error) { var err error diff --git a/src/net/http/httputil/httputil.go b/src/net/http/httputil/httputil.go index 09ea74d6d1..431930ea65 100644 --- a/src/net/http/httputil/httputil.go +++ b/src/net/http/httputil/httputil.go @@ -13,7 +13,7 @@ import ( // NewChunkedReader returns a new chunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. 
+// The chunkedReader returns [io.EOF] when the final 0-length chunk is read. // // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. diff --git a/src/net/http/httputil/persist.go b/src/net/http/httputil/persist.go index 84b116df8c..0cbe3ebf10 100644 --- a/src/net/http/httputil/persist.go +++ b/src/net/http/httputil/persist.go @@ -33,7 +33,7 @@ var errClosed = errors.New("i/o operation on closed connection") // It is low-level, old, and unused by Go's current HTTP stack. // We should have deleted it before Go 1. // -// Deprecated: Use the Server in package net/http instead. +// Deprecated: Use the Server in package [net/http] instead. type ServerConn struct { mu sync.Mutex // read-write protects the following fields c net.Conn @@ -50,7 +50,7 @@ type ServerConn struct { // It is low-level, old, and unused by Go's current HTTP stack. // We should have deleted it before Go 1. // -// Deprecated: Use the Server in package net/http instead. +// Deprecated: Use the Server in package [net/http] instead. func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { if r == nil { r = bufio.NewReader(c) @@ -58,10 +58,10 @@ func NewServerConn(c net.Conn, r *bufio.Reader) *ServerConn { return &ServerConn{c: c, r: r, pipereq: make(map[*http.Request]uint)} } -// Hijack detaches the ServerConn and returns the underlying connection as well +// Hijack detaches the [ServerConn] and returns the underlying connection as well // as the read-side bufio which may have some left over data. Hijack may be // called before Read has signaled the end of the keep-alive logic. The user -// should not call Hijack while Read or Write is in progress. +// should not call Hijack while [ServerConn.Read] or [ServerConn.Write] is in progress. 
func (sc *ServerConn) Hijack() (net.Conn, *bufio.Reader) { sc.mu.Lock() defer sc.mu.Unlock() @@ -72,7 +72,7 @@ func (sc *ServerConn) Hijack() (net.Conn, *bufio.Reader) { return c, r } -// Close calls Hijack and then also closes the underlying connection. +// Close calls [ServerConn.Hijack] and then also closes the underlying connection. func (sc *ServerConn) Close() error { c, _ := sc.Hijack() if c != nil { @@ -81,7 +81,7 @@ func (sc *ServerConn) Close() error { return nil } -// Read returns the next request on the wire. An ErrPersistEOF is returned if +// Read returns the next request on the wire. An [ErrPersistEOF] is returned if // it is gracefully determined that there are no more requests (e.g. after the // first request on an HTTP/1.0 connection, or after a Connection:close on a // HTTP/1.1 connection). @@ -171,7 +171,7 @@ func (sc *ServerConn) Pending() int { // Write writes resp in response to req. To close the connection gracefully, set the // Response.Close field to true. Write should be considered operational until -// it returns an error, regardless of any errors returned on the Read side. +// it returns an error, regardless of any errors returned on the [ServerConn.Read] side. func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error { // Retrieve the pipeline ID of this request/response pair @@ -226,7 +226,7 @@ func (sc *ServerConn) Write(req *http.Request, resp *http.Response) error { // It is low-level, old, and unused by Go's current HTTP stack. // We should have deleted it before Go 1. // -// Deprecated: Use Client or Transport in package net/http instead. +// Deprecated: Use Client or Transport in package [net/http] instead. type ClientConn struct { mu sync.Mutex // read-write protects the following fields c net.Conn @@ -244,7 +244,7 @@ type ClientConn struct { // It is low-level, old, and unused by Go's current HTTP stack. // We should have deleted it before Go 1. 
// -// Deprecated: Use the Client or Transport in package net/http instead. +// Deprecated: Use the Client or Transport in package [net/http] instead. func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { if r == nil { r = bufio.NewReader(c) @@ -261,17 +261,17 @@ func NewClientConn(c net.Conn, r *bufio.Reader) *ClientConn { // It is low-level, old, and unused by Go's current HTTP stack. // We should have deleted it before Go 1. // -// Deprecated: Use the Client or Transport in package net/http instead. +// Deprecated: Use the Client or Transport in package [net/http] instead. func NewProxyClientConn(c net.Conn, r *bufio.Reader) *ClientConn { cc := NewClientConn(c, r) cc.writeReq = (*http.Request).WriteProxy return cc } -// Hijack detaches the ClientConn and returns the underlying connection as well +// Hijack detaches the [ClientConn] and returns the underlying connection as well // as the read-side bufio which may have some left over data. Hijack may be // called before the user or Read have signaled the end of the keep-alive -// logic. The user should not call Hijack while Read or Write is in progress. +// logic. The user should not call Hijack while [ClientConn.Read] or ClientConn.Write is in progress. func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) { cc.mu.Lock() defer cc.mu.Unlock() @@ -282,7 +282,7 @@ func (cc *ClientConn) Hijack() (c net.Conn, r *bufio.Reader) { return } -// Close calls Hijack and then also closes the underlying connection. +// Close calls [ClientConn.Hijack] and then also closes the underlying connection. func (cc *ClientConn) Close() error { c, _ := cc.Hijack() if c != nil { @@ -291,7 +291,7 @@ func (cc *ClientConn) Close() error { return nil } -// Write writes a request. An ErrPersistEOF error is returned if the connection +// Write writes a request. An [ErrPersistEOF] error is returned if the connection // has been closed in an HTTP keep-alive sense. 
If req.Close equals true, the // keep-alive connection is logically closed after this request and the opposing // server is informed. An ErrUnexpectedEOF indicates the remote closed the @@ -357,9 +357,9 @@ func (cc *ClientConn) Pending() int { } // Read reads the next response from the wire. A valid response might be -// returned together with an ErrPersistEOF, which means that the remote +// returned together with an [ErrPersistEOF], which means that the remote // requested that this be the last request serviced. Read can be called -// concurrently with Write, but not with another Read. +// concurrently with [ClientConn.Write], but not with another Read. func (cc *ClientConn) Read(req *http.Request) (resp *http.Response, err error) { // Retrieve the pipeline ID of this request/response pair cc.mu.Lock() diff --git a/src/net/http/httputil/reverseproxy.go b/src/net/http/httputil/reverseproxy.go index 719ab62d1a..04248d5f53 100644 --- a/src/net/http/httputil/reverseproxy.go +++ b/src/net/http/httputil/reverseproxy.go @@ -26,7 +26,7 @@ import ( "golang.org/x/net/http/httpguts" ) -// A ProxyRequest contains a request to be rewritten by a ReverseProxy. +// A ProxyRequest contains a request to be rewritten by a [ReverseProxy]. type ProxyRequest struct { // In is the request received by the proxy. // The Rewrite function must not modify In. @@ -45,7 +45,7 @@ type ProxyRequest struct { // // SetURL rewrites the outbound Host header to match the target's host. // To preserve the inbound request's Host header (the default behavior -// of NewSingleHostReverseProxy): +// of [NewSingleHostReverseProxy]): // // rewriteFunc := func(r *httputil.ProxyRequest) { // r.SetURL(url) @@ -68,7 +68,7 @@ func (r *ProxyRequest) SetURL(target *url.URL) { // If the outbound request contains an existing X-Forwarded-For header, // SetXForwarded appends the client IP address to it. 
To append to the // inbound request's X-Forwarded-For header (the default behavior of -// ReverseProxy when using a Director function), copy the header +// [ReverseProxy] when using a Director function), copy the header // from the inbound request before calling SetXForwarded: // // rewriteFunc := func(r *httputil.ProxyRequest) { @@ -200,7 +200,7 @@ type ReverseProxy struct { } // A BufferPool is an interface for getting and returning temporary -// byte slices for use by io.CopyBuffer. +// byte slices for use by [io.CopyBuffer]. type BufferPool interface { Get() []byte Put([]byte) @@ -239,7 +239,7 @@ func joinURLPath(a, b *url.URL) (path, rawpath string) { return a.Path + b.Path, apath + bpath } -// NewSingleHostReverseProxy returns a new ReverseProxy that routes +// NewSingleHostReverseProxy returns a new [ReverseProxy] that routes // URLs to the scheme, host, and base path provided in target. If the // target's path is "/base" and the incoming request was for "/dir", // the target request will be for /base/dir. @@ -454,8 +454,19 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { outreq.Header.Set("User-Agent", "") } + var ( + roundTripMutex sync.Mutex + roundTripDone bool + ) trace := &httptrace.ClientTrace{ Got1xxResponse: func(code int, header textproto.MIMEHeader) error { + roundTripMutex.Lock() + defer roundTripMutex.Unlock() + if roundTripDone { + // If RoundTrip has returned, don't try to further modify + // the ResponseWriter's header map. 
+ return nil + } h := rw.Header() copyHeader(h, http.Header(header)) rw.WriteHeader(code) @@ -468,6 +479,9 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) { outreq = outreq.WithContext(httptrace.WithClientTrace(outreq.Context(), trace)) res, err := transport.RoundTrip(outreq) + roundTripMutex.Lock() + roundTripDone = true + roundTripMutex.Unlock() if err != nil { p.getErrorHandler()(rw, outreq, err) return diff --git a/src/net/http/httputil/reverseproxy_test.go b/src/net/http/httputil/reverseproxy_test.go index dd3330b615..1bd64e65ba 100644 --- a/src/net/http/httputil/reverseproxy_test.go +++ b/src/net/http/httputil/reverseproxy_test.go @@ -1687,6 +1687,47 @@ func TestReverseProxyRewriteReplacesOut(t *testing.T) { } } +func Test1xxHeadersNotModifiedAfterRoundTrip(t *testing.T) { + // https://go.dev/issue/65123: We use httptrace.Got1xxResponse to capture 1xx responses + // and proxy them. httptrace handlers can execute after RoundTrip returns, in particular + // after experiencing connection errors. When this happens, we shouldn't modify the + // ResponseWriter headers after ReverseProxy.ServeHTTP returns. + backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + for i := 0; i < 5; i++ { + w.WriteHeader(103) + } + })) + defer backend.Close() + backendURL, err := url.Parse(backend.URL) + if err != nil { + t.Fatal(err) + } + proxyHandler := NewSingleHostReverseProxy(backendURL) + proxyHandler.ErrorLog = log.New(io.Discard, "", 0) // quiet for tests + + rw := &testResponseWriter{} + func() { + // Cancel the request (and cause RoundTrip to return) immediately upon + // seeing a 1xx response. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + Got1xxResponse: func(code int, header textproto.MIMEHeader) error { + cancel() + return nil + }, + }) + + req, _ := http.NewRequestWithContext(ctx, "GET", "http://go.dev/", nil) + proxyHandler.ServeHTTP(rw, req) + }() + // Trigger data race while iterating over response headers. + // When run with -race, this causes the condition in https://go.dev/issue/65123 often + // enough to detect reliably. + for _ = range rw.Header() { + } +} + func Test1xxResponses(t *testing.T) { backend := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { h := w.Header() @@ -1861,3 +1902,29 @@ func testReverseProxyQueryParameterSmuggling(t *testing.T, wantCleanQuery bool, } } } + +type testResponseWriter struct { + h http.Header + writeHeader func(int) + write func([]byte) (int, error) +} + +func (rw *testResponseWriter) Header() http.Header { + if rw.h == nil { + rw.h = make(http.Header) + } + return rw.h +} + +func (rw *testResponseWriter) WriteHeader(statusCode int) { + if rw.writeHeader != nil { + rw.writeHeader(statusCode) + } +} + +func (rw *testResponseWriter) Write(p []byte) (int, error) { + if rw.write != nil { + return rw.write(p) + } + return len(p), nil +} diff --git a/src/net/http/internal/ascii/print.go b/src/net/http/internal/ascii/print.go index 585e5baba4..98dbf4e3d2 100644 --- a/src/net/http/internal/ascii/print.go +++ b/src/net/http/internal/ascii/print.go @@ -9,7 +9,7 @@ import ( "unicode" ) -// EqualFold is strings.EqualFold, ASCII only. It reports whether s and t +// EqualFold is [strings.EqualFold], ASCII only. It reports whether s and t // are equal, ASCII-case-insensitively. 
func EqualFold(s, t string) bool { if len(s) != len(t) { diff --git a/src/net/http/internal/chunked.go b/src/net/http/internal/chunked.go index 5a174415dc..196b5d8925 100644 --- a/src/net/http/internal/chunked.go +++ b/src/net/http/internal/chunked.go @@ -22,7 +22,7 @@ var ErrLineTooLong = errors.New("header line too long") // NewChunkedReader returns a new chunkedReader that translates the data read from r // out of HTTP "chunked" format before returning it. -// The chunkedReader returns io.EOF when the final 0-length chunk is read. +// The chunkedReader returns [io.EOF] when the final 0-length chunk is read. // // NewChunkedReader is not needed by normal applications. The http package // automatically decodes chunking when reading response bodies. @@ -39,7 +39,8 @@ type chunkedReader struct { n uint64 // unread bytes in chunk err error buf [2]byte - checkEnd bool // whether need to check for \r\n chunk footer + checkEnd bool // whether need to check for \r\n chunk footer + excess int64 // "excessive" chunk overhead, for malicious sender detection } func (cr *chunkedReader) beginChunk() { @@ -49,10 +50,36 @@ func (cr *chunkedReader) beginChunk() { if cr.err != nil { return } + cr.excess += int64(len(line)) + 2 // header, plus \r\n after the chunk data + line = trimTrailingWhitespace(line) + line, cr.err = removeChunkExtension(line) + if cr.err != nil { + return + } cr.n, cr.err = parseHexUint(line) if cr.err != nil { return } + // A sender who sends one byte per chunk will send 5 bytes of overhead + // for every byte of data. ("1\r\nX\r\n" to send "X".) + // We want to allow this, since streaming a byte at a time can be legitimate. + // + // A sender can use chunk extensions to add arbitrary amounts of additional + // data per byte read. ("1;very long extension\r\nX\r\n" to send "X".) + // We don't want to disallow extensions (although we discard them), + // but we also don't want to allow a sender to reduce the signal/noise ratio + // arbitrarily. 
+ // + // We track the amount of excess overhead read, + // and produce an error if it grows too large. + // + // Currently, we say that we're willing to accept 16 bytes of overhead per chunk, + // plus twice the amount of real data in the chunk. + cr.excess -= 16 + (2 * int64(cr.n)) + cr.excess = max(cr.excess, 0) + if cr.excess > 16*1024 { + cr.err = errors.New("chunked encoding contains too much non-data") + } if cr.n == 0 { cr.err = io.EOF } @@ -140,11 +167,6 @@ func readChunkLine(b *bufio.Reader) ([]byte, error) { if len(p) >= maxLineLength { return nil, ErrLineTooLong } - p = trimTrailingWhitespace(p) - p, err = removeChunkExtension(p) - if err != nil { - return nil, err - } return p, nil } @@ -199,7 +221,7 @@ type chunkedWriter struct { // Write the contents of data as one chunk to Wire. // NOTE: Note that the corresponding chunk-writing procedure in Conn.Write has -// a bug since it does not check for success of io.WriteString +// a bug since it does not check for success of [io.WriteString] func (cw *chunkedWriter) Write(data []byte) (n int, err error) { // Don't send 0-length data. It looks like EOF for chunked encoding. @@ -231,9 +253,9 @@ func (cw *chunkedWriter) Close() error { return err } -// FlushAfterChunkWriter signals from the caller of NewChunkedWriter +// FlushAfterChunkWriter signals from the caller of [NewChunkedWriter] // that each chunk should be followed by a flush. It is used by the -// http.Transport code to keep the buffering behavior for headers and +// [net/http.Transport] code to keep the buffering behavior for headers and // trailers, but flush out chunks aggressively in the middle for // request bodies which may be generated slowly. See Issue 6574. 
type FlushAfterChunkWriter struct { @@ -241,6 +263,9 @@ type FlushAfterChunkWriter struct { } func parseHexUint(v []byte) (n uint64, err error) { + if len(v) == 0 { + return 0, errors.New("empty hex number for chunk length") + } for i, b := range v { switch { case '0' <= b && b <= '9': diff --git a/src/net/http/internal/chunked_test.go b/src/net/http/internal/chunked_test.go index 5e29a786dd..af79711781 100644 --- a/src/net/http/internal/chunked_test.go +++ b/src/net/http/internal/chunked_test.go @@ -153,6 +153,7 @@ func TestParseHexUint(t *testing.T) { {"00000000000000000", 0, "http chunk length too large"}, // could accept if we wanted {"10000000000000000", 0, "http chunk length too large"}, {"00000000000000001", 0, "http chunk length too large"}, // could accept if we wanted + {"", 0, "empty hex number for chunk length"}, } for i := uint64(0); i <= 1234; i++ { tests = append(tests, testCase{in: fmt.Sprintf("%x", i), want: i}) @@ -239,3 +240,62 @@ func TestChunkEndReadError(t *testing.T) { t.Errorf("expected %v, got %v", readErr, err) } } + +func TestChunkReaderTooMuchOverhead(t *testing.T) { + // If the sender is sending 100x as many chunk header bytes as chunk data, + // we should reject the stream at some point. + chunk := []byte("1;") + for i := 0; i < 100; i++ { + chunk = append(chunk, 'a') // chunk extension + } + chunk = append(chunk, "\r\nX\r\n"...) + const bodylen = 1 << 20 + r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) { + if i < bodylen { + return chunk, nil + } + return []byte("0\r\n"), nil + }}) + _, err := io.ReadAll(r) + if err == nil { + t.Fatalf("successfully read body with excessive overhead; want error") + } +} + +func TestChunkReaderByteAtATime(t *testing.T) { + // Sending one byte per chunk should not trip the excess-overhead detection. 
+ const bodylen = 1 << 20 + r := NewChunkedReader(&funcReader{f: func(i int) ([]byte, error) { + if i < bodylen { + return []byte("1\r\nX\r\n"), nil + } + return []byte("0\r\n"), nil + }}) + got, err := io.ReadAll(r) + if err != nil { + t.Errorf("unexpected error: %v", err) + } + if len(got) != bodylen { + t.Errorf("read %v bytes, want %v", len(got), bodylen) + } +} + +type funcReader struct { + f func(iteration int) ([]byte, error) + i int + b []byte + err error +} + +func (r *funcReader) Read(p []byte) (n int, err error) { + if len(r.b) == 0 && r.err == nil { + r.b, r.err = r.f(r.i) + r.i++ + } + n = copy(p, r.b) + r.b = r.b[n:] + if len(r.b) > 0 { + return n, nil + } + return n, r.err +} diff --git a/src/net/http/pattern.go b/src/net/http/pattern.go index f6af19b0f4..8fd120e777 100644 --- a/src/net/http/pattern.go +++ b/src/net/http/pattern.go @@ -76,7 +76,7 @@ type segment struct { // a literal or a wildcard of the form "{name}", "{name...}", or "{$}". // // METHOD, HOST and PATH are all optional; that is, the string can be "/". -// If METHOD is present, it must be followed by a single space. +// If METHOD is present, it must be followed by at least one space or tab. // Wildcard names must be valid Go identifiers. // The "{$}" and "{name...}" wildcard must occur at the end of PATH. // PATH may end with a '/'. 
@@ -92,7 +92,10 @@ func parsePattern(s string) (_ *pattern, err error) { } }() - method, rest, found := strings.Cut(s, " ") + method, rest, found := s, "", false + if i := strings.IndexAny(s, " \t"); i >= 0 { + method, rest, found = s[:i], strings.TrimLeft(s[i+1:], " \t"), true + } if !found { rest = method method = "" diff --git a/src/net/http/pattern_test.go b/src/net/http/pattern_test.go index f0c84d243e..833fe88bf6 100644 --- a/src/net/http/pattern_test.go +++ b/src/net/http/pattern_test.go @@ -98,6 +98,23 @@ func TestParsePattern(t *testing.T) { "/%61%62/%7b/%", pattern{segments: []segment{lit("ab"), lit("{"), lit("%")}}, }, + // Allow multiple spaces matching regexp '[ \t]+' between method and path. + { + "GET\t /", + pattern{method: "GET", segments: []segment{multi("")}}, + }, + { + "POST \t example.com/foo/{w}", + pattern{ + method: "POST", + host: "example.com", + segments: []segment{lit("foo"), wild("w")}, + }, + }, + { + "DELETE \texample.com/a/{foo12}/{$}", + pattern{method: "DELETE", host: "example.com", segments: []segment{lit("a"), wild("foo12"), lit("/")}}, + }, } { got := mustParsePattern(t, test.in) if !got.equal(&test.want) { diff --git a/src/net/http/pprof/pprof.go b/src/net/http/pprof/pprof.go index bc3225daca..cf4b8415ca 100644 --- a/src/net/http/pprof/pprof.go +++ b/src/net/http/pprof/pprof.go @@ -8,6 +8,7 @@ // The package is typically only imported for the side effect of // registering its HTTP handlers. // The handled paths all begin with /debug/pprof/. +// As of Go 1.22, all the paths must be requested with GET. 
// // To use pprof, link this package into your program: // @@ -47,12 +48,12 @@ // go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30 // // Or to look at the goroutine blocking profile, after calling -// runtime.SetBlockProfileRate in your program: +// [runtime.SetBlockProfileRate] in your program: // // go tool pprof http://localhost:6060/debug/pprof/block // // Or to look at the holders of contended mutexes, after calling -// runtime.SetMutexProfileFraction in your program: +// [runtime.SetMutexProfileFraction] in your program: // // go tool pprof http://localhost:6060/debug/pprof/mutex // @@ -75,6 +76,7 @@ import ( "context" "fmt" "html" + "internal/godebug" "internal/profile" "io" "log" @@ -91,11 +93,15 @@ import ( ) func init() { - http.HandleFunc("/debug/pprof/", Index) - http.HandleFunc("/debug/pprof/cmdline", Cmdline) - http.HandleFunc("/debug/pprof/profile", Profile) - http.HandleFunc("/debug/pprof/symbol", Symbol) - http.HandleFunc("/debug/pprof/trace", Trace) + prefix := "" + if godebug.New("httpmuxgo121").Value() != "1" { + prefix = "GET " + } + http.HandleFunc(prefix+"/debug/pprof/", Index) + http.HandleFunc(prefix+"/debug/pprof/cmdline", Cmdline) + http.HandleFunc(prefix+"/debug/pprof/profile", Profile) + http.HandleFunc(prefix+"/debug/pprof/symbol", Symbol) + http.HandleFunc(prefix+"/debug/pprof/trace", Trace) } // Cmdline responds with the running program's @@ -114,9 +120,14 @@ func sleep(r *http.Request, d time.Duration) { } } -func durationExceedsWriteTimeout(r *http.Request, seconds float64) bool { +func configureWriteDeadline(w http.ResponseWriter, r *http.Request, seconds float64) { srv, ok := r.Context().Value(http.ServerContextKey).(*http.Server) - return ok && srv.WriteTimeout != 0 && seconds >= srv.WriteTimeout.Seconds() + if ok && srv.WriteTimeout > 0 { + timeout := srv.WriteTimeout + time.Duration(seconds*float64(time.Second)) + + rc := http.NewResponseController(w) + rc.SetWriteDeadline(time.Now().Add(timeout)) + } } func 
serveError(w http.ResponseWriter, status int, txt string) { @@ -137,10 +148,7 @@ func Profile(w http.ResponseWriter, r *http.Request) { sec = 30 } - if durationExceedsWriteTimeout(r, float64(sec)) { - serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout") - return - } + configureWriteDeadline(w, r, float64(sec)) // Set Content Type assuming StartCPUProfile will work, // because if it does it starts writing. @@ -166,10 +174,7 @@ func Trace(w http.ResponseWriter, r *http.Request) { sec = 1 } - if durationExceedsWriteTimeout(r, sec) { - serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout") - return - } + configureWriteDeadline(w, r, sec) // Set Content Type assuming trace.Start will work, // because if it does it starts writing. @@ -273,15 +278,14 @@ func (name handler) serveDeltaProfile(w http.ResponseWriter, r *http.Request, p serveError(w, http.StatusBadRequest, `invalid value for "seconds" - must be a positive integer`) return } + // 'name' should be a key in profileSupportsDelta. if !profileSupportsDelta[name] { serveError(w, http.StatusBadRequest, `"seconds" parameter is not supported for this profile type`) return } - // 'name' should be a key in profileSupportsDelta. 
- if durationExceedsWriteTimeout(r, float64(sec)) { - serveError(w, http.StatusBadRequest, "profile duration exceeds server's WriteTimeout") - return - } + + configureWriteDeadline(w, r, float64(sec)) + debug, _ := strconv.Atoi(r.FormValue("debug")) if debug != 0 { serveError(w, http.StatusBadRequest, "seconds and debug params are incompatible") diff --git a/src/net/http/pprof/pprof_test.go b/src/net/http/pprof/pprof_test.go index f82ad45bf6..24ad59ab39 100644 --- a/src/net/http/pprof/pprof_test.go +++ b/src/net/http/pprof/pprof_test.go @@ -6,12 +6,14 @@ package pprof import ( "bytes" + "encoding/base64" "fmt" "internal/profile" "internal/testenv" "io" "net/http" "net/http/httptest" + "path/filepath" "runtime" "runtime/pprof" "strings" @@ -261,3 +263,64 @@ func seen(p *profile.Profile, fname string) bool { } return false } + +// TestDeltaProfileEmptyBase validates that we still receive a valid delta +// profile even if the base contains no samples. +// +// Regression test for https://go.dev/issue/64566. +func TestDeltaProfileEmptyBase(t *testing.T) { + if testing.Short() { + // Delta profile collection has a 1s minimum. + t.Skip("skipping in -short mode") + } + + testenv.MustHaveGoRun(t) + + gotool, err := testenv.GoTool() + if err != nil { + t.Fatalf("error finding go tool: %v", err) + } + + out, err := testenv.Command(t, gotool, "run", filepath.Join("testdata", "delta_mutex.go")).CombinedOutput() + if err != nil { + t.Fatalf("error running profile collection: %v\noutput: %s", err, out) + } + + // Log the binary output for debugging failures. 
+ b64 := make([]byte, base64.StdEncoding.EncodedLen(len(out))) + base64.StdEncoding.Encode(b64, out) + t.Logf("Output in base64.StdEncoding: %s", b64) + + p, err := profile.Parse(bytes.NewReader(out)) + if err != nil { + t.Fatalf("Parse got err %v want nil", err) + } + + t.Logf("Output as parsed Profile: %s", p) + + if len(p.SampleType) != 2 { + t.Errorf("len(p.SampleType) got %d want 2", len(p.SampleType)) + } + if p.SampleType[0].Type != "contentions" { + t.Errorf(`p.SampleType[0].Type got %q want "contentions"`, p.SampleType[0].Type) + } + if p.SampleType[0].Unit != "count" { + t.Errorf(`p.SampleType[0].Unit got %q want "count"`, p.SampleType[0].Unit) + } + if p.SampleType[1].Type != "delay" { + t.Errorf(`p.SampleType[1].Type got %q want "delay"`, p.SampleType[1].Type) + } + if p.SampleType[1].Unit != "nanoseconds" { + t.Errorf(`p.SampleType[1].Unit got %q want "nanoseconds"`, p.SampleType[1].Unit) + } + + if p.PeriodType == nil { + t.Fatal("p.PeriodType got nil want not nil") + } + if p.PeriodType.Type != "contentions" { + t.Errorf(`p.PeriodType.Type got %q want "contentions"`, p.PeriodType.Type) + } + if p.PeriodType.Unit != "count" { + t.Errorf(`p.PeriodType.Unit got %q want "count"`, p.PeriodType.Unit) + } +} diff --git a/src/net/http/pprof/testdata/delta_mutex.go b/src/net/http/pprof/testdata/delta_mutex.go new file mode 100644 index 0000000000..634069c8a0 --- /dev/null +++ b/src/net/http/pprof/testdata/delta_mutex.go @@ -0,0 +1,43 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This binary collects a 1s delta mutex profile and dumps it to os.Stdout. +// +// This is in a subprocess because we want the base mutex profile to be empty +// (as a regression test for https://go.dev/issue/64566) and the only way to +// force reset the profile is to create a new subprocess. 
+// +// This manually collects the HTTP response and dumps to stdout in order to +// avoid any flakiness around port selection for a real HTTP server. +package main + +import ( + "bytes" + "fmt" + "log" + "net/http" + "net/http/pprof" + "net/http/httptest" + "runtime" +) + +func main() { + // Disable the mutex profiler. This is the default, but that default is + // load-bearing for this test, which needs the base profile to be empty. + runtime.SetMutexProfileFraction(0) + + h := pprof.Handler("mutex") + + req := httptest.NewRequest("GET", "/debug/pprof/mutex?seconds=1", nil) + rec := httptest.NewRecorder() + rec.Body = new(bytes.Buffer) + + h.ServeHTTP(rec, req) + resp := rec.Result() + if resp.StatusCode != http.StatusOK { + log.Fatalf("Request failed: %s\n%s", resp.Status, rec.Body) + } + + fmt.Print(rec.Body) +} diff --git a/src/net/http/readrequest_test.go b/src/net/http/readrequest_test.go index 5aaf3b9fe2..2da3122879 100644 --- a/src/net/http/readrequest_test.go +++ b/src/net/http/readrequest_test.go @@ -207,6 +207,22 @@ var reqTests = []reqTest{ noError, }, + // Tests chunked body and an invalid Content-Length. + { + "POST / HTTP/1.1\r\n" + + "Host: foo.com\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Content-Length: notdigits\r\n\r\n" + // raise an error + "3\r\nfoo\r\n" + + "3\r\nbar\r\n" + + "0\r\n" + + "\r\n", + nil, + noBodyStr, + noTrailer, + `bad Content-Length "notdigits"`, + }, + // CONNECT request with domain name: { "CONNECT www.google.com:443 HTTP/1.1\r\n\r\n", diff --git a/src/net/http/request.go b/src/net/http/request.go index ed2cdac136..99fdebcf9b 100644 --- a/src/net/http/request.go +++ b/src/net/http/request.go @@ -107,7 +107,7 @@ var reqWriteExcludeHeader = map[string]bool{ // // The field semantics differ slightly between client and server // usage. In addition to the notes on the fields below, see the -// documentation for Request.Write and RoundTripper. +// documentation for [Request.Write] and [RoundTripper]. 
type Request struct { // Method specifies the HTTP method (GET, POST, PUT, etc.). // For client requests, an empty string means GET. @@ -333,7 +333,7 @@ type Request struct { } // Context returns the request's context. To change the context, use -// Clone or WithContext. +// [Request.Clone] or [Request.WithContext]. // // The returned context is always non-nil; it defaults to the // background context. @@ -357,8 +357,8 @@ func (r *Request) Context() context.Context { // lifetime of a request and its response: obtaining a connection, // sending the request, and reading the response headers and body. // -// To create a new request with a context, use NewRequestWithContext. -// To make a deep copy of a request with a new context, use Request.Clone. +// To create a new request with a context, use [NewRequestWithContext]. +// To make a deep copy of a request with a new context, use [Request.Clone]. func (r *Request) WithContext(ctx context.Context) *Request { if ctx == nil { panic("nil context") @@ -397,6 +397,20 @@ func (r *Request) Clone(ctx context.Context) *Request { r2.Form = cloneURLValues(r.Form) r2.PostForm = cloneURLValues(r.PostForm) r2.MultipartForm = cloneMultipartForm(r.MultipartForm) + + // Copy matches and otherValues. See issue 61410. + if s := r.matches; s != nil { + s2 := make([]string, len(s)) + copy(s2, s) + r2.matches = s2 + } + if s := r.otherValues; s != nil { + s2 := make(map[string]string, len(s)) + for k, v := range s { + s2[k] = v + } + r2.otherValues = s2 + } return r2 } @@ -421,7 +435,7 @@ func (r *Request) Cookies() []*Cookie { var ErrNoCookie = errors.New("http: named cookie not present") // Cookie returns the named cookie provided in the request or -// ErrNoCookie if not found. +// [ErrNoCookie] if not found. // If multiple cookies match the given name, only one cookie will // be returned. 
func (r *Request) Cookie(name string) (*Cookie, error) { @@ -435,7 +449,7 @@ func (r *Request) Cookie(name string) (*Cookie, error) { } // AddCookie adds a cookie to the request. Per RFC 6265 section 5.4, -// AddCookie does not attach more than one Cookie header field. That +// AddCookie does not attach more than one [Cookie] header field. That // means all cookies, if any, are written into the same line, // separated by semicolon. // AddCookie only sanitizes c's name and value, and does not sanitize @@ -453,7 +467,7 @@ func (r *Request) AddCookie(c *Cookie) { // // Referer is misspelled as in the request itself, a mistake from the // earliest days of HTTP. This value can also be fetched from the -// Header map as Header["Referer"]; the benefit of making it available +// [Header] map as Header["Referer"]; the benefit of making it available // as a method is that the compiler can diagnose programs that use the // alternate (correct English) spelling req.Referrer() but cannot // diagnose programs that use Header["Referrer"]. @@ -471,7 +485,7 @@ var multipartByReader = &multipart.Form{ // MultipartReader returns a MIME multipart reader if this is a // multipart/form-data or a multipart/mixed POST request, else returns nil and an error. -// Use this function instead of ParseMultipartForm to +// Use this function instead of [Request.ParseMultipartForm] to // process the request body as a stream. func (r *Request) MultipartReader() (*multipart.Reader, error) { if r.MultipartForm == multipartByReader { @@ -534,15 +548,15 @@ const defaultUserAgent = "Go-http-client/1.1" // TransferEncoding // Body // -// If Body is present, Content-Length is <= 0 and TransferEncoding +// If Body is present, Content-Length is <= 0 and [Request.TransferEncoding] // hasn't been set to "identity", Write adds "Transfer-Encoding: // chunked" to the header. Body is closed after it is sent. 
func (r *Request) Write(w io.Writer) error { return r.write(w, false, nil, nil) } -// WriteProxy is like Write but writes the request in the form -// expected by an HTTP proxy. In particular, WriteProxy writes the +// WriteProxy is like [Request.Write] but writes the request in the form +// expected by an HTTP proxy. In particular, [Request.WriteProxy] writes the // initial Request-URI line of the request with an absolute URI, per // section 5.3 of RFC 7230, including the scheme and host. // In either case, WriteProxy also writes a Host header, using @@ -837,33 +851,33 @@ func validMethod(method string) bool { return len(method) > 0 && strings.IndexFunc(method, isNotToken) == -1 } -// NewRequest wraps NewRequestWithContext using context.Background. +// NewRequest wraps [NewRequestWithContext] using [context.Background]. func NewRequest(method, url string, body io.Reader) (*Request, error) { return NewRequestWithContext(context.Background(), method, url, body) } -// NewRequestWithContext returns a new Request given a method, URL, and +// NewRequestWithContext returns a new [Request] given a method, URL, and // optional body. // -// If the provided body is also an io.Closer, the returned -// Request.Body is set to body and will be closed (possibly +// If the provided body is also an [io.Closer], the returned +// [Request.Body] is set to body and will be closed (possibly // asynchronously) by the Client methods Do, Post, and PostForm, -// and Transport.RoundTrip. +// and [Transport.RoundTrip]. // // NewRequestWithContext returns a Request suitable for use with -// Client.Do or Transport.RoundTrip. To create a request for use with -// testing a Server Handler, either use the NewRequest function in the -// net/http/httptest package, use ReadRequest, or manually update the +// [Client.Do] or [Transport.RoundTrip]. 
To create a request for use with +// testing a Server Handler, either use the [NewRequest] function in the +// net/http/httptest package, use [ReadRequest], or manually update the // Request fields. For an outgoing client request, the context // controls the entire lifetime of a request and its response: // obtaining a connection, sending the request, and reading the // response headers and body. See the Request type's documentation for // the difference between inbound and outbound request fields. // -// If body is of type *bytes.Buffer, *bytes.Reader, or -// *strings.Reader, the returned request's ContentLength is set to its +// If body is of type [*bytes.Buffer], [*bytes.Reader], or +// [*strings.Reader], the returned request's ContentLength is set to its // exact value (instead of -1), GetBody is populated (so 307 and 308 -// redirects can replay the body), and Body is set to NoBody if the +// redirects can replay the body), and Body is set to [NoBody] if the // ContentLength is 0. func NewRequestWithContext(ctx context.Context, method, url string, body io.Reader) (*Request, error) { if method == "" { @@ -987,7 +1001,7 @@ func parseBasicAuth(auth string) (username, password string, ok bool) { // The username may not contain a colon. Some protocols may impose // additional requirements on pre-escaping the username and // password. For instance, when used with OAuth2, both arguments must -// be URL encoded first with url.QueryEscape. +// be URL encoded first with [url.QueryEscape]. func (r *Request) SetBasicAuth(username, password string) { r.Header.Set("Authorization", "Basic "+basicAuth(username, password)) } @@ -1021,8 +1035,8 @@ func putTextprotoReader(r *textproto.Reader) { // ReadRequest reads and parses an incoming request from b. // // ReadRequest is a low-level function and should only be used for -// specialized applications; most code should use the Server to read -// requests and handle them via the Handler interface. 
ReadRequest +// specialized applications; most code should use the [Server] to read +// requests and handle them via the [Handler] interface. ReadRequest // only supports HTTP/1.x requests. For HTTP/2, use golang.org/x/net/http2. func ReadRequest(b *bufio.Reader) (*Request, error) { req, err := readRequest(b) @@ -1131,15 +1145,15 @@ func readRequest(b *bufio.Reader) (req *Request, err error) { return req, nil } -// MaxBytesReader is similar to io.LimitReader but is intended for +// MaxBytesReader is similar to [io.LimitReader] but is intended for // limiting the size of incoming request bodies. In contrast to // io.LimitReader, MaxBytesReader's result is a ReadCloser, returns a -// non-nil error of type *MaxBytesError for a Read beyond the limit, +// non-nil error of type [*MaxBytesError] for a Read beyond the limit, // and closes the underlying reader when its Close method is called. // // MaxBytesReader prevents clients from accidentally or maliciously // sending a large request and wasting server resources. If possible, -// it tells the ResponseWriter to close the connection after the limit +// it tells the [ResponseWriter] to close the connection after the limit // has been reached. func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { if n < 0 { // Treat negative limits as equivalent to 0. @@ -1148,7 +1162,7 @@ func MaxBytesReader(w ResponseWriter, r io.ReadCloser, n int64) io.ReadCloser { return &maxBytesReader{w: w, r: r, i: n, n: n} } -// MaxBytesError is returned by MaxBytesReader when its read limit is exceeded. +// MaxBytesError is returned by [MaxBytesReader] when its read limit is exceeded. type MaxBytesError struct { Limit int64 } @@ -1273,14 +1287,14 @@ func parsePostForm(r *Request) (vs url.Values, err error) { // as a form and puts the results into both r.PostForm and r.Form. Request body // parameters take precedence over URL query string values in r.Form. 
// -// If the request Body's size has not already been limited by MaxBytesReader, +// If the request Body's size has not already been limited by [MaxBytesReader], // the size is capped at 10MB. // // For other HTTP methods, or when the Content-Type is not // application/x-www-form-urlencoded, the request Body is not read, and // r.PostForm is initialized to a non-nil, empty value. // -// ParseMultipartForm calls ParseForm automatically. +// [Request.ParseMultipartForm] calls ParseForm automatically. // ParseForm is idempotent. func (r *Request) ParseForm() error { var err error @@ -1321,7 +1335,7 @@ func (r *Request) ParseForm() error { // The whole request body is parsed and up to a total of maxMemory bytes of // its file parts are stored in memory, with the remainder stored on // disk in temporary files. -// ParseMultipartForm calls ParseForm if necessary. +// ParseMultipartForm calls [Request.ParseForm] if necessary. // If ParseForm returns an error, ParseMultipartForm returns it but also // continues parsing the request body. // After one call to ParseMultipartForm, subsequent calls have no effect. @@ -1364,12 +1378,16 @@ func (r *Request) ParseMultipartForm(maxMemory int64) error { } // FormValue returns the first value for the named component of the query. -// POST, PUT, and PATCH body parameters take precedence over URL query string values. -// FormValue calls ParseMultipartForm and ParseForm if necessary and ignores -// any errors returned by these functions. +// The precedence order: +// 1. application/x-www-form-urlencoded form body (POST, PUT, PATCH only) +// 2. query parameters (always) +// 3. multipart/form-data form body (always) +// +// FormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] +// if necessary and ignores any errors returned by these functions. // If key is not present, FormValue returns the empty string. // To access multiple values of the same key, call ParseForm and -// then inspect Request.Form directly. 
+// then inspect [Request.Form] directly. func (r *Request) FormValue(key string) string { if r.Form == nil { r.ParseMultipartForm(defaultMaxMemory) @@ -1382,7 +1400,7 @@ func (r *Request) FormValue(key string) string { // PostFormValue returns the first value for the named component of the POST, // PUT, or PATCH request body. URL query parameters are ignored. -// PostFormValue calls ParseMultipartForm and ParseForm if necessary and ignores +// PostFormValue calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary and ignores // any errors returned by these functions. // If key is not present, PostFormValue returns the empty string. func (r *Request) PostFormValue(key string) string { @@ -1396,7 +1414,7 @@ func (r *Request) PostFormValue(key string) string { } // FormFile returns the first file for the provided form key. -// FormFile calls ParseMultipartForm and ParseForm if necessary. +// FormFile calls [Request.ParseMultipartForm] and [Request.ParseForm] if necessary. func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, error) { if r.MultipartForm == multipartByReader { return nil, nil, errors.New("http: multipart handled by MultipartReader") @@ -1416,7 +1434,7 @@ func (r *Request) FormFile(key string) (multipart.File, *multipart.FileHeader, e return nil, nil, ErrMissingFile } -// PathValue returns the value for the named path wildcard in the ServeMux pattern +// PathValue returns the value for the named path wildcard in the [ServeMux] pattern // that matched the request. // It returns the empty string if the request was not matched against a pattern // or there is no such wildcard in the pattern. @@ -1427,6 +1445,8 @@ func (r *Request) PathValue(name string) string { return r.otherValues[name] } +// SetPathValue sets name to value, so that subsequent calls to r.PathValue(name) +// return value. 
func (r *Request) SetPathValue(name, value string) { if i := r.patIndex(name); i >= 0 { r.matches[i] = value diff --git a/src/net/http/request_test.go b/src/net/http/request_test.go index 1531da3d8c..6ce32332e7 100644 --- a/src/net/http/request_test.go +++ b/src/net/http/request_test.go @@ -1053,6 +1053,33 @@ func TestRequestCloneTransferEncoding(t *testing.T) { } } +// Ensure that Request.Clone works correctly with PathValue. +// See issue 64911. +func TestRequestClonePathValue(t *testing.T) { + req, _ := http.NewRequest("GET", "https://example.org/", nil) + req.SetPathValue("p1", "orig") + + clonedReq := req.Clone(context.Background()) + clonedReq.SetPathValue("p2", "copy") + + // Ensure that any modifications to the cloned + // request do not pollute the original request. + if g, w := req.PathValue("p2"), ""; g != w { + t.Fatalf("p2 mismatch got %q, want %q", g, w) + } + if g, w := req.PathValue("p1"), "orig"; g != w { + t.Fatalf("p1 mismatch got %q, want %q", g, w) + } + + // Assert on the changes to the cloned request. + if g, w := clonedReq.PathValue("p1"), "orig"; g != w { + t.Fatalf("p1 mismatch got %q, want %q", g, w) + } + if g, w := clonedReq.PathValue("p2"), "copy"; g != w { + t.Fatalf("p2 mismatch got %q, want %q", g, w) + } +} + // Issue 34878: verify we don't panic when including basic auth (Go 1.13 regression) func TestNoPanicOnRoundTripWithBasicAuth(t *testing.T) { run(t, testNoPanicWithBasicAuth) } func testNoPanicWithBasicAuth(t *testing.T, mode testMode) { diff --git a/src/net/http/response.go b/src/net/http/response.go index 755c696557..0c3d7f6d85 100644 --- a/src/net/http/response.go +++ b/src/net/http/response.go @@ -29,7 +29,7 @@ var respExcludeHeader = map[string]bool{ // Response represents the response from an HTTP request. // -// The Client and Transport return Responses from servers once +// The [Client] and [Transport] return Responses from servers once // the response headers have been received. 
The response body // is streamed on demand as the Body field is read. type Response struct { @@ -126,13 +126,13 @@ func (r *Response) Cookies() []*Cookie { return readSetCookies(r.Header) } -// ErrNoLocation is returned by Response's Location method +// ErrNoLocation is returned by the [Response.Location] method // when no Location header is present. var ErrNoLocation = errors.New("http: no Location header in response") // Location returns the URL of the response's "Location" header, // if present. Relative redirects are resolved relative to -// the Response's Request. ErrNoLocation is returned if no +// [Response.Request]. [ErrNoLocation] is returned if no // Location header is present. func (r *Response) Location() (*url.URL, error) { lv := r.Header.Get("Location") @@ -146,8 +146,8 @@ func (r *Response) Location() (*url.URL, error) { } // ReadResponse reads and returns an HTTP response from r. -// The req parameter optionally specifies the Request that corresponds -// to this Response. If nil, a GET request is assumed. +// The req parameter optionally specifies the [Request] that corresponds +// to this [Response]. If nil, a GET request is assumed. // Clients must call resp.Body.Close when finished reading resp.Body. // After that call, clients can inspect resp.Trailer to find key/value // pairs included in the response trailer. diff --git a/src/net/http/responsecontroller.go b/src/net/http/responsecontroller.go index 92276ffaf2..f3f24c1273 100644 --- a/src/net/http/responsecontroller.go +++ b/src/net/http/responsecontroller.go @@ -13,14 +13,14 @@ import ( // A ResponseController is used by an HTTP handler to control the response. // -// A ResponseController may not be used after the Handler.ServeHTTP method has returned. +// A ResponseController may not be used after the [Handler.ServeHTTP] method has returned. type ResponseController struct { rw ResponseWriter } -// NewResponseController creates a ResponseController for a request. 
+// NewResponseController creates a [ResponseController] for a request. // -// The ResponseWriter should be the original value passed to the Handler.ServeHTTP method, +// The ResponseWriter should be the original value passed to the [Handler.ServeHTTP] method, // or have an Unwrap method returning the original ResponseWriter. // // If the ResponseWriter implements any of the following methods, the ResponseController @@ -34,7 +34,7 @@ type ResponseController struct { // EnableFullDuplex() error // // If the ResponseWriter does not support a method, ResponseController returns -// an error matching ErrNotSupported. +// an error matching [ErrNotSupported]. func NewResponseController(rw ResponseWriter) *ResponseController { return &ResponseController{rw} } @@ -116,8 +116,8 @@ func (c *ResponseController) SetWriteDeadline(deadline time.Time) error { } } -// EnableFullDuplex indicates that the request handler will interleave reads from Request.Body -// with writes to the ResponseWriter. +// EnableFullDuplex indicates that the request handler will interleave reads from [Request.Body] +// with writes to the [ResponseWriter]. // // For HTTP/1 requests, the Go HTTP server by default consumes any unread portion of // the request body before beginning to write the response, preventing handlers from diff --git a/src/net/http/responsecontroller_test.go b/src/net/http/responsecontroller_test.go index 5828f3795a..a217891026 100644 --- a/src/net/http/responsecontroller_test.go +++ b/src/net/http/responsecontroller_test.go @@ -1,3 +1,7 @@ +// Copyright 2022 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package http_test import ( @@ -322,3 +326,18 @@ func testResponseControllerEnableFullDuplex(t *testing.T, mode testMode) { } pw.Close() } + +func TestIssue58237(t *testing.T) { + cst := newClientServerTest(t, http2Mode, HandlerFunc(func(w ResponseWriter, req *Request) { + ctl := NewResponseController(w) + if err := ctl.SetReadDeadline(time.Now().Add(1 * time.Millisecond)); err != nil { + t.Errorf("ctl.SetReadDeadline() = %v, want nil", err) + } + time.Sleep(10 * time.Millisecond) + })) + res, err := cst.c.Get(cst.ts.URL) + if err != nil { + t.Fatal(err) + } + res.Body.Close() +} diff --git a/src/net/http/roundtrip.go b/src/net/http/roundtrip.go index 49ea1a71ed..08c270179a 100644 --- a/src/net/http/roundtrip.go +++ b/src/net/http/roundtrip.go @@ -6,10 +6,10 @@ package http -// RoundTrip implements the RoundTripper interface. +// RoundTrip implements the [RoundTripper] interface. // // For higher-level HTTP client support (such as handling of cookies -// and redirects), see Get, Post, and the Client type. +// and redirects), see [Get], [Post], and the [Client] type. // // Like the RoundTripper interface, the error types returned // by RoundTrip are unspecified. diff --git a/src/net/http/roundtrip_js.go b/src/net/http/roundtrip_js.go index cbf978af18..04c241eb4c 100644 --- a/src/net/http/roundtrip_js.go +++ b/src/net/http/roundtrip_js.go @@ -56,7 +56,7 @@ var jsFetchMissing = js.Global().Get("fetch").IsUndefined() var jsFetchDisabled = js.Global().Get("process").Type() == js.TypeObject && strings.HasPrefix(js.Global().Get("process").Get("argv0").String(), "node") -// RoundTrip implements the RoundTripper interface using the WHATWG Fetch API. +// RoundTrip implements the [RoundTripper] interface using the WHATWG Fetch API. func (t *Transport) RoundTrip(req *Request) (*Response, error) { // The Transport has a documented contract that states that if the DialContext or // DialTLSContext functions are set, they will be used to set up the connections. 
diff --git a/src/net/http/serve_test.go b/src/net/http/serve_test.go index 0c76f1bcc4..9df6ab426c 100644 --- a/src/net/http/serve_test.go +++ b/src/net/http/serve_test.go @@ -597,6 +597,22 @@ func TestServeWithSlashRedirectForHostPatterns(t *testing.T) { } } +// Test that we don't attempt trailing-slash redirect on a path that already has +// a trailing slash. +// See issue #65624. +func TestMuxNoSlashRedirectWithTrailingSlash(t *testing.T) { + mux := NewServeMux() + mux.HandleFunc("/{x}/", func(w ResponseWriter, r *Request) { + fmt.Fprintln(w, "ok") + }) + w := httptest.NewRecorder() + req, _ := NewRequest("GET", "/", nil) + mux.ServeHTTP(w, req) + if g, w := w.Code, 404; g != w { + t.Errorf("got %d, want %d", g, w) + } +} + func TestShouldRedirectConcurrency(t *testing.T) { run(t, testShouldRedirectConcurrency) } func testShouldRedirectConcurrency(t *testing.T, mode testMode) { mux := NewServeMux() @@ -748,7 +764,17 @@ func testServerReadTimeout(t *testing.T, mode testMode) { }), func(ts *httptest.Server) { ts.Config.ReadHeaderTimeout = -1 // don't time out while reading headers ts.Config.ReadTimeout = timeout + t.Logf("Server.Config.ReadTimeout = %v", timeout) }) + + var retries atomic.Int32 + cst.c.Transport.(*Transport).Proxy = func(*Request) (*url.URL, error) { + if retries.Add(1) != 1 { + return nil, errors.New("too many retries") + } + return nil, nil + } + pr, pw := io.Pipe() res, err := cst.c.Post(cst.ts.URL, "text/apocryphal", pr) if err != nil { @@ -776,7 +802,34 @@ func testServerWriteTimeout(t *testing.T, mode testMode) { errc <- err }), func(ts *httptest.Server) { ts.Config.WriteTimeout = timeout + t.Logf("Server.Config.WriteTimeout = %v", timeout) }) + + // The server's WriteTimeout parameter also applies to reads during the TLS + // handshake. 
The client makes the last write during the handshake, and if
+	// the server happens to time out during the read of that write, the client
+	// may think that the connection was accepted even though the server thinks
+	// it timed out.
+	//
+	// The client only notices that the server connection is gone when it goes
+	// to actually write the request — and when that fails, it retries
+	// internally (the same as if the server had closed the connection due to a
+	// racing idle-timeout).
+	//
+	// With unlucky and very stable scheduling (as may be the case with the fake wasm
+	// net stack), this can result in an infinite retry loop that doesn't
+	// propagate the error up far enough for us to adjust the WriteTimeout.
+	//
+	// To avoid that problem, we explicitly forbid internal retries by rejecting
+	// them in a Proxy hook in the transport.
+	var retries atomic.Int32
+	cst.c.Transport.(*Transport).Proxy = func(*Request) (*url.URL, error) {
+		if retries.Add(1) != 1 {
+			return nil, errors.New("too many retries")
+		}
+		return nil, nil
+	}
+
 	res, err := cst.c.Get(cst.ts.URL)
 	if err != nil {
 		// Probably caused by the write timeout expiring before the handler runs.
@@ -2659,7 +2712,7 @@ func TestRedirectContentTypeAndBody(t *testing.T) {
 		wantCT   string
 		wantBody string
 	}{
-		{MethodGet, nil, "text/html; charset=utf-8", "<a href=\"/foo\">Found</a>.\n\n"},
+		{MethodGet, nil, "text/html; charset=utf-8", "<a href=\"/foo\">Found</a>.\n"},
 		{MethodHead, nil, "text/html; charset=utf-8", ""},
 		{MethodPost, nil, "", ""},
 		{MethodDelete, nil, "", ""},
@@ -4799,6 +4852,16 @@ func TestServerValidatesHeaders(t *testing.T) {
 		{"Foo : bar\r\n", 400},
 		{"Foo\t: bar\r\n", 400},
 
+		// Empty header keys are invalid.
+		// See RFC 7230, Section 3.2.
+		{": empty key\r\n", 400},
+
+		// Requests with invalid Content-Length headers should be rejected
+		// regardless of the presence of a Transfer-Encoding header.
+		// Check out RFC 9110, Section 8.6 and RFC 9112, Section 6.3.3.
+ {"Content-Length: notdigits\r\n", 400}, + {"Content-Length: notdigits\r\nTransfer-Encoding: chunked\r\n\r\n0\r\n\r\n", 400}, + {"foo: foo foo\r\n", 200}, // LWS space is okay {"foo: foo\tfoo\r\n", 200}, // LWS tab is okay {"foo: foo\x00foo\r\n", 400}, // CTL 0x00 in value is bad @@ -5752,10 +5815,19 @@ func testServerCancelsReadTimeoutWhenIdle(t *testing.T, mode testMode) { } }), func(ts *httptest.Server) { ts.Config.ReadTimeout = timeout + t.Logf("Server.Config.ReadTimeout = %v", timeout) }) defer cst.close() ts := cst.ts + var retries atomic.Int32 + cst.c.Transport.(*Transport).Proxy = func(*Request) (*url.URL, error) { + if retries.Add(1) != 1 { + return nil, errors.New("too many retries") + } + return nil, nil + } + c := ts.Client() res, err := c.Get(ts.URL) @@ -6980,3 +7052,24 @@ func testDisableContentLength(t *testing.T, mode testMode) { t.Fatal(err) } } + +func TestErrorContentLength(t *testing.T) { run(t, testErrorContentLength) } +func testErrorContentLength(t *testing.T, mode testMode) { + const errorBody = "an error occurred" + cst := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { + w.Header().Set("Content-Length", "1000") + Error(w, errorBody, 400) + })) + res, err := cst.c.Get(cst.ts.URL) + if err != nil { + t.Fatalf("Get(%q) = %v", cst.ts.URL, err) + } + defer res.Body.Close() + body, err := io.ReadAll(res.Body) + if err != nil { + t.Fatalf("io.ReadAll(res.Body) = %v", err) + } + if string(body) != errorBody+"\n" { + t.Fatalf("read body: %q, want %q", string(body), errorBody) + } +} diff --git a/src/net/http/server.go b/src/net/http/server.go index 7fa785dfee..fa953d842e 100644 --- a/src/net/http/server.go +++ b/src/net/http/server.go @@ -61,7 +61,7 @@ var ( // A Handler responds to an HTTP request. // -// ServeHTTP should write reply headers and data to the [ResponseWriter] +// [Handler.ServeHTTP] should write reply headers and data to the [ResponseWriter] // and then return. 
Returning signals that the request is finished; it // is not valid to use the [ResponseWriter] or read from the // [Request.Body] after or concurrently with the completion of the @@ -161,8 +161,8 @@ type ResponseWriter interface { // The Flusher interface is implemented by ResponseWriters that allow // an HTTP handler to flush buffered data to the client. // -// The default HTTP/1.x and HTTP/2 ResponseWriter implementations -// support Flusher, but ResponseWriter wrappers may not. Handlers +// The default HTTP/1.x and HTTP/2 [ResponseWriter] implementations +// support [Flusher], but ResponseWriter wrappers may not. Handlers // should always test for this ability at runtime. // // Note that even for ResponseWriters that support Flush, @@ -177,7 +177,7 @@ type Flusher interface { // The Hijacker interface is implemented by ResponseWriters that allow // an HTTP handler to take over the connection. // -// The default ResponseWriter for HTTP/1.x connections supports +// The default [ResponseWriter] for HTTP/1.x connections supports // Hijacker, but HTTP/2 connections intentionally do not. // ResponseWriter wrappers may also not support Hijacker. Handlers // should always test for this ability at runtime. @@ -211,7 +211,7 @@ type Hijacker interface { // if the client has disconnected before the response is ready. // // Deprecated: the CloseNotifier interface predates Go's context package. -// New code should use Request.Context instead. +// New code should use [Request.Context] instead. type CloseNotifier interface { // CloseNotify returns a channel that receives at most a // single value (true) when the client connection has gone @@ -505,7 +505,7 @@ func (c *response) EnableFullDuplex() error { return nil } -// TrailerPrefix is a magic prefix for ResponseWriter.Header map keys +// TrailerPrefix is a magic prefix for [ResponseWriter.Header] map keys // that, if present, signals that the map entry is actually for // the response trailers, and not the response headers. 
The prefix // is stripped after the ServeHTTP call finishes and the values are @@ -571,13 +571,12 @@ type writerOnly struct { io.Writer } -// ReadFrom is here to optimize copying from an *os.File regular file -// to a *net.TCPConn with sendfile, or from a supported src type such +// ReadFrom is here to optimize copying from an [*os.File] regular file +// to a [*net.TCPConn] with sendfile, or from a supported src type such // as a *net.TCPConn on Linux with splice. func (w *response) ReadFrom(src io.Reader) (n int64, err error) { - bufp := copyBufPool.Get().(*[]byte) - buf := *bufp - defer copyBufPool.Put(bufp) + buf := getCopyBuf() + defer putCopyBuf(buf) // Our underlying w.conn.rwc is usually a *TCPConn (with its // own ReadFrom method). If not, just fall back to the normal @@ -807,11 +806,18 @@ var ( bufioWriter4kPool sync.Pool ) -var copyBufPool = sync.Pool{ - New: func() any { - b := make([]byte, 32*1024) - return &b - }, +const copyBufPoolSize = 32 * 1024 + +var copyBufPool = sync.Pool{New: func() any { return new([copyBufPoolSize]byte) }} + +func getCopyBuf() []byte { + return copyBufPool.Get().(*[copyBufPoolSize]byte)[:] +} +func putCopyBuf(b []byte) { + if len(b) != copyBufPoolSize { + panic("trying to put back buffer of the wrong size in the copyBufPool") + } + copyBufPool.Put((*[copyBufPoolSize]byte)(b)) } func bufioWriterPool(size int) *sync.Pool { @@ -861,7 +867,7 @@ func putBufioWriter(bw *bufio.Writer) { // DefaultMaxHeaderBytes is the maximum permitted size of the headers // in an HTTP request. -// This can be overridden by setting Server.MaxHeaderBytes. +// This can be overridden by setting [Server.MaxHeaderBytes]. const DefaultMaxHeaderBytes = 1 << 20 // 1 MB func (srv *Server) maxHeaderBytes() int { @@ -934,11 +940,11 @@ func (ecr *expectContinueReader) Close() error { } // TimeFormat is the time format to use when generating times in HTTP -// headers. It is like time.RFC1123 but hard-codes GMT as the time +// headers. 
It is like [time.RFC1123] but hard-codes GMT as the time // zone. The time being formatted must be in UTC for Format to // generate the correct format. // -// For parsing this time format, see ParseTime. +// For parsing this time format, see [ParseTime]. const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) @@ -1584,13 +1590,13 @@ func (w *response) bodyAllowed() bool { // The Writers are wired together like: // // 1. *response (the ResponseWriter) -> -// 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes -> +// 2. (*response).w, a [*bufio.Writer] of bufferBeforeChunkingSize bytes -> // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) // and which writes the chunk headers, if needed -> // 4. conn.bufw, a *bufio.Writer of default (4kB) bytes, writing to -> // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write // and populates c.werr with it if so, but otherwise writes to -> -// 6. the rwc, the net.Conn. +// 6. the rwc, the [net.Conn]. // // TODO(bradfitz): short-circuit some of the buffering when the // initial header contains both a Content-Type and Content-Length. @@ -2091,8 +2097,8 @@ func (w *response) sendExpectationFailed() { w.finishRequest() } -// Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter -// and a Hijacker. +// Hijack implements the [Hijacker.Hijack] method. Our response is both a [ResponseWriter] +// and a [Hijacker]. func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { if w.handlerDone.Load() { panic("net/http: Hijack called after ServeHTTP finished") @@ -2152,7 +2158,7 @@ func requestBodyRemains(rc io.ReadCloser) bool { // The HandlerFunc type is an adapter to allow the use of // ordinary functions as HTTP handlers. If f is a function // with the appropriate signature, HandlerFunc(f) is a -// Handler that calls f. +// [Handler] that calls f. 
type HandlerFunc func(ResponseWriter, *Request) // ServeHTTP calls f(w, r). @@ -2167,8 +2173,20 @@ func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { // writes are done to w. // The error message should be plain text. func Error(w ResponseWriter, error string, code int) { - w.Header().Set("Content-Type", "text/plain; charset=utf-8") - w.Header().Set("X-Content-Type-Options", "nosniff") + h := w.Header() + // We delete headers which might be valid for some other content, + // but not anymore for the error content. + h.Del("Content-Length") + h.Del("Content-Encoding") + h.Del("Etag") + h.Del("Last-Modified") + // There might be cache control headers set for some other content, + // but we reset it to no-cache for the error content. + h.Set("Cache-Control", "no-cache") + // There might be content type already set, but we reset it to + // text/plain for the error message. + h.Set("Content-Type", "text/plain; charset=utf-8") + h.Set("X-Content-Type-Options", "nosniff") w.WriteHeader(code) fmt.Fprintln(w, error) } @@ -2211,9 +2229,9 @@ func StripPrefix(prefix string, h Handler) Handler { // which may be a path relative to the request path. // // The provided code should be in the 3xx range and is usually -// StatusMovedPermanently, StatusFound or StatusSeeOther. +// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther]. // -// If the Content-Type header has not been set, Redirect sets it +// If the Content-Type header has not been set, [Redirect] sets it // to "text/html; charset=utf-8" and writes a small HTML body. // Setting the Content-Type header to any value, including nil, // disables that behavior. @@ -2267,7 +2285,7 @@ func Redirect(w ResponseWriter, r *Request, url string, code int) { // Shouldn't send the body for POST or HEAD; that leaves GET. if !hadCT && r.Method == "GET" { - body := "" + StatusText(code) + ".\n" + body := "" + StatusText(code) + "." 
fmt.Fprintln(w, body) } } @@ -2301,7 +2319,7 @@ func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { // status code. // // The provided code should be in the 3xx range and is usually -// StatusMovedPermanently, StatusFound or StatusSeeOther. +// [StatusMovedPermanently], [StatusFound] or [StatusSeeOther]. func RedirectHandler(url string, code int) Handler { return &redirectHandler{url, code} } @@ -2329,7 +2347,7 @@ func RedirectHandler(url string, code int) Handler { // [METHOD ][HOST]/[PATH] // // All three parts are optional; "/" is a valid pattern. -// If METHOD is present, it must be followed by a single space. +// If METHOD is present, it must be followed by at least one space or tab. // // Literal (that is, non-wildcard) parts of a pattern match // the corresponding parts of a request case-sensitively. @@ -2372,7 +2390,7 @@ func RedirectHandler(url string, code int) Handler { // There is one exception to this rule, for backwards compatibility: // if two patterns would otherwise conflict and one has a host while the other does not, // then the pattern with the host takes precedence. -// If a pattern passed [ServeMux.Handle] or [ServeMux.HandleFunc] conflicts with +// If a pattern passed to [ServeMux.Handle] or [ServeMux.HandleFunc] conflicts with // another pattern that is already registered, those functions panic. // // As an example of the general rule, "/images/thumbnails/" is more specific than "/images/", @@ -2388,7 +2406,7 @@ func RedirectHandler(url string, code int) Handler { // // # Trailing-slash redirection // -// Consider a ServeMux with a handler for a subtree, registered using a trailing slash or "..." wildcard. +// Consider a [ServeMux] with a handler for a subtree, registered using a trailing slash or "..." wildcard. // If the ServeMux receives a request for the subtree root without a trailing slash, // it redirects the request by adding the trailing slash. 
// This behavior can be overridden with a separate registration for the path without @@ -2431,12 +2449,12 @@ type ServeMux struct { mux121 serveMux121 // used only when GODEBUG=httpmuxgo121=1 } -// NewServeMux allocates and returns a new ServeMux. +// NewServeMux allocates and returns a new [ServeMux]. func NewServeMux() *ServeMux { return &ServeMux{} } -// DefaultServeMux is the default ServeMux used by Serve. +// DefaultServeMux is the default [ServeMux] used by [Serve]. var DefaultServeMux = &defaultServeMux var defaultServeMux ServeMux @@ -2571,8 +2589,8 @@ func (mux *ServeMux) matchOrRedirect(host, method, path string, u *url.URL) (_ * n, matches := mux.tree.match(host, method, path) // If we have an exact match, or we were asked not to try trailing-slash redirection, - // then we're done. - if !exactMatch(n, path) && u != nil { + // or the URL already has a trailing slash, then we're done. + if !exactMatch(n, path) && u != nil && !strings.HasSuffix(path, "/") { // If there is an exact match with a trailing slash, then redirect. path += "/" n2, _ := mux.tree.match(host, method, path) @@ -2778,7 +2796,7 @@ func (mux *ServeMux) registerErr(patstr string, handler Handler) error { // // The handler is typically nil, in which case [DefaultServeMux] is used. // -// HTTP/2 support is only enabled if the Listener returns *tls.Conn +// HTTP/2 support is only enabled if the Listener returns [*tls.Conn] // connections and they were configured with "h2" in the TLS // Config.NextProtos. // @@ -2918,13 +2936,13 @@ type Server struct { } // Close immediately closes all active net.Listeners and any -// connections in state StateNew, StateActive, or StateIdle. For a -// graceful shutdown, use Shutdown. +// connections in state [StateNew], [StateActive], or [StateIdle]. For a +// graceful shutdown, use [Server.Shutdown]. // // Close does not attempt to close (and does not even know about) // any hijacked connections, such as WebSockets. 
// -// Close returns any error returned from closing the Server's +// Close returns any error returned from closing the [Server]'s // underlying Listener(s). func (srv *Server) Close() error { srv.inShutdown.Store(true) @@ -2962,16 +2980,16 @@ const shutdownPollIntervalMax = 500 * time.Millisecond // indefinitely for connections to return to idle and then shut down. // If the provided context expires before the shutdown is complete, // Shutdown returns the context's error, otherwise it returns any -// error returned from closing the Server's underlying Listener(s). +// error returned from closing the [Server]'s underlying Listener(s). // -// When Shutdown is called, Serve, ListenAndServe, and -// ListenAndServeTLS immediately return ErrServerClosed. Make sure the +// When Shutdown is called, [Serve], [ListenAndServe], and +// [ListenAndServeTLS] immediately return [ErrServerClosed]. Make sure the // program doesn't exit and waits instead for Shutdown to return. // // Shutdown does not attempt to close nor wait for hijacked // connections such as WebSockets. The caller of Shutdown should // separately notify such long-lived connections of shutdown and wait -// for them to close, if desired. See RegisterOnShutdown for a way to +// for them to close, if desired. See [Server.RegisterOnShutdown] for a way to // register shutdown notification functions. // // Once Shutdown has been called on a server, it may not be reused; @@ -3014,7 +3032,7 @@ func (srv *Server) Shutdown(ctx context.Context) error { } } -// RegisterOnShutdown registers a function to call on Shutdown. +// RegisterOnShutdown registers a function to call on [Server.Shutdown]. // This can be used to gracefully shutdown connections that have // undergone ALPN protocol upgrade or that have been hijacked. 
// This function should start protocol-specific graceful shutdown, @@ -3062,7 +3080,7 @@ func (s *Server) closeListenersLocked() error { } // A ConnState represents the state of a client connection to a server. -// It's used by the optional Server.ConnState hook. +// It's used by the optional [Server.ConnState] hook. type ConnState int const ( @@ -3139,7 +3157,7 @@ func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { // behavior doesn't match that of many proxies, and the mismatch can lead to // security issues. // -// AllowQuerySemicolons should be invoked before Request.ParseForm is called. +// AllowQuerySemicolons should be invoked before [Request.ParseForm] is called. func AllowQuerySemicolons(h Handler) Handler { return HandlerFunc(func(w ResponseWriter, r *Request) { if strings.Contains(r.URL.RawQuery, ";") { @@ -3156,13 +3174,13 @@ func AllowQuerySemicolons(h Handler) Handler { } // ListenAndServe listens on the TCP network address srv.Addr and then -// calls Serve to handle requests on incoming connections. +// calls [Serve] to handle requests on incoming connections. // Accepted connections are configured to enable TCP keep-alives. // // If srv.Addr is blank, ":http" is used. // -// ListenAndServe always returns a non-nil error. After Shutdown or Close, -// the returned error is ErrServerClosed. +// ListenAndServe always returns a non-nil error. After [Server.Shutdown] or [Server.Close], +// the returned error is [ErrServerClosed]. func (srv *Server) ListenAndServe() error { if srv.shuttingDown() { return ErrServerClosed @@ -3202,20 +3220,20 @@ func (srv *Server) shouldConfigureHTTP2ForServe() bool { return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) } -// ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, -// and ListenAndServeTLS methods after a call to Shutdown or Close. 
+// ErrServerClosed is returned by the [Server.Serve], [ServeTLS], [ListenAndServe], +// and [ListenAndServeTLS] methods after a call to [Server.Shutdown] or [Server.Close]. var ErrServerClosed = errors.New("http: Server closed") // Serve accepts incoming connections on the Listener l, creating a // new service goroutine for each. The service goroutines read requests and // then call srv.Handler to reply to them. // -// HTTP/2 support is only enabled if the Listener returns *tls.Conn +// HTTP/2 support is only enabled if the Listener returns [*tls.Conn] // connections and they were configured with "h2" in the TLS // Config.NextProtos. // // Serve always returns a non-nil error and closes l. -// After Shutdown or Close, the returned error is ErrServerClosed. +// After [Server.Shutdown] or [Server.Close], the returned error is [ErrServerClosed]. func (srv *Server) Serve(l net.Listener) error { if fn := testHookServerServe; fn != nil { fn(srv, l) // call hook with unwrapped listener @@ -3285,14 +3303,14 @@ func (srv *Server) Serve(l net.Listener) error { // setup and then read requests, calling srv.Handler to reply to them. // // Files containing a certificate and matching private key for the -// server must be provided if neither the Server's +// server must be provided if neither the [Server]'s // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. // If the certificate is signed by a certificate authority, the // certFile should be the concatenation of the server's certificate, // any intermediates, and the CA's certificate. // -// ServeTLS always returns a non-nil error. After Shutdown or Close, the -// returned error is ErrServerClosed. +// ServeTLS always returns a non-nil error. After [Server.Shutdown] or [Server.Close], the +// returned error is [ErrServerClosed]. 
func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig // before we clone it and create the TLS Listener. @@ -3421,7 +3439,7 @@ func logf(r *Request, format string, args ...any) { } // ListenAndServe listens on the TCP network address addr and then calls -// Serve with handler to handle requests on incoming connections. +// [Serve] with handler to handle requests on incoming connections. // Accepted connections are configured to enable TCP keep-alives. // // The handler is typically nil, in which case [DefaultServeMux] is used. @@ -3443,11 +3461,11 @@ func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { } // ListenAndServeTLS listens on the TCP network address srv.Addr and -// then calls ServeTLS to handle requests on incoming TLS connections. +// then calls [ServeTLS] to handle requests on incoming TLS connections. // Accepted connections are configured to enable TCP keep-alives. // // Filenames containing a certificate and matching private key for the -// server must be provided if neither the Server's TLSConfig.Certificates +// server must be provided if neither the [Server]'s TLSConfig.Certificates // nor TLSConfig.GetCertificate are populated. If the certificate is // signed by a certificate authority, the certFile should be the // concatenation of the server's certificate, any intermediates, and @@ -3455,8 +3473,8 @@ func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { // // If srv.Addr is blank, ":https" is used. // -// ListenAndServeTLS always returns a non-nil error. After Shutdown or -// Close, the returned error is ErrServerClosed. +// ListenAndServeTLS always returns a non-nil error. After [Server.Shutdown] or +// [Server.Close], the returned error is [ErrServerClosed]. 
func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { if srv.shuttingDown() { return ErrServerClosed @@ -3526,17 +3544,17 @@ func (srv *Server) onceSetNextProtoDefaults() { } } -// TimeoutHandler returns a Handler that runs h with the given time limit. +// TimeoutHandler returns a [Handler] that runs h with the given time limit. // // The new Handler calls h.ServeHTTP to handle each request, but if a // call runs for longer than its time limit, the handler responds with // a 503 Service Unavailable error and the given message in its body. // (If msg is empty, a suitable default message will be sent.) -// After such a timeout, writes by h to its ResponseWriter will return -// ErrHandlerTimeout. +// After such a timeout, writes by h to its [ResponseWriter] will return +// [ErrHandlerTimeout]. // -// TimeoutHandler supports the Pusher interface but does not support -// the Hijacker or Flusher interfaces. +// TimeoutHandler supports the [Pusher] interface but does not support +// the [Hijacker] or [Flusher] interfaces. func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { return &timeoutHandler{ handler: h, @@ -3545,7 +3563,7 @@ func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { } } -// ErrHandlerTimeout is returned on ResponseWriter Write calls +// ErrHandlerTimeout is returned on [ResponseWriter] Write calls // in handlers which have timed out. var ErrHandlerTimeout = errors.New("http: Handler timeout") @@ -3634,7 +3652,7 @@ type timeoutWriter struct { var _ Pusher = (*timeoutWriter)(nil) -// Push implements the Pusher interface. +// Push implements the [Pusher] interface. 
func (tw *timeoutWriter) Push(target string, opts *PushOptions) error { if pusher, ok := tw.w.(Pusher); ok { return pusher.Push(target, opts) @@ -3719,7 +3737,7 @@ type initALPNRequest struct { h serverHandler } -// BaseContext is an exported but unadvertised http.Handler method +// BaseContext is an exported but unadvertised [http.Handler] method // recognized by x/net/http2 to pass down a context; the TLSNextProto // API predates context support so we shoehorn through the only // interface we have available. @@ -3806,7 +3824,6 @@ func numLeadingCRorLF(v []byte) (n int) { break } return - } func strSliceContains(ss []string, s string) bool { @@ -3828,7 +3845,7 @@ func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { return false } -// MaxBytesHandler returns a Handler that runs h with its ResponseWriter and Request.Body wrapped by a MaxBytesReader. +// MaxBytesHandler returns a [Handler] that runs h with its [ResponseWriter] and [Request.Body] wrapped by a MaxBytesReader. func MaxBytesHandler(h Handler, n int64) Handler { return HandlerFunc(func(w ResponseWriter, r *Request) { r2 := *r diff --git a/src/net/http/server_test.go b/src/net/http/server_test.go index e81e3bb6b0..f4aafc853b 100644 --- a/src/net/http/server_test.go +++ b/src/net/http/server_test.go @@ -250,6 +250,24 @@ func TestEscapedPathsAndPatterns(t *testing.T) { t.Run("1.21", func(t *testing.T) { run(t, true) }) } +func TestCleanPath(t *testing.T) { + for _, test := range []struct { + in, want string + }{ + {"//", "/"}, + {"/x", "/x"}, + {"//x", "/x"}, + {"x//", "/x/"}, + {"a//b/////c", "/a/b/c"}, + {"/foo/../bar/./..//baz", "/baz"}, + } { + got := cleanPath(test.in) + if got != test.want { + t.Errorf("%s: got %q, want %q", test.in, got, test.want) + } + } +} + func BenchmarkServerMatch(b *testing.B) { fn := func(w ResponseWriter, r *Request) { fmt.Fprintf(w, "OK") diff --git a/src/net/http/transfer.go b/src/net/http/transfer.go index dffff56b31..255e8bc45a 100644 --- a/src/net/http/transfer.go +++ 
b/src/net/http/transfer.go @@ -410,9 +410,8 @@ func (t *transferWriter) writeBody(w io.Writer) (err error) { // // This function is only intended for use in writeBody. func (t *transferWriter) doBodyCopy(dst io.Writer, src io.Reader) (n int64, err error) { - bufp := copyBufPool.Get().(*[]byte) - buf := *bufp - defer copyBufPool.Put(bufp) + buf := getCopyBuf() + defer putCopyBuf(buf) n, err = io.CopyBuffer(dst, src, buf) if err != nil && err != io.EOF { @@ -651,19 +650,6 @@ func (t *transferReader) parseTransferEncoding() error { return &unsupportedTEError{fmt.Sprintf("unsupported transfer encoding: %q", raw[0])} } - // RFC 7230 3.3.2 says "A sender MUST NOT send a Content-Length header field - // in any message that contains a Transfer-Encoding header field." - // - // but also: "If a message is received with both a Transfer-Encoding and a - // Content-Length header field, the Transfer-Encoding overrides the - // Content-Length. Such a message might indicate an attempt to perform - // request smuggling (Section 9.5) or response splitting (Section 9.4) and - // ought to be handled as an error. A sender MUST remove the received - // Content-Length field prior to forwarding such a message downstream." - // - // Reportedly, these appear in the wild. - delete(t.Header, "Content-Length") - t.Chunked = true return nil } @@ -671,7 +657,7 @@ func (t *transferReader) parseTransferEncoding() error { // Determine the expected body length, using RFC 7230 Section 3.3. This // function is not a method, because ultimately it should be shared by // ReadResponse and ReadRequest. 
-func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (int64, error) { +func fixLength(isResponse bool, status int, requestMethod string, header Header, chunked bool) (n int64, err error) { isRequest := !isResponse contentLens := header["Content-Length"] @@ -695,6 +681,14 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header, contentLens = header["Content-Length"] } + // Reject requests with invalid Content-Length headers. + if len(contentLens) > 0 { + n, err = parseContentLength(contentLens) + if err != nil { + return -1, err + } + } + // Logic based on response type or status if isResponse && noResponseBodyExpected(requestMethod) { return 0, nil @@ -707,17 +701,26 @@ func fixLength(isResponse bool, status int, requestMethod string, header Header, return 0, nil } + // According to RFC 9112, "If a message is received with both a + // Transfer-Encoding and a Content-Length header field, the Transfer-Encoding + // overrides the Content-Length. Such a message might indicate an attempt to + // perform request smuggling (Section 11.2) or response splitting (Section 11.1) + // and ought to be handled as an error. An intermediary that chooses to forward + // the message MUST first remove the received Content-Length field and process + // the Transfer-Encoding (as described below) prior to forwarding the message downstream." + // + // Chunked-encoding requests with either valid Content-Length + // headers or no Content-Length headers are accepted after removing + // the Content-Length field from header. 
+ // // Logic based on Transfer-Encoding if chunked { + header.Del("Content-Length") return -1, nil } + // Logic based on Content-Length if len(contentLens) > 0 { - // Logic based on Content-Length - n, err := parseContentLength(contentLens) - if err != nil { - return -1, err - } return n, nil } @@ -818,10 +821,10 @@ type body struct { onHitEOF func() // if non-nil, func to call when EOF is Read } -// ErrBodyReadAfterClose is returned when reading a Request or Response +// ErrBodyReadAfterClose is returned when reading a [Request] or [Response] // Body after the body has been closed. This typically happens when the body is -// read after an HTTP Handler calls WriteHeader or Write on its -// ResponseWriter. +// read after an HTTP [Handler] calls WriteHeader or Write on its +// [ResponseWriter]. var ErrBodyReadAfterClose = errors.New("http: invalid Read on closed Body") func (b *body) Read(p []byte) (n int, err error) { diff --git a/src/net/http/transfer_test.go b/src/net/http/transfer_test.go index 3f9ebdea7b..b1a5a93103 100644 --- a/src/net/http/transfer_test.go +++ b/src/net/http/transfer_test.go @@ -264,6 +264,12 @@ func TestTransferWriterWriteBodyReaderTypes(t *testing.T) { actualReader = reflect.TypeOf(lr.R) } else { actualReader = reflect.TypeOf(mw.CalledReader) + // We have to handle this special case for genericWriteTo in os, + // this struct is introduced to support a zero-copy optimization, + // check out https://go.dev/issue/58808 for details. 
+ if actualReader.Kind() == reflect.Struct && actualReader.PkgPath() == "os" && actualReader.Name() == "fileWithoutWriteTo" { + actualReader = actualReader.Field(1).Type + } } if tc.expectedReader != actualReader { diff --git a/src/net/http/transport.go b/src/net/http/transport.go index 1cf41a5474..75934f00de 100644 --- a/src/net/http/transport.go +++ b/src/net/http/transport.go @@ -35,8 +35,8 @@ import ( "golang.org/x/net/http/httpproxy" ) -// DefaultTransport is the default implementation of Transport and is -// used by DefaultClient. It establishes network connections as needed +// DefaultTransport is the default implementation of [Transport] and is +// used by [DefaultClient]. It establishes network connections as needed // and caches them for reuse by subsequent calls. It uses HTTP proxies // as directed by the environment variables HTTP_PROXY, HTTPS_PROXY // and NO_PROXY (or the lowercase versions thereof). @@ -53,42 +53,42 @@ var DefaultTransport RoundTripper = &Transport{ ExpectContinueTimeout: 1 * time.Second, } -// DefaultMaxIdleConnsPerHost is the default value of Transport's +// DefaultMaxIdleConnsPerHost is the default value of [Transport]'s // MaxIdleConnsPerHost. const DefaultMaxIdleConnsPerHost = 2 -// Transport is an implementation of RoundTripper that supports HTTP, +// Transport is an implementation of [RoundTripper] that supports HTTP, // HTTPS, and HTTP proxies (for either HTTP or HTTPS with CONNECT). // // By default, Transport caches connections for future re-use. // This may leave many open connections when accessing many hosts. -// This behavior can be managed using Transport's CloseIdleConnections method -// and the MaxIdleConnsPerHost and DisableKeepAlives fields. +// This behavior can be managed using [Transport.CloseIdleConnections] method +// and the [Transport.MaxIdleConnsPerHost] and [Transport.DisableKeepAlives] fields. // // Transports should be reused instead of created as needed. 
// Transports are safe for concurrent use by multiple goroutines. // // A Transport is a low-level primitive for making HTTP and HTTPS requests. -// For high-level functionality, such as cookies and redirects, see Client. +// For high-level functionality, such as cookies and redirects, see [Client]. // // Transport uses HTTP/1.1 for HTTP URLs and either HTTP/1.1 or HTTP/2 // for HTTPS URLs, depending on whether the server supports HTTP/2, -// and how the Transport is configured. The DefaultTransport supports HTTP/2. +// and how the Transport is configured. The [DefaultTransport] supports HTTP/2. // To explicitly enable HTTP/2 on a transport, use golang.org/x/net/http2 // and call ConfigureTransport. See the package docs for more about HTTP/2. // // Responses with status codes in the 1xx range are either handled // automatically (100 expect-continue) or ignored. The one // exception is HTTP status code 101 (Switching Protocols), which is -// considered a terminal status and returned by RoundTrip. To see the +// considered a terminal status and returned by [Transport.RoundTrip]. To see the // ignored 1xx responses, use the httptrace trace package's // ClientTrace.Got1xxResponse. // // Transport only retries a request upon encountering a network error // if the connection has been already been used successfully and if the -// request is idempotent and either has no body or has its Request.GetBody +// request is idempotent and either has no body or has its [Request.GetBody] // defined. HTTP requests are considered idempotent if they have HTTP methods -// GET, HEAD, OPTIONS, or TRACE; or if their Header map contains an +// GET, HEAD, OPTIONS, or TRACE; or if their [Header] map contains an // "Idempotency-Key" or "X-Idempotency-Key" entry. If the idempotency key // value is a zero-length slice, the request is treated as idempotent but the // header is not sent on the wire. 
@@ -237,7 +237,7 @@ type Transport struct { // TLSNextProto specifies how the Transport switches to an // alternate protocol (such as HTTP/2) after a TLS ALPN - // protocol negotiation. If Transport dials an TLS connection + // protocol negotiation. If Transport dials a TLS connection // with a non-empty protocol name and TLSNextProto contains a // map entry for that key (such as "h2"), then the func is // called with the request's authority (such as "example.com" @@ -453,7 +453,7 @@ func ProxyFromEnvironment(req *Request) (*url.URL, error) { return envProxyFunc()(req.URL) } -// ProxyURL returns a proxy function (for use in a Transport) +// ProxyURL returns a proxy function (for use in a [Transport]) // that always returns the same URL. func ProxyURL(fixedURL *url.URL) func(*Request) (*url.URL, error) { return func(*Request) (*url.URL, error) { @@ -752,14 +752,14 @@ func (pc *persistConn) shouldRetryRequest(req *Request, err error) bool { var ErrSkipAltProtocol = errors.New("net/http: skip alternate protocol") // RegisterProtocol registers a new protocol with scheme. -// The Transport will pass requests using the given scheme to rt. +// The [Transport] will pass requests using the given scheme to rt. // It is rt's responsibility to simulate HTTP request semantics. // // RegisterProtocol can be used by other packages to provide // implementations of protocol schemes like "ftp" or "file". // -// If rt.RoundTrip returns ErrSkipAltProtocol, the Transport will -// handle the RoundTrip itself for that one request, as if the +// If rt.RoundTrip returns [ErrSkipAltProtocol], the Transport will +// handle the [Transport.RoundTrip] itself for that one request, as if the // protocol were not registered. func (t *Transport) RegisterProtocol(scheme string, rt RoundTripper) { t.altMu.Lock() @@ -799,9 +799,9 @@ func (t *Transport) CloseIdleConnections() { } // CancelRequest cancels an in-flight request by closing its connection. 
-// CancelRequest should only be called after RoundTrip has returned. +// CancelRequest should only be called after [Transport.RoundTrip] has returned. // -// Deprecated: Use Request.WithContext to create a request with a +// Deprecated: Use [Request.WithContext] to create a request with a // cancelable context instead. CancelRequest cannot cancel HTTP/2 // requests. func (t *Transport) CancelRequest(req *Request) { @@ -1478,6 +1478,7 @@ func (t *Transport) dialConnFor(w *wantConn) { defer w.afterDial() ctx := w.getCtxForDial() if ctx == nil { + t.decConnsPerHost(w.key) return } @@ -1761,6 +1762,7 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers if t.OnProxyConnectResponse != nil { err = t.OnProxyConnectResponse(ctx, cm.proxyURL, connectReq, resp) if err != nil { + conn.Close() return nil, err } } @@ -2555,16 +2557,18 @@ type writeRequest struct { continueCh <-chan struct{} } -type httpError struct { - err string - timeout bool +// httpTimeoutError represents a timeout. +// It implements net.Error and wraps context.DeadlineExceeded. +type timeoutError struct { + err string } -func (e *httpError) Error() string { return e.err } -func (e *httpError) Timeout() bool { return e.timeout } -func (e *httpError) Temporary() bool { return true } +func (e *timeoutError) Error() string { return e.err } +func (e *timeoutError) Timeout() bool { return true } +func (e *timeoutError) Temporary() bool { return true } +func (e *timeoutError) Is(err error) bool { return err == context.DeadlineExceeded } -var errTimeout error = &httpError{err: "net/http: timeout awaiting response headers", timeout: true} +var errTimeout error = &timeoutError{"net/http: timeout awaiting response headers"} // errRequestCanceled is set to be identical to the one from h2 to facilitate // testing. 
diff --git a/src/net/http/transport_internal_test.go b/src/net/http/transport_internal_test.go index 2ed637e9f0..dc3259fadf 100644 --- a/src/net/http/transport_internal_test.go +++ b/src/net/http/transport_internal_test.go @@ -58,8 +58,8 @@ func TestTransportPersistConnReadLoopEOF(t *testing.T) { <-pc.closech err = pc.closed - if !isTransportReadFromServerError(err) && err != errServerClosedIdle { - t.Errorf("pc.closed = %#v, %v; want errServerClosedIdle or transportReadFromServerError", err, err) + if !isNothingWrittenError(err) && !isTransportReadFromServerError(err) && err != errServerClosedIdle { + t.Errorf("pc.closed = %#v, %v; want errServerClosedIdle or transportReadFromServerError, or nothingWrittenError", err, err) } } diff --git a/src/net/http/transport_test.go b/src/net/http/transport_test.go index 8c09de70ff..55222a6763 100644 --- a/src/net/http/transport_test.go +++ b/src/net/http/transport_test.go @@ -730,6 +730,56 @@ func testTransportMaxConnsPerHost(t *testing.T, mode testMode) { } } +func TestTransportMaxConnsPerHostDialCancellation(t *testing.T) { + run(t, testTransportMaxConnsPerHostDialCancellation, + testNotParallel, // because test uses SetPendingDialHooks + []testMode{http1Mode, https1Mode, http2Mode}, + ) +} + +func testTransportMaxConnsPerHostDialCancellation(t *testing.T, mode testMode) { + CondSkipHTTP2(t) + + h := HandlerFunc(func(w ResponseWriter, r *Request) { + _, err := w.Write([]byte("foo")) + if err != nil { + t.Fatalf("Write: %v", err) + } + }) + + cst := newClientServerTest(t, mode, h) + defer cst.close() + ts := cst.ts + c := ts.Client() + tr := c.Transport.(*Transport) + tr.MaxConnsPerHost = 1 + + // This request is cancelled when dial is queued, which preempts dialing. 
+ ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + SetPendingDialHooks(cancel, nil) + defer SetPendingDialHooks(nil, nil) + + req, _ := NewRequestWithContext(ctx, "GET", ts.URL, nil) + _, err := c.Do(req) + if !errors.Is(err, context.Canceled) { + t.Errorf("expected error %v, got %v", context.Canceled, err) + } + + // This request should succeed. + SetPendingDialHooks(nil, nil) + req, _ = NewRequest("GET", ts.URL, nil) + resp, err := c.Do(req) + if err != nil { + t.Fatalf("request failed: %v", err) + } + defer resp.Body.Close() + _, err = io.ReadAll(resp.Body) + if err != nil { + t.Fatalf("read body failed: %v", err) + } +} + func TestTransportRemovesDeadIdleConnections(t *testing.T) { run(t, testTransportRemovesDeadIdleConnections, []testMode{http1Mode}) } @@ -1523,6 +1573,24 @@ func TestOnProxyConnectResponse(t *testing.T) { c := proxy.Client() + var ( + dials atomic.Int32 + closes atomic.Int32 + ) + c.Transport.(*Transport).DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + conn, err := net.Dial(network, addr) + if err != nil { + return nil, err + } + dials.Add(1) + return noteCloseConn{ + Conn: conn, + closeFunc: func() { + closes.Add(1) + }, + }, nil + } + c.Transport.(*Transport).Proxy = ProxyURL(pu) c.Transport.(*Transport).OnProxyConnectResponse = func(ctx context.Context, proxyURL *url.URL, connectReq *Request, connectRes *Response) error { if proxyURL.String() != pu.String() { @@ -1534,10 +1602,23 @@ func TestOnProxyConnectResponse(t *testing.T) { } return tcase.err } + wantCloses := int32(0) if _, err := c.Head(ts.URL); err != nil { + wantCloses = 1 if tcase.err != nil && !strings.Contains(err.Error(), tcase.err.Error()) { t.Errorf("got %v, want %v", err, tcase.err) } + } else { + if tcase.err != nil { + t.Errorf("got %v, want nil", err) + } + } + if got, want := dials.Load(), int32(1); got != want { + t.Errorf("got %v dials, want %v", got, want) + } + // #64804: If OnProxyConnectResponse returns 
an error, we should close the conn. + if got, want := closes.Load(), wantCloses; got != want { + t.Errorf("got %v closes, want %v", got, want) } } } @@ -3499,6 +3580,7 @@ func testTransportNoReuseAfterEarlyResponse(t *testing.T, mode testMode) { c net.Conn } var getOkay bool + var copying sync.WaitGroup closeConn := func() { sconn.Lock() defer sconn.Unlock() @@ -3510,7 +3592,10 @@ func testTransportNoReuseAfterEarlyResponse(t *testing.T, mode testMode) { } } } - defer closeConn() + defer func() { + closeConn() + copying.Wait() + }() ts := newClientServerTest(t, mode, HandlerFunc(func(w ResponseWriter, r *Request) { if r.Method == "GET" { @@ -3522,7 +3607,12 @@ func testTransportNoReuseAfterEarlyResponse(t *testing.T, mode testMode) { sconn.c = conn sconn.Unlock() conn.Write([]byte("HTTP/1.1 200 OK\r\nContent-Length: 3\r\n\r\nfoo")) // keep-alive - go io.Copy(io.Discard, conn) + + copying.Add(1) + go func() { + io.Copy(io.Discard, conn) + copying.Done() + }() })).ts c := ts.Client() diff --git a/src/net/http/triv.go b/src/net/http/triv.go index f614922c24..c1696425cd 100644 --- a/src/net/http/triv.go +++ b/src/net/http/triv.go @@ -34,7 +34,7 @@ type Counter struct { n int } -// This makes Counter satisfy the expvar.Var interface, so we can export +// This makes Counter satisfy the [expvar.Var] interface, so we can export // it directly. func (ctr *Counter) String() string { ctr.mu.Lock() diff --git a/src/net/interface.go b/src/net/interface.go index e1c9a2e2ff..20ac07d31a 100644 --- a/src/net/interface.go +++ b/src/net/interface.go @@ -114,7 +114,7 @@ func Interfaces() ([]Interface, error) { // addresses. // // The returned list does not identify the associated interface; use -// Interfaces and Interface.Addrs for more detail. +// Interfaces and [Interface.Addrs] for more detail. 
func InterfaceAddrs() ([]Addr, error) { ifat, err := interfaceAddrTable(nil) if err != nil { @@ -127,7 +127,7 @@ func InterfaceAddrs() ([]Addr, error) { // // On Solaris, it returns one of the logical network interfaces // sharing the logical data link; for more precision use -// InterfaceByName. +// [InterfaceByName]. func InterfaceByIndex(index int) (*Interface, error) { if index <= 0 { return nil, &OpError{Op: "route", Net: "ip+net", Source: nil, Addr: nil, Err: errInvalidInterfaceIndex} diff --git a/src/net/internal/cgotest/empty_test.go b/src/net/internal/cgotest/empty_test.go new file mode 100644 index 0000000000..c4f601d571 --- /dev/null +++ b/src/net/internal/cgotest/empty_test.go @@ -0,0 +1,13 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package cgotest + +import "testing" + +// Nothing to test here. +// The test is that the package compiles at all. +// See resstate.go. +func Test(t *testing.T) { +} diff --git a/src/net/cgo_unix_cgo_darwin.go b/src/net/internal/cgotest/resstate.go similarity index 89% rename from src/net/cgo_unix_cgo_darwin.go rename to src/net/internal/cgotest/resstate.go index 40d5e426f2..1b4871109e 100644 --- a/src/net/cgo_unix_cgo_darwin.go +++ b/src/net/internal/cgotest/resstate.go @@ -2,9 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !netgo && cgo && darwin +//go:build cgo && darwin -package net +package cgotest /* #include diff --git a/src/net/internal/socktest/switch.go b/src/net/internal/socktest/switch.go index 3c37b6ff80..dea6d9288c 100644 --- a/src/net/internal/socktest/switch.go +++ b/src/net/internal/socktest/switch.go @@ -133,7 +133,7 @@ const ( // If the filter returns a non-nil error, the execution of system call // will be canceled and the system call function returns the non-nil // error. 
-// It can return a non-nil AfterFilter for filtering after the +// It can return a non-nil [AfterFilter] for filtering after the // execution of the system call. type Filter func(*Status) (AfterFilter, error) diff --git a/src/net/internal/socktest/sys_unix.go b/src/net/internal/socktest/sys_unix.go index 712462abf4..3eef26c70b 100644 --- a/src/net/internal/socktest/sys_unix.go +++ b/src/net/internal/socktest/sys_unix.go @@ -8,7 +8,7 @@ package socktest import "syscall" -// Socket wraps syscall.Socket. +// Socket wraps [syscall.Socket]. func (sw *Switch) Socket(family, sotype, proto int) (s int, err error) { sw.once.Do(sw.init) diff --git a/src/net/internal/socktest/sys_windows.go b/src/net/internal/socktest/sys_windows.go index 1c42e5c7f3..2f02446075 100644 --- a/src/net/internal/socktest/sys_windows.go +++ b/src/net/internal/socktest/sys_windows.go @@ -9,7 +9,7 @@ import ( "syscall" ) -// WSASocket wraps syscall.WSASocket. +// WSASocket wraps [syscall.WSASocket]. func (sw *Switch) WSASocket(family, sotype, proto int32, protinfo *syscall.WSAProtocolInfo, group uint32, flags uint32) (s syscall.Handle, err error) { sw.once.Do(sw.init) @@ -41,7 +41,7 @@ func (sw *Switch) WSASocket(family, sotype, proto int32, protinfo *syscall.WSAPr return s, nil } -// Closesocket wraps syscall.Closesocket. +// Closesocket wraps [syscall.Closesocket]. func (sw *Switch) Closesocket(s syscall.Handle) (err error) { so := sw.sockso(s) if so == nil { @@ -71,7 +71,7 @@ func (sw *Switch) Closesocket(s syscall.Handle) (err error) { return nil } -// Connect wraps syscall.Connect. +// Connect wraps [syscall.Connect]. func (sw *Switch) Connect(s syscall.Handle, sa syscall.Sockaddr) (err error) { so := sw.sockso(s) if so == nil { @@ -100,7 +100,7 @@ func (sw *Switch) Connect(s syscall.Handle, sa syscall.Sockaddr) (err error) { return nil } -// ConnectEx wraps syscall.ConnectEx. +// ConnectEx wraps [syscall.ConnectEx]. 
func (sw *Switch) ConnectEx(s syscall.Handle, sa syscall.Sockaddr, b *byte, n uint32, nwr *uint32, o *syscall.Overlapped) (err error) { so := sw.sockso(s) if so == nil { @@ -129,7 +129,7 @@ func (sw *Switch) ConnectEx(s syscall.Handle, sa syscall.Sockaddr, b *byte, n ui return nil } -// Listen wraps syscall.Listen. +// Listen wraps [syscall.Listen]. func (sw *Switch) Listen(s syscall.Handle, backlog int) (err error) { so := sw.sockso(s) if so == nil { @@ -158,7 +158,7 @@ func (sw *Switch) Listen(s syscall.Handle, backlog int) (err error) { return nil } -// AcceptEx wraps syscall.AcceptEx. +// AcceptEx wraps [syscall.AcceptEx]. func (sw *Switch) AcceptEx(ls syscall.Handle, as syscall.Handle, b *byte, rxdatalen uint32, laddrlen uint32, raddrlen uint32, rcvd *uint32, overlapped *syscall.Overlapped) error { so := sw.sockso(ls) if so == nil { diff --git a/src/net/ip.go b/src/net/ip.go index d51ba10eec..6083dd8bf9 100644 --- a/src/net/ip.go +++ b/src/net/ip.go @@ -38,7 +38,7 @@ type IP []byte // An IPMask is a bitmask that can be used to manipulate // IP addresses for IP addressing and routing. // -// See type IPNet and func ParseCIDR for details. +// See type [IPNet] and func [ParseCIDR] for details. type IPMask []byte // An IPNet represents an IP network. @@ -72,9 +72,9 @@ func IPv4Mask(a, b, c, d byte) IPMask { return p } -// CIDRMask returns an IPMask consisting of 'ones' 1 bits +// CIDRMask returns an [IPMask] consisting of 'ones' 1 bits // followed by 0s up to a total length of 'bits' bits. -// For a mask of this form, CIDRMask is the inverse of IPMask.Size. +// For a mask of this form, CIDRMask is the inverse of [IPMask.Size]. func CIDRMask(ones, bits int) IPMask { if bits != 8*IPv4len && bits != 8*IPv6len { return nil @@ -324,8 +324,8 @@ func ipEmptyString(ip IP) string { return ip.String() } -// MarshalText implements the encoding.TextMarshaler interface. 
-// The encoding is the same as returned by String, with one exception: +// MarshalText implements the [encoding.TextMarshaler] interface. +// The encoding is the same as returned by [IP.String], with one exception: // When len(ip) is zero, it returns an empty slice. func (ip IP) MarshalText() ([]byte, error) { if len(ip) == 0 { @@ -337,8 +337,8 @@ func (ip IP) MarshalText() ([]byte, error) { return []byte(ip.String()), nil } -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// The IP address is expected in a form accepted by ParseIP. +// UnmarshalText implements the [encoding.TextUnmarshaler] interface. +// The IP address is expected in a form accepted by [ParseIP]. func (ip *IP) UnmarshalText(text []byte) error { if len(text) == 0 { *ip = nil diff --git a/src/net/ip_test.go b/src/net/ip_test.go index acc2310be1..11c0b75246 100644 --- a/src/net/ip_test.go +++ b/src/net/ip_test.go @@ -21,7 +21,6 @@ var parseIPTests = []struct { {"::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, {"::ffff:7f01:0203", IPv4(127, 1, 2, 3)}, {"0:0:0:0:0000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, - {"0:0:0:0:000000:ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, {"0:0:0:0::ffff:127.1.2.3", IPv4(127, 1, 2, 3)}, {"2001:4860:0:2001::68", IP{0x20, 0x01, 0x48, 0x60, 0, 0, 0x20, 0x01, 0, 0, 0, 0, 0, 0, 0x00, 0x68}}, @@ -37,6 +36,10 @@ var parseIPTests = []struct { {"fe80::1%lo0", nil}, {"fe80::1%911", nil}, {"", nil}, + //6 zeroes in one group + {"0:0:0:0:000000:ffff:127.1.2.3", nil}, + //5 zeroes in one group edge case + {"0:0:0:0:00000:ffff:127.1.2.3", nil}, {"a1:a2:a3:a4::b1:b2:b3:b4", nil}, // Issue 6628 {"127.001.002.003", nil}, {"::ffff:127.001.002.003", nil}, diff --git a/src/net/iprawsock.go b/src/net/iprawsock.go index c3fd6deac5..4c06b1b5ac 100644 --- a/src/net/iprawsock.go +++ b/src/net/iprawsock.go @@ -72,7 +72,7 @@ func (a *IPAddr) opAddr() Addr { // recommended, because it will return at most one of the host name's // IP addresses. 
// -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. func ResolveIPAddr(network, address string) (*IPAddr, error) { if network == "" { // a hint wildcard for Go 1.0 undocumented behavior @@ -94,14 +94,14 @@ func ResolveIPAddr(network, address string) (*IPAddr, error) { return addrs.forResolve(network, address).(*IPAddr), nil } -// IPConn is the implementation of the Conn and PacketConn interfaces +// IPConn is the implementation of the [Conn] and [PacketConn] interfaces // for IP network connections. type IPConn struct { conn } // SyscallConn returns a raw network connection. -// This implements the syscall.Conn interface. +// This implements the [syscall.Conn] interface. func (c *IPConn) SyscallConn() (syscall.RawConn, error) { if !c.ok() { return nil, syscall.EINVAL @@ -121,7 +121,7 @@ func (c *IPConn) ReadFromIP(b []byte) (int, *IPAddr, error) { return n, addr, err } -// ReadFrom implements the PacketConn ReadFrom method. +// ReadFrom implements the [PacketConn] ReadFrom method. func (c *IPConn) ReadFrom(b []byte) (int, Addr, error) { if !c.ok() { return 0, nil, syscall.EINVAL @@ -154,7 +154,7 @@ func (c *IPConn) ReadMsgIP(b, oob []byte) (n, oobn, flags int, addr *IPAddr, err return } -// WriteToIP acts like WriteTo but takes an IPAddr. +// WriteToIP acts like [IPConn.WriteTo] but takes an [IPAddr]. func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (int, error) { if !c.ok() { return 0, syscall.EINVAL @@ -166,7 +166,7 @@ func (c *IPConn) WriteToIP(b []byte, addr *IPAddr) (int, error) { return n, err } -// WriteTo implements the PacketConn WriteTo method. +// WriteTo implements the [PacketConn] WriteTo method. 
func (c *IPConn) WriteTo(b []byte, addr Addr) (int, error) { if !c.ok() { return 0, syscall.EINVAL @@ -201,7 +201,7 @@ func (c *IPConn) WriteMsgIP(b, oob []byte, addr *IPAddr) (n, oobn int, err error func newIPConn(fd *netFD) *IPConn { return &IPConn{conn{fd}} } -// DialIP acts like Dial for IP networks. +// DialIP acts like [Dial] for IP networks. // // The network must be an IP network name; see func Dial for details. // @@ -220,7 +220,7 @@ func DialIP(network string, laddr, raddr *IPAddr) (*IPConn, error) { return c, nil } -// ListenIP acts like ListenPacket for IP networks. +// ListenIP acts like [ListenPacket] for IP networks. // // The network must be an IP network name; see func Dial for details. // diff --git a/src/net/lookup.go b/src/net/lookup.go index 15165970b6..3ec2660786 100644 --- a/src/net/lookup.go +++ b/src/net/lookup.go @@ -181,8 +181,8 @@ func (r *Resolver) getLookupGroup() *singleflight.Group { // LookupHost looks up the given host using the local resolver. // It returns a slice of that host's addresses. // -// LookupHost uses context.Background internally; to specify the context, use -// Resolver.LookupHost. +// LookupHost uses [context.Background] internally; to specify the context, use +// [Resolver.LookupHost]. func LookupHost(host string) (addrs []string, err error) { return DefaultResolver.LookupHost(context.Background(), host) } @@ -417,8 +417,8 @@ func ipAddrsEface(addrs []IPAddr) []any { // LookupPort looks up the port for the given network and service. // -// LookupPort uses context.Background internally; to specify the context, use -// Resolver.LookupPort. +// LookupPort uses [context.Background] internally; to specify the context, use +// [Resolver.LookupPort]. 
func LookupPort(network, service string) (port int, err error) { return DefaultResolver.LookupPort(context.Background(), network, service) } @@ -449,7 +449,7 @@ func (r *Resolver) LookupPort(ctx context.Context, network, service string) (por // LookupCNAME returns the canonical name for the given host. // Callers that do not care about the canonical name can call -// LookupHost or LookupIP directly; both take care of resolving +// [LookupHost] or [LookupIP] directly; both take care of resolving // the canonical name as part of the lookup. // // A canonical name is the final name after following zero @@ -461,15 +461,15 @@ func (r *Resolver) LookupPort(ctx context.Context, network, service string) (por // The returned canonical name is validated to be a properly // formatted presentation-format domain name. // -// LookupCNAME uses context.Background internally; to specify the context, use -// Resolver.LookupCNAME. +// LookupCNAME uses [context.Background] internally; to specify the context, use +// [Resolver.LookupCNAME]. func LookupCNAME(host string) (cname string, err error) { return DefaultResolver.LookupCNAME(context.Background(), host) } // LookupCNAME returns the canonical name for the given host. // Callers that do not care about the canonical name can call -// LookupHost or LookupIP directly; both take care of resolving +// [LookupHost] or [LookupIP] directly; both take care of resolving // the canonical name as part of the lookup. // // A canonical name is the final name after following zero @@ -491,7 +491,7 @@ func (r *Resolver) LookupCNAME(ctx context.Context, host string) (string, error) return cname, nil } -// LookupSRV tries to resolve an SRV query of the given service, +// LookupSRV tries to resolve an [SRV] query of the given service, // protocol, and domain name. The proto is "tcp" or "udp". // The returned records are sorted by priority and randomized // by weight within a priority. 
@@ -509,7 +509,7 @@ func LookupSRV(service, proto, name string) (cname string, addrs []*SRV, err err return DefaultResolver.LookupSRV(context.Background(), service, proto, name) } -// LookupSRV tries to resolve an SRV query of the given service, +// LookupSRV tries to resolve an [SRV] query of the given service, // protocol, and domain name. The proto is "tcp" or "udp". // The returned records are sorted by priority and randomized // by weight within a priority. @@ -554,8 +554,8 @@ func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) ( // invalid names, those records are filtered out and an error // will be returned alongside the remaining results, if any. // -// LookupMX uses context.Background internally; to specify the context, use -// Resolver.LookupMX. +// LookupMX uses [context.Background] internally; to specify the context, use +// [Resolver.LookupMX]. func LookupMX(name string) ([]*MX, error) { return DefaultResolver.LookupMX(context.Background(), name) } @@ -594,8 +594,8 @@ func (r *Resolver) LookupMX(ctx context.Context, name string) ([]*MX, error) { // invalid names, those records are filtered out and an error // will be returned alongside the remaining results, if any. // -// LookupNS uses context.Background internally; to specify the context, use -// Resolver.LookupNS. +// LookupNS uses [context.Background] internally; to specify the context, use +// [Resolver.LookupNS]. func LookupNS(name string) ([]*NS, error) { return DefaultResolver.LookupNS(context.Background(), name) } @@ -629,8 +629,8 @@ func (r *Resolver) LookupNS(ctx context.Context, name string) ([]*NS, error) { // LookupTXT returns the DNS TXT records for the given domain name. // -// LookupTXT uses context.Background internally; to specify the context, use -// Resolver.LookupTXT. +// LookupTXT uses [context.Background] internally; to specify the context, use +// [Resolver.LookupTXT]. 
func LookupTXT(name string) ([]string, error) { return DefaultResolver.lookupTXT(context.Background(), name) } @@ -648,10 +648,10 @@ func (r *Resolver) LookupTXT(ctx context.Context, name string) ([]string, error) // out and an error will be returned alongside the remaining results, if any. // // When using the host C library resolver, at most one result will be -// returned. To bypass the host resolver, use a custom Resolver. +// returned. To bypass the host resolver, use a custom [Resolver]. // -// LookupAddr uses context.Background internally; to specify the context, use -// Resolver.LookupAddr. +// LookupAddr uses [context.Background] internally; to specify the context, use +// [Resolver.LookupAddr]. func LookupAddr(addr string) (names []string, err error) { return DefaultResolver.LookupAddr(context.Background(), addr) } diff --git a/src/net/lookup_test.go b/src/net/lookup_test.go index 57ac9a933a..b32591a718 100644 --- a/src/net/lookup_test.go +++ b/src/net/lookup_test.go @@ -1509,22 +1509,6 @@ func TestLookupPortIPNetworkString(t *testing.T) { }) } -func allResolvers(t *testing.T, f func(t *testing.T)) { - t.Run("default resolver", f) - t.Run("forced go resolver", func(t *testing.T) { - if fixup := forceGoDNS(); fixup != nil { - defer fixup() - f(t) - } - }) - t.Run("forced cgo resolver", func(t *testing.T) { - if fixup := forceCgoDNS(); fixup != nil { - defer fixup() - f(t) - } - }) -} - func TestLookupNoSuchHost(t *testing.T) { mustHaveExternalNetwork(t) diff --git a/src/net/lookup_windows.go b/src/net/lookup_windows.go index 3048f3269b..946622761c 100644 --- a/src/net/lookup_windows.go +++ b/src/net/lookup_windows.go @@ -54,7 +54,10 @@ func lookupProtocol(ctx context.Context, name string) (int, error) { } ch := make(chan result) // unbuffered go func() { - acquireThread() + if err := acquireThread(ctx); err != nil { + ch <- result{err: mapErr(err)} + return + } defer releaseThread() runtime.LockOSThread() defer runtime.UnlockOSThread() @@ -111,7 +114,13 @@ 
func (r *Resolver) lookupIP(ctx context.Context, network, name string) ([]IPAddr } getaddr := func() ([]IPAddr, error) { - acquireThread() + if err := acquireThread(ctx); err != nil { + return nil, &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() hints := syscall.AddrinfoW{ Family: family, @@ -200,8 +209,14 @@ func (r *Resolver) lookupPort(ctx context.Context, network, service string) (int return lookupPortMap(network, service) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing + if err := acquireThread(ctx); err != nil { + return 0, &DNSError{ + Name: network + "/" + service, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var hints syscall.AddrinfoW @@ -263,8 +278,14 @@ func (r *Resolver) lookupCNAME(ctx context.Context, name string) (string, error) return r.goLookupCNAME(ctx, name, order, conf) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing + if err := acquireThread(ctx); err != nil { + return "", &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var rec *syscall.DNSRecord e := syscall.DnsQuery(name, syscall.DNS_TYPE_CNAME, 0, nil, &rec, nil) @@ -288,8 +309,14 @@ func (r *Resolver) lookupSRV(ctx context.Context, service, proto, name string) ( if systemConf().mustUseGoResolver(r) { return r.goLookupSRV(ctx, service, proto, name) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. 
- acquireThread() + // TODO(bradfitz): finish ctx plumbing + if err := acquireThread(ctx); err != nil { + return "", nil, &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var target string if service == "" && proto == "" { @@ -318,8 +345,14 @@ func (r *Resolver) lookupMX(ctx context.Context, name string) ([]*MX, error) { if systemConf().mustUseGoResolver(r) { return r.goLookupMX(ctx, name) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing. + if err := acquireThread(ctx); err != nil { + return nil, &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var rec *syscall.DNSRecord e := syscall.DnsQuery(name, syscall.DNS_TYPE_MX, 0, nil, &rec, nil) @@ -342,8 +375,14 @@ func (r *Resolver) lookupNS(ctx context.Context, name string) ([]*NS, error) { if systemConf().mustUseGoResolver(r) { return r.goLookupNS(ctx, name) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing. + if err := acquireThread(ctx); err != nil { + return nil, &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var rec *syscall.DNSRecord e := syscall.DnsQuery(name, syscall.DNS_TYPE_NS, 0, nil, &rec, nil) @@ -365,8 +404,14 @@ func (r *Resolver) lookupTXT(ctx context.Context, name string) ([]string, error) if systemConf().mustUseGoResolver(r) { return r.goLookupTXT(ctx, name) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing. 
+ if err := acquireThread(ctx); err != nil { + return nil, &DNSError{ + Name: name, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() var rec *syscall.DNSRecord e := syscall.DnsQuery(name, syscall.DNS_TYPE_TEXT, 0, nil, &rec, nil) @@ -393,8 +438,14 @@ func (r *Resolver) lookupAddr(ctx context.Context, addr string) ([]string, error return r.goLookupPTR(ctx, addr, order, conf) } - // TODO(bradfitz): finish ctx plumbing. Nothing currently depends on this. - acquireThread() + // TODO(bradfitz): finish ctx plumbing. + if err := acquireThread(ctx); err != nil { + return nil, &DNSError{ + Name: addr, + Err: mapErr(err).Error(), + IsTimeout: ctx.Err() == context.DeadlineExceeded, + } + } defer releaseThread() arpa, err := reverseaddr(addr) if err != nil { diff --git a/src/net/mail/message.go b/src/net/mail/message.go index af516fc30f..bb40ccd20a 100644 --- a/src/net/mail/message.go +++ b/src/net/mail/message.go @@ -13,7 +13,6 @@ Notable divergences: - The full range of spacing (the CFWS syntax element) is not supported, such as breaking addresses across lines. - No unicode normalization is performed. - - The special characters ()[]:;@\, are allowed to appear unquoted in names. - A leading From line is permitted, as in mbox format (RFC 4155). */ package mail @@ -280,7 +279,7 @@ func (a *Address) String() string { // Add quotes if needed quoteLocal := false for i, r := range local { - if isAtext(r, false, false) { + if isAtext(r, false) { continue } if r == '.' { @@ -444,7 +443,7 @@ func (p *addrParser) parseAddress(handleGroup bool) ([]*Address, error) { if !p.consume('<') { atext := true for _, r := range displayName { - if !isAtext(r, true, false) { + if !isAtext(r, true) { atext = false break } @@ -479,7 +478,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) { // handle empty group. 
p.skipSpace() if p.consume(';') { - p.skipCFWS() + if !p.skipCFWS() { + return nil, errors.New("mail: misformatted parenthetical comment") + } return group, nil } @@ -496,7 +497,9 @@ func (p *addrParser) consumeGroupList() ([]*Address, error) { return nil, errors.New("mail: misformatted parenthetical comment") } if p.consume(';') { - p.skipCFWS() + if !p.skipCFWS() { + return nil, errors.New("mail: misformatted parenthetical comment") + } break } if !p.consume(',') { @@ -566,6 +569,12 @@ func (p *addrParser) consumePhrase() (phrase string, err error) { var words []string var isPrevEncoded bool for { + // obs-phrase allows CFWS after one word + if len(words) > 0 { + if !p.skipCFWS() { + return "", errors.New("mail: misformatted parenthetical comment") + } + } // word = atom / quoted-string var word string p.skipSpace() @@ -661,7 +670,6 @@ Loop: // If dot is true, consumeAtom parses an RFC 5322 dot-atom instead. // If permissive is true, consumeAtom will not fail on: // - leading/trailing/double dots in the atom (see golang.org/issue/4938) -// - special characters (RFC 5322 3.2.3) except '<', '>', ':' and '"' (see golang.org/issue/21018) func (p *addrParser) consumeAtom(dot bool, permissive bool) (atom string, err error) { i := 0 @@ -672,7 +680,7 @@ Loop: case size == 1 && r == utf8.RuneError: return "", fmt.Errorf("mail: invalid utf-8 in address: %q", p.s) - case size == 0 || !isAtext(r, dot, permissive): + case size == 0 || !isAtext(r, dot): break Loop default: @@ -850,18 +858,13 @@ func (e charsetError) Error() string { // isAtext reports whether r is an RFC 5322 atext character. // If dot is true, period is included. -// If permissive is true, RFC 5322 3.2.3 specials is included, -// except '<', '>', ':' and '"'. -func isAtext(r rune, dot, permissive bool) bool { +func isAtext(r rune, dot bool) bool { switch r { case '.': return dot // RFC 5322 3.2.3. 
specials - case '(', ')', '[', ']', ';', '@', '\\', ',': - return permissive - - case '<', '>', '"', ':': + case '(', ')', '<', '>', '[', ']', ':', ';', '@', '\\', ',', '"': // RFC 5322 3.2.3. specials return false } return isVchar(r) diff --git a/src/net/mail/message_test.go b/src/net/mail/message_test.go index 1e1bb4092f..1f2f62afbf 100644 --- a/src/net/mail/message_test.go +++ b/src/net/mail/message_test.go @@ -385,8 +385,11 @@ func TestAddressParsingError(t *testing.T) { 13: {"group not closed: null@example.com", "expected comma"}, 14: {"group: first@example.com, second@example.com;", "group with multiple addresses"}, 15: {"john.doe", "missing '@' or angle-addr"}, - 16: {"john.doe@", "no angle-addr"}, + 16: {"john.doe@", "missing '@' or angle-addr"}, 17: {"John Doe@foo.bar", "no angle-addr"}, + 18: {" group: null@example.com; (asd", "misformatted parenthetical comment"}, + 19: {" group: ; (asd", "misformatted parenthetical comment"}, + 20: {`(John) Doe `, "missing word in phrase:"}, } for i, tc := range mustErrTestCases { @@ -436,6 +439,15 @@ func TestAddressParsing(t *testing.T) { Address: "john.q.public@example.com", }}, }, + // Comment in display name + { + `John (middle) Doe `, + []*Address{{ + Name: "John Doe", + Address: "jdoe@machine.example", + }}, + }, + // Display name is quoted string, so comment is not a comment { `"John (middle) Doe" `, []*Address{{ @@ -443,20 +455,6 @@ func TestAddressParsing(t *testing.T) { Address: "jdoe@machine.example", }}, }, - { - `John (middle) Doe `, - []*Address{{ - Name: "John (middle) Doe", - Address: "jdoe@machine.example", - }}, - }, - { - `John !@M@! Doe `, - []*Address{{ - Name: "John !@M@! 
Doe", - Address: "jdoe@machine.example", - }}, - }, { `"John Doe" `, []*Address{{ @@ -788,6 +786,26 @@ func TestAddressParsing(t *testing.T) { }, }, }, + // Comment in group display name + { + `group (comment:): a@example.com, b@example.com;`, + []*Address{ + { + Address: "a@example.com", + }, + { + Address: "b@example.com", + }, + }, + }, + { + `x(:"):"@a.example;("@b.example;`, + []*Address{ + { + Address: `@a.example;(@b.example`, + }, + }, + }, } for _, test := range tests { if len(test.exp) == 1 { diff --git a/src/net/main_conf_test.go b/src/net/main_conf_test.go index 307ff5dd8c..bb140240ed 100644 --- a/src/net/main_conf_test.go +++ b/src/net/main_conf_test.go @@ -2,11 +2,28 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build !plan9 - package net -import "testing" +import ( + "context" + "runtime" + "testing" +) + +func allResolvers(t *testing.T, f func(t *testing.T)) { + t.Run("default resolver", f) + t.Run("forced go resolver", func(t *testing.T) { + // On plan9 the forceGoDNS might not force the go resolver, currently + // it is only forced when the Resolver.Dial field is populated. + // See conf.go mustUseGoResolver. + defer forceGoDNS()() + f(t) + }) + t.Run("forced cgo resolver", func(t *testing.T) { + defer forceCgoDNS()() + f(t) + }) +} // forceGoDNS forces the resolver configuration to use the pure Go resolver // and returns a fixup function to restore the old settings. @@ -25,7 +42,6 @@ func forceGoDNS() func() { // forceCgoDNS forces the resolver configuration to use the cgo resolver // and returns a fixup function to restore the old settings. -// (On non-Unix systems forceCgoDNS returns nil.) 
func forceCgoDNS() func() { c := systemConf() oldGo := c.netGo @@ -48,12 +64,34 @@ func TestForceCgoDNS(t *testing.T) { if order != hostLookupCgo { t.Fatalf("hostLookupOrder returned: %v, want cgo", order) } + order, _ = systemConf().addrLookupOrder(nil, "192.0.2.1") + if order != hostLookupCgo { + t.Fatalf("addrLookupOrder returned: %v, want cgo", order) + } + if systemConf().mustUseGoResolver(nil) { + t.Fatal("mustUseGoResolver = true, want false") + } } func TestForceGoDNS(t *testing.T) { + var resolver *Resolver + if runtime.GOOS == "plan9" { + resolver = &Resolver{ + Dial: func(_ context.Context, _, _ string) (Conn, error) { + panic("unreachable") + }, + } + } defer forceGoDNS()() - order, _ := systemConf().hostLookupOrder(nil, "go.dev") + order, _ := systemConf().hostLookupOrder(resolver, "go.dev") if order == hostLookupCgo { t.Fatalf("hostLookupOrder returned: %v, want go resolver order", order) } + order, _ = systemConf().addrLookupOrder(resolver, "192.0.2.1") + if order == hostLookupCgo { + t.Fatalf("addrLookupOrder returned: %v, want go resolver order", order) + } + if !systemConf().mustUseGoResolver(resolver) { + t.Fatal("mustUseGoResolver = false, want true") + } } diff --git a/src/net/main_noconf_test.go b/src/net/main_noconf_test.go deleted file mode 100644 index cdd7c54805..0000000000 --- a/src/net/main_noconf_test.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build plan9 - -package net - -import "runtime" - -// See main_conf_test.go for what these (don't) do. -func forceGoDNS() func() { - switch runtime.GOOS { - case "plan9": - return func() {} - default: - return nil - } -} - -// See main_conf_test.go for what these (don't) do. 
-func forceCgoDNS() func() { return nil } diff --git a/src/net/mockserver_test.go b/src/net/mockserver_test.go index 46b2a57321..4d5e79a592 100644 --- a/src/net/mockserver_test.go +++ b/src/net/mockserver_test.go @@ -8,8 +8,12 @@ import ( "context" "errors" "fmt" + "internal/testenv" + "log" "os" "path/filepath" + "runtime" + "strconv" "sync" "testing" "time" @@ -56,12 +60,7 @@ func newLocalListener(t testing.TB, network string, lcOpt ...*ListenConfig) List switch network { case "tcp": if supportsIPv4() { - if !supportsIPv6() { - return listen("tcp4", "127.0.0.1:0") - } - if ln, err := Listen("tcp4", "127.0.0.1:0"); err == nil { - return ln - } + return listen("tcp4", "127.0.0.1:0") } if supportsIPv6() { return listen("tcp6", "[::1]:0") @@ -506,3 +505,127 @@ func packetTransceiver(c PacketConn, wb []byte, dst Addr, ch chan<- error) { ch <- fmt.Errorf("read %d; want %d", n, len(wb)) } } + +func spawnTestSocketPair(t testing.TB, net string) (client, server Conn) { + t.Helper() + + ln := newLocalListener(t, net) + defer ln.Close() + var cerr, serr error + acceptDone := make(chan struct{}) + go func() { + server, serr = ln.Accept() + acceptDone <- struct{}{} + }() + client, cerr = Dial(ln.Addr().Network(), ln.Addr().String()) + <-acceptDone + if cerr != nil { + if server != nil { + server.Close() + } + t.Fatal(cerr) + } + if serr != nil { + if client != nil { + client.Close() + } + t.Fatal(serr) + } + return client, server +} + +func startTestSocketPeer(t testing.TB, conn Conn, op string, chunkSize, totalSize int) (func(t testing.TB), error) { + t.Helper() + + if runtime.GOOS == "windows" { + // TODO(panjf2000): Windows has not yet implemented FileConn, + // remove this when it's implemented in https://go.dev/issues/9503. 
+ t.Fatalf("startTestSocketPeer is not supported on %s", runtime.GOOS) + } + + f, err := conn.(interface{ File() (*os.File, error) }).File() + if err != nil { + return nil, err + } + + cmd := testenv.Command(t, os.Args[0]) + cmd.Env = []string{ + "GO_NET_TEST_TRANSFER=1", + "GO_NET_TEST_TRANSFER_OP=" + op, + "GO_NET_TEST_TRANSFER_CHUNK_SIZE=" + strconv.Itoa(chunkSize), + "GO_NET_TEST_TRANSFER_TOTAL_SIZE=" + strconv.Itoa(totalSize), + "TMPDIR=" + os.Getenv("TMPDIR"), + } + cmd.ExtraFiles = append(cmd.ExtraFiles, f) + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + + if err := cmd.Start(); err != nil { + return nil, err + } + + cmdCh := make(chan error, 1) + go func() { + err := cmd.Wait() + conn.Close() + f.Close() + cmdCh <- err + }() + + return func(tb testing.TB) { + err := <-cmdCh + if err != nil { + tb.Errorf("process exited with error: %v", err) + } + }, nil +} + +func init() { + if os.Getenv("GO_NET_TEST_TRANSFER") == "" { + return + } + defer os.Exit(0) + + f := os.NewFile(uintptr(3), "splice-test-conn") + defer f.Close() + + conn, err := FileConn(f) + if err != nil { + log.Fatal(err) + } + + var chunkSize int + if chunkSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_TRANSFER_CHUNK_SIZE")); err != nil { + log.Fatal(err) + } + buf := make([]byte, chunkSize) + + var totalSize int + if totalSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_TRANSFER_TOTAL_SIZE")); err != nil { + log.Fatal(err) + } + + var fn func([]byte) (int, error) + switch op := os.Getenv("GO_NET_TEST_TRANSFER_OP"); op { + case "r": + fn = conn.Read + case "w": + defer conn.Close() + + fn = conn.Write + default: + log.Fatalf("unknown op %q", op) + } + + var n int + for count := 0; count < totalSize; count += n { + if count+chunkSize > totalSize { + buf = buf[:totalSize-count] + } + + var err error + if n, err = fn(buf); err != nil { + return + } + } +} diff --git a/src/net/net.go b/src/net/net.go index 396713ce4a..d0db65286b 100644 --- a/src/net/net.go +++ b/src/net/net.go @@ -8,8 +8,8 @@ 
TCP/IP, UDP, domain name resolution, and Unix domain sockets. Although the package provides access to low-level networking primitives, most clients will need only the basic interface provided -by the Dial, Listen, and Accept functions and the associated -Conn and Listener interfaces. The crypto/tls package uses +by the [Dial], [Listen], and Accept functions and the associated +[Conn] and [Listener] interfaces. The crypto/tls package uses the same interfaces and similar Dial and Listen functions. The Dial function connects to a server: @@ -39,23 +39,26 @@ The Listen function creates servers: # Name Resolution The method for resolving domain names, whether indirectly with functions like Dial -or directly with functions like LookupHost and LookupAddr, varies by operating system. +or directly with functions like [LookupHost] and [LookupAddr], varies by operating system. On Unix systems, the resolver has two options for resolving names. It can use a pure Go resolver that sends DNS requests directly to the servers listed in /etc/resolv.conf, or it can use a cgo-based resolver that calls C library routines such as getaddrinfo and getnameinfo. -By default the pure Go resolver is used, because a blocked DNS request consumes -only a goroutine, while a blocked C call consumes an operating system thread. +On Unix the pure Go resolver is preferred over the cgo resolver, because a blocked DNS +request consumes only a goroutine, while a blocked C call consumes an operating system thread. 
When cgo is available, the cgo-based resolver is used instead under a variety of conditions: on systems that do not let programs make direct DNS requests (OS X), when the LOCALDOMAIN environment variable is present (even if empty), when the RES_OPTIONS or HOSTALIASES environment variable is non-empty, when the ASR_CONFIG environment variable is non-empty (OpenBSD only), when /etc/resolv.conf or /etc/nsswitch.conf specify the use of features that the -Go resolver does not implement, and when the name being looked up ends in .local -or is an mDNS name. +Go resolver does not implement. + +On all systems (except Plan 9), when the cgo resolver is being used +this package applies a concurrent cgo lookup limit to prevent the system +from running out of system threads. Currently, it is limited to 500 concurrent lookups. The resolver decision can be overridden by setting the netdns value of the GODEBUG environment variable (see package runtime) to go or cgo, as in: @@ -95,8 +98,8 @@ import ( // Addr represents a network end point address. // -// The two methods Network and String conventionally return strings -// that can be passed as the arguments to Dial, but the exact form +// The two methods [Addr.Network] and [Addr.String] conventionally return strings +// that can be passed as the arguments to [Dial], but the exact form // and meaning of the strings is up to the implementation. type Addr interface { Network() string // name of the network (for example, "tcp", "udp") @@ -284,7 +287,7 @@ func (c *conn) SetWriteBuffer(bytes int) error { return nil } -// File returns a copy of the underlying os.File. +// File returns a copy of the underlying [os.File]. // It is the caller's responsibility to close f when finished. // Closing c does not affect f, and closing f does not affect c. // @@ -645,12 +648,12 @@ func (e *DNSError) Error() string { // Timeout reports whether the DNS lookup is known to have timed out. 
// This is not always known; a DNS lookup may fail due to a timeout -// and return a DNSError for which Timeout returns false. +// and return a [DNSError] for which Timeout returns false. func (e *DNSError) Timeout() bool { return e.IsTimeout } // Temporary reports whether the DNS error is known to be temporary. // This is not always known; a DNS lookup may fail due to a temporary -// error and return a DNSError for which Temporary returns false. +// error and return a [DNSError] for which Temporary returns false. func (e *DNSError) Temporary() bool { return e.IsTimeout || e.IsTemporary } // errClosed exists just so that the docs for ErrClosed don't mention @@ -664,15 +667,53 @@ var errClosed = poll.ErrNetClosing // errors.Is(err, net.ErrClosed). var ErrClosed error = errClosed -type writerOnly struct { - io.Writer +// noReadFrom can be embedded alongside another type to +// hide the ReadFrom method of that other type. +type noReadFrom struct{} + +// ReadFrom hides another ReadFrom method. +// It should never be called. +func (noReadFrom) ReadFrom(io.Reader) (int64, error) { + panic("can't happen") +} + +// tcpConnWithoutReadFrom implements all the methods of *TCPConn other +// than ReadFrom. This is used to permit ReadFrom to call io.Copy +// without leading to a recursive call to ReadFrom. +type tcpConnWithoutReadFrom struct { + noReadFrom + *TCPConn } // Fallback implementation of io.ReaderFrom's ReadFrom, when sendfile isn't // applicable. -func genericReadFrom(w io.Writer, r io.Reader) (n int64, err error) { +func genericReadFrom(c *TCPConn, r io.Reader) (n int64, err error) { // Use wrapper to hide existing r.ReadFrom from io.Copy. - return io.Copy(writerOnly{w}, r) + return io.Copy(tcpConnWithoutReadFrom{TCPConn: c}, r) +} + +// noWriteTo can be embedded alongside another type to +// hide the WriteTo method of that other type. +type noWriteTo struct{} + +// WriteTo hides another WriteTo method. +// It should never be called. 
+func (noWriteTo) WriteTo(io.Writer) (int64, error) { + panic("can't happen") +} + +// tcpConnWithoutWriteTo implements all the methods of *TCPConn other +// than WriteTo. This is used to permit WriteTo to call io.Copy +// without leading to a recursive call to WriteTo. +type tcpConnWithoutWriteTo struct { + noWriteTo + *TCPConn +} + +// Fallback implementation of io.WriterTo's WriteTo, when zero-copy isn't applicable. +func genericWriteTo(c *TCPConn, w io.Writer) (n int64, err error) { + // Use wrapper to hide existing w.WriteTo from io.Copy. + return io.Copy(w, tcpConnWithoutWriteTo{TCPConn: c}) } // Limit the number of concurrent cgo-using goroutines, because @@ -685,11 +726,16 @@ var threadLimit chan struct{} var threadOnce sync.Once -func acquireThread() { +func acquireThread(ctx context.Context) error { threadOnce.Do(func() { threadLimit = make(chan struct{}, concurrentThreadsLimit()) }) - threadLimit <- struct{}{} + select { + case threadLimit <- struct{}{}: + return nil + case <-ctx.Done(): + return ctx.Err() + } } func releaseThread() { @@ -718,7 +764,7 @@ var ( // WriteTo writes contents of the buffers to w. // -// WriteTo implements io.WriterTo for Buffers. +// WriteTo implements [io.WriterTo] for [Buffers]. // // WriteTo modifies the slice v as well as v[i] for 0 <= i < len(v), // but does not modify v[i][j] for any i, j. @@ -740,7 +786,7 @@ func (v *Buffers) WriteTo(w io.Writer) (n int64, err error) { // Read from the buffers. // -// Read implements io.Reader for Buffers. +// Read implements [io.Reader] for [Buffers]. // // Read modifies the slice v as well as v[i] for 0 <= i < len(v), // but does not modify v[i][j] for any i, j. 
diff --git a/src/net/net_fake.go b/src/net/net_fake.go index 6b6fdc728e..f7eb28e01a 100644 --- a/src/net/net_fake.go +++ b/src/net/net_fake.go @@ -22,7 +22,6 @@ import ( var ( sockets sync.Map // fakeSockAddr → *netFD - fakeSocketIDs sync.Map // fakeNetFD.id → *netFD fakePorts sync.Map // int (port #) → *netFD nextPortCounter atomic.Int32 ) @@ -325,14 +324,27 @@ func (ffd *fakeNetFD) accept(laddr Addr) (*netFD, error) { incoming []*netFD ok bool ) + expired := ffd.readDeadline.Load().expired select { - case <-ffd.readDeadline.Load().expired: + case <-expired: return nil, os.ErrDeadlineExceeded case incoming, ok = <-ffd.incoming: if !ok { return nil, ErrClosed } + select { + case <-expired: + ffd.incoming <- incoming + return nil, os.ErrDeadlineExceeded + default: + } case incoming, ok = <-ffd.incomingFull: + select { + case <-expired: + ffd.incomingFull <- incoming + return nil, os.ErrDeadlineExceeded + default: + } } peer := incoming[0] @@ -447,16 +459,6 @@ func (pq *packetQueue) put(q packetQueueState) { func (pq *packetQueue) closeRead() error { q := pq.get() - - // Discard any unread packets. - for q.head != nil { - p := q.head - q.head = p.next - p.clear() - packetPool.Put(p) - } - q.nBytes = 0 - q.readClosed = true pq.put(q) return nil @@ -513,6 +515,7 @@ func (pq *packetQueue) send(dt *deadlineTimer, b []byte, from sockaddr, block bo if !block { full = pq.full } + select { case <-dt.expired: return 0, os.ErrDeadlineExceeded @@ -535,7 +538,7 @@ func (pq *packetQueue) send(dt *deadlineTimer, b []byte, from sockaddr, block bo } if q.writeClosed { return 0, ErrClosed - } else if q.readClosed { + } else if q.readClosed && q.nBytes >= q.readBufferBytes { return 0, os.NewSyscallError("send", syscall.ECONNRESET) } @@ -563,6 +566,7 @@ func (pq *packetQueue) recvfrom(dt *deadlineTimer, b []byte, wholePacket bool, c // (Without this, TestZeroByteRead deadlocks.) 
empty = pq.empty } + select { case <-dt.expired: return 0, nil, os.ErrDeadlineExceeded @@ -572,11 +576,13 @@ func (pq *packetQueue) recvfrom(dt *deadlineTimer, b []byte, wholePacket bool, c } defer func() { pq.put(q) }() + if q.readClosed { + return 0, nil, ErrClosed + } + p := q.head if p == nil { switch { - case q.readClosed: - return 0, nil, ErrClosed case q.writeClosed: if q.noLinger { return 0, nil, os.NewSyscallError("recvfrom", syscall.ECONNRESET) diff --git a/src/net/net_test.go b/src/net/net_test.go index b448a79cce..4a5dc3b73a 100644 --- a/src/net/net_test.go +++ b/src/net/net_test.go @@ -293,30 +293,6 @@ func TestPacketConnClose(t *testing.T) { } } -func TestListenCloseListen(t *testing.T) { - const maxTries = 10 - for tries := 0; tries < maxTries; tries++ { - ln := newLocalListener(t, "tcp") - addr := ln.Addr().String() - // TODO: This is racy. The selected address could be reused in between this - // Close and the subsequent Listen. - if err := ln.Close(); err != nil { - if perr := parseCloseError(err, false); perr != nil { - t.Error(perr) - } - t.Fatal(err) - } - ln, err := Listen("tcp", addr) - if err == nil { - // Success. (This test didn't always make it here earlier.) - ln.Close() - return - } - t.Errorf("failed on try %d/%d: %v", tries+1, maxTries, err) - } - t.Fatalf("failed to listen/close/listen on same address after %d tries", maxTries) -} - // See golang.org/issue/6163, golang.org/issue/6987. 
func TestAcceptIgnoreAbortedConnRequest(t *testing.T) { switch runtime.GOOS { diff --git a/src/net/netip/export_test.go b/src/net/netip/export_test.go index 59971fa2e4..72347ee01b 100644 --- a/src/net/netip/export_test.go +++ b/src/net/netip/export_test.go @@ -28,3 +28,5 @@ var TestAppendToMarshal = testAppendToMarshal func (a Addr) IsZero() bool { return a.isZero() } func (p Prefix) IsZero() bool { return p.isZero() } + +func (p Prefix) Compare(p2 Prefix) int { return p.compare(p2) } diff --git a/src/net/netip/netip.go b/src/net/netip/netip.go index 1d20a4aa7f..d709c56dfa 100644 --- a/src/net/netip/netip.go +++ b/src/net/netip/netip.go @@ -128,7 +128,7 @@ func ParseAddr(s string) (Addr, error) { return Addr{}, parseAddrError{in: s, msg: "unable to parse IP"} } -// MustParseAddr calls ParseAddr(s) and panics on error. +// MustParseAddr calls [ParseAddr](s) and panics on error. // It is intended for use in tests with hard-coded strings. func MustParseAddr(s string) Addr { ip, err := ParseAddr(s) @@ -152,44 +152,53 @@ func (err parseAddrError) Error() string { return "ParseAddr(" + q(err.in) + "): " + err.msg } -// parseIPv4 parses s as an IPv4 address (in form "192.168.0.1"). -func parseIPv4(s string) (ip Addr, err error) { - var fields [4]uint8 +func parseIPv4Fields(in string, off, end int, fields []uint8) error { var val, pos int var digLen int // number of digits in current octet + s := in[off:end] for i := 0; i < len(s); i++ { if s[i] >= '0' && s[i] <= '9' { if digLen == 1 && val == 0 { - return Addr{}, parseAddrError{in: s, msg: "IPv4 field has octet with leading zero"} + return parseAddrError{in: in, msg: "IPv4 field has octet with leading zero"} } val = val*10 + int(s[i]) - '0' digLen++ if val > 255 { - return Addr{}, parseAddrError{in: s, msg: "IPv4 field has value >255"} + return parseAddrError{in: in, msg: "IPv4 field has value >255"} } } else if s[i] == '.' { // .1.2.3 // 1.2.3. // 1..2.3 if i == 0 || i == len(s)-1 || s[i-1] == '.' 
{ - return Addr{}, parseAddrError{in: s, msg: "IPv4 field must have at least one digit", at: s[i:]} + return parseAddrError{in: in, msg: "IPv4 field must have at least one digit", at: s[i:]} } // 1.2.3.4.5 if pos == 3 { - return Addr{}, parseAddrError{in: s, msg: "IPv4 address too long"} + return parseAddrError{in: in, msg: "IPv4 address too long"} } fields[pos] = uint8(val) pos++ val = 0 digLen = 0 } else { - return Addr{}, parseAddrError{in: s, msg: "unexpected character", at: s[i:]} + return parseAddrError{in: in, msg: "unexpected character", at: s[i:]} } } if pos < 3 { - return Addr{}, parseAddrError{in: s, msg: "IPv4 address too short"} + return parseAddrError{in: in, msg: "IPv4 address too short"} } fields[3] = uint8(val) + return nil +} + +// parseIPv4 parses s as an IPv4 address (in form "192.168.0.1"). +func parseIPv4(s string) (ip Addr, err error) { + var fields [4]uint8 + err = parseIPv4Fields(s, 0, len(s), fields[:]) + if err != nil { + return Addr{}, err + } return AddrFrom4(fields), nil } @@ -242,6 +251,10 @@ func parseIPv6(in string) (Addr, error) { } else { break } + if off > 3 { + //more than 4 digits in group, fail. + return Addr{}, parseAddrError{in: in, msg: "each group must have 4 or less digits", at: s} + } if acc > math.MaxUint16 { // Overflow, fail. return Addr{}, parseAddrError{in: in, msg: "IPv6 field has value >=2^16", at: s} @@ -262,17 +275,15 @@ func parseIPv6(in string) (Addr, error) { // Not enough room. return Addr{}, parseAddrError{in: in, msg: "too many hex fields to fit an embedded IPv4 at the end of the address", at: s} } - // TODO: could make this a bit faster by having a helper - // that parses to a [4]byte, and have both parseIPv4 and - // parseIPv6 use it. 
- ip4, err := parseIPv4(s) - if err != nil { - return Addr{}, parseAddrError{in: in, msg: err.Error(), at: s} + + end := len(in) + if len(zone) > 0 { + end -= len(zone) + 1 + } + err := parseIPv4Fields(in, end-len(s), end, ip[i:i+4]) + if err != nil { + return Addr{}, err } - ip[i] = ip4.v4(0) - ip[i+1] = ip4.v4(1) - ip[i+2] = ip4.v4(2) - ip[i+3] = ip4.v4(3) s = "" i += 4 break @@ -335,8 +346,8 @@ func parseIPv6(in string) (Addr, error) { } // AddrFromSlice parses the 4- or 16-byte byte slice as an IPv4 or IPv6 address. -// Note that a net.IP can be passed directly as the []byte argument. -// If slice's length is not 4 or 16, AddrFromSlice returns Addr{}, false. +// Note that a [net.IP] can be passed directly as the []byte argument. +// If slice's length is not 4 or 16, AddrFromSlice returns [Addr]{}, false. func AddrFromSlice(slice []byte) (ip Addr, ok bool) { switch len(slice) { case 4: @@ -376,13 +387,13 @@ func (ip Addr) isZero() bool { return ip.z == z0 } -// IsValid reports whether the Addr is an initialized address (not the zero Addr). +// IsValid reports whether the [Addr] is an initialized address (not the zero Addr). // // Note that "0.0.0.0" and "::" are both valid values. func (ip Addr) IsValid() bool { return ip.z != z0 } // BitLen returns the number of bits in the IP address: -// 128 for IPv6, 32 for IPv4, and 0 for the zero Addr. +// 128 for IPv6, 32 for IPv4, and 0 for the zero [Addr]. // // Note that IPv4-mapped IPv6 addresses are considered IPv6 addresses // and therefore have bit length 128. @@ -407,7 +418,7 @@ func (ip Addr) Zone() string { // Compare returns an integer comparing two IPs. // The result will be 0 if ip == ip2, -1 if ip < ip2, and +1 if ip > ip2. -// The definition of "less than" is the same as the Less method. +// The definition of "less than" is the same as the [Addr.Less] method. 
func (ip Addr) Compare(ip2 Addr) int { f1, f2 := ip.BitLen(), ip2.BitLen() if f1 < f2 { @@ -449,7 +460,7 @@ func (ip Addr) Less(ip2 Addr) bool { return ip.Compare(ip2) == -1 } // Is4 reports whether ip is an IPv4 address. // -// It returns false for IPv4-mapped IPv6 addresses. See Addr.Unmap. +// It returns false for IPv4-mapped IPv6 addresses. See [Addr.Unmap]. func (ip Addr) Is4() bool { return ip.z == z4 } @@ -583,7 +594,7 @@ func (ip Addr) IsLinkLocalMulticast() bool { // IANA-allocated 2000::/3 global unicast space, with the exception of the // link-local address space. It also returns true even if ip is in the IPv4 // private address space or IPv6 unique local address space. -// It returns false for the zero Addr. +// It returns false for the zero [Addr]. // // For reference, see RFC 1122, RFC 4291, and RFC 4632. func (ip Addr) IsGlobalUnicast() bool { @@ -607,7 +618,7 @@ func (ip Addr) IsGlobalUnicast() bool { // IsPrivate reports whether ip is a private address, according to RFC 1918 // (IPv4 addresses) and RFC 4193 (IPv6 addresses). That is, it reports whether // ip is in 10.0.0.0/8, 172.16.0.0/12, 192.168.0.0/16, or fc00::/7. This is the -// same as net.IP.IsPrivate. +// same as [net.IP.IsPrivate]. func (ip Addr) IsPrivate() bool { // Match the stdlib's IsPrivate logic. if ip.Is4() { @@ -630,14 +641,14 @@ func (ip Addr) IsPrivate() bool { // IsUnspecified reports whether ip is an unspecified address, either the IPv4 // address "0.0.0.0" or the IPv6 address "::". // -// Note that the zero Addr is not an unspecified address. +// Note that the zero [Addr] is not an unspecified address. func (ip Addr) IsUnspecified() bool { return ip == IPv4Unspecified() || ip == IPv6Unspecified() } // Prefix keeps only the top b bits of IP, producing a Prefix // of the specified length. -// If ip is a zero Addr, Prefix always returns a zero Prefix and a nil error. +// If ip is a zero [Addr], Prefix always returns a zero Prefix and a nil error. 
// Otherwise, if bits is less than zero or greater than ip.BitLen(), // Prefix returns an error. func (ip Addr) Prefix(b int) (Prefix, error) { @@ -665,7 +676,7 @@ func (ip Addr) Prefix(b int) (Prefix, error) { // As16 returns the IP address in its 16-byte representation. // IPv4 addresses are returned as IPv4-mapped IPv6 addresses. // IPv6 addresses with zones are returned without their zone (use the -// Zone method to get it). +// [Addr.Zone] method to get it). // The ip zero value returns all zeroes. func (ip Addr) As16() (a16 [16]byte) { bePutUint64(a16[:8], ip.addr.hi) @@ -674,7 +685,7 @@ func (ip Addr) As16() (a16 [16]byte) { } // As4 returns an IPv4 or IPv4-in-IPv6 address in its 4-byte representation. -// If ip is the zero Addr or an IPv6 address, As4 panics. +// If ip is the zero [Addr] or an IPv6 address, As4 panics. // Note that 0.0.0.0 is not the zero Addr. func (ip Addr) As4() (a4 [4]byte) { if ip.z == z4 || ip.Is4In6() { @@ -705,7 +716,7 @@ func (ip Addr) AsSlice() []byte { } // Next returns the address following ip. -// If there is none, it returns the zero Addr. +// If there is none, it returns the zero [Addr]. func (ip Addr) Next() Addr { ip.addr = ip.addr.addOne() if ip.Is4() { @@ -739,10 +750,10 @@ func (ip Addr) Prev() Addr { // String returns the string form of the IP address ip. 
// It returns one of 5 forms: // -// - "invalid IP", if ip is the zero Addr +// - "invalid IP", if ip is the zero [Addr] // - IPv4 dotted decimal ("192.0.2.1") // - IPv6 ("2001:db8::1") -// - "::ffff:1.2.3.4" (if Is4In6) +// - "::ffff:1.2.3.4" (if [Addr.Is4In6]) // - IPv6 with zone ("fe80:db8::1%eth0") // // Note that unlike package net's IP.String method, @@ -756,18 +767,14 @@ func (ip Addr) String() string { return ip.string4() default: if ip.Is4In6() { - if z := ip.Zone(); z != "" { - return "::ffff:" + ip.Unmap().string4() + "%" + z - } else { - return "::ffff:" + ip.Unmap().string4() - } + return ip.string4In6() } return ip.string6() } } // AppendTo appends a text encoding of ip, -// as generated by MarshalText, +// as generated by [Addr.MarshalText], // to b and returns the extended buffer. func (ip Addr) AppendTo(b []byte) []byte { switch ip.z { @@ -777,13 +784,7 @@ func (ip Addr) AppendTo(b []byte) []byte { return ip.appendTo4(b) default: if ip.Is4In6() { - b = append(b, "::ffff:"...) - b = ip.Unmap().appendTo4(b) - if z := ip.Zone(); z != "" { - b = append(b, '%') - b = append(b, z...) - } - return b + return ip.appendTo4In6(b) } return ip.appendTo6(b) } @@ -847,6 +848,23 @@ func (ip Addr) appendTo4(ret []byte) []byte { return ret } +func (ip Addr) string4In6() string { + const max = len("::ffff:255.255.255.255%enp5s0") + ret := make([]byte, 0, max) + ret = ip.appendTo4In6(ret) + return string(ret) +} + +func (ip Addr) appendTo4In6(ret []byte) []byte { + ret = append(ret, "::ffff:"...) + ret = ip.Unmap().appendTo4(ret) + if ip.z != z6noz { + ret = append(ret, '%') + ret = append(ret, ip.Zone()...) + } + return ret +} + // string6 formats ip in IPv6 textual representation. 
It follows the // guidelines in section 4 of RFC 5952 // (https://tools.ietf.org/html/rfc5952#section-4): no unnecessary @@ -899,7 +917,7 @@ func (ip Addr) appendTo6(ret []byte) []byte { return ret } -// StringExpanded is like String but IPv6 addresses are expanded with leading +// StringExpanded is like [Addr.String] but IPv6 addresses are expanded with leading // zeroes and no "::" compression. For example, "2001:db8::1" becomes // "2001:0db8:0000:0000:0000:0000:0000:0001". func (ip Addr) StringExpanded() string { @@ -927,9 +945,9 @@ func (ip Addr) StringExpanded() string { return string(ret) } -// MarshalText implements the encoding.TextMarshaler interface, -// The encoding is the same as returned by String, with one exception: -// If ip is the zero Addr, the encoding is the empty string. +// MarshalText implements the [encoding.TextMarshaler] interface, +// The encoding is the same as returned by [Addr.String], with one exception: +// If ip is the zero [Addr], the encoding is the empty string. func (ip Addr) MarshalText() ([]byte, error) { switch ip.z { case z0: @@ -939,26 +957,21 @@ func (ip Addr) MarshalText() ([]byte, error) { b := make([]byte, 0, max) return ip.appendTo4(b), nil default: + if ip.Is4In6() { + max := len("::ffff:255.255.255.255%enp5s0") + b := make([]byte, 0, max) + return ip.appendTo4In6(b), nil + } max := len("ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0") b := make([]byte, 0, max) - if ip.Is4In6() { - b = append(b, "::ffff:"...) - b = ip.Unmap().appendTo4(b) - if z := ip.Zone(); z != "" { - b = append(b, '%') - b = append(b, z...) - } - return b, nil - } return ip.appendTo6(b), nil } - } // UnmarshalText implements the encoding.TextUnmarshaler interface. -// The IP address is expected in a form accepted by ParseAddr. +// The IP address is expected in a form accepted by [ParseAddr]. // -// If text is empty, UnmarshalText sets *ip to the zero Addr and +// If text is empty, UnmarshalText sets *ip to the zero [Addr] and // returns no error. 
func (ip *Addr) UnmarshalText(text []byte) error { if len(text) == 0 { @@ -988,15 +1001,15 @@ func (ip Addr) marshalBinaryWithTrailingBytes(trailingBytes int) []byte { return b } -// MarshalBinary implements the encoding.BinaryMarshaler interface. -// It returns a zero-length slice for the zero Addr, +// MarshalBinary implements the [encoding.BinaryMarshaler] interface. +// It returns a zero-length slice for the zero [Addr], // the 4-byte form for an IPv4 address, // and the 16-byte form with zone appended for an IPv6 address. func (ip Addr) MarshalBinary() ([]byte, error) { return ip.marshalBinaryWithTrailingBytes(0), nil } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface. // It expects data in the form generated by MarshalBinary. func (ip *Addr) UnmarshalBinary(b []byte) error { n := len(b) @@ -1023,7 +1036,7 @@ type AddrPort struct { port uint16 } -// AddrPortFrom returns an AddrPort with the provided IP and port. +// AddrPortFrom returns an [AddrPort] with the provided IP and port. // It does not allocate. func AddrPortFrom(ip Addr, port uint16) AddrPort { return AddrPort{ip: ip, port: port} } @@ -1062,7 +1075,7 @@ func splitAddrPort(s string) (ip, port string, v6 bool, err error) { return ip, port, v6, nil } -// ParseAddrPort parses s as an AddrPort. +// ParseAddrPort parses s as an [AddrPort]. // // It doesn't do any name resolution: both the address and the port // must be numeric. @@ -1089,7 +1102,7 @@ func ParseAddrPort(s string) (AddrPort, error) { return ipp, nil } -// MustParseAddrPort calls ParseAddrPort(s) and panics on error. +// MustParseAddrPort calls [ParseAddrPort](s) and panics on error. // It is intended for use in tests with hard-coded strings. 
func MustParseAddrPort(s string) AddrPort { ip, err := ParseAddrPort(s) @@ -1114,24 +1127,35 @@ func (p AddrPort) Compare(p2 AddrPort) int { } func (p AddrPort) String() string { + var b []byte switch p.ip.z { case z0: return "invalid AddrPort" case z4: const max = len("255.255.255.255:65535") - buf := make([]byte, 0, max) - buf = p.ip.appendTo4(buf) - buf = append(buf, ':') - buf = strconv.AppendUint(buf, uint64(p.port), 10) - return string(buf) + b = make([]byte, 0, max) + b = p.ip.appendTo4(b) default: - // TODO: this could be more efficient allocation-wise: - return "[" + p.ip.String() + "]:" + itoa.Uitoa(uint(p.port)) + if p.ip.Is4In6() { + const max = len("[::ffff:255.255.255.255%enp5s0]:65535") + b = make([]byte, 0, max) + b = append(b, '[') + b = p.ip.appendTo4In6(b) + } else { + const max = len("[ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff%enp5s0]:65535") + b = make([]byte, 0, max) + b = append(b, '[') + b = p.ip.appendTo6(b) + } + b = append(b, ']') } + b = append(b, ':') + b = strconv.AppendUint(b, uint64(p.port), 10) + return string(b) } // AppendTo appends a text encoding of p, -// as generated by MarshalText, +// as generated by [AddrPort.MarshalText], // to b and returns the extended buffer. func (p AddrPort) AppendTo(b []byte) []byte { switch p.ip.z { @@ -1140,15 +1164,10 @@ func (p AddrPort) AppendTo(b []byte) []byte { case z4: b = p.ip.appendTo4(b) default: + b = append(b, '[') if p.ip.Is4In6() { - b = append(b, "[::ffff:"...) - b = p.ip.Unmap().appendTo4(b) - if z := p.ip.Zone(); z != "" { - b = append(b, '%') - b = append(b, z...) - } + b = p.ip.appendTo4In6(b) } else { - b = append(b, '[') b = p.ip.appendTo6(b) } b = append(b, ']') @@ -1158,9 +1177,9 @@ func (p AddrPort) AppendTo(b []byte) []byte { return b } -// MarshalText implements the encoding.TextMarshaler interface. The -// encoding is the same as returned by String, with one exception: if -// p.Addr() is the zero Addr, the encoding is the empty string. 
+// MarshalText implements the [encoding.TextMarshaler] interface. The +// encoding is the same as returned by [AddrPort.String], with one exception: if +// p.Addr() is the zero [Addr], the encoding is the empty string. func (p AddrPort) MarshalText() ([]byte, error) { var max int switch p.ip.z { @@ -1176,8 +1195,8 @@ func (p AddrPort) MarshalText() ([]byte, error) { } // UnmarshalText implements the encoding.TextUnmarshaler -// interface. The AddrPort is expected in a form -// generated by MarshalText or accepted by ParseAddrPort. +// interface. The [AddrPort] is expected in a form +// generated by [AddrPort.MarshalText] or accepted by [ParseAddrPort]. func (p *AddrPort) UnmarshalText(text []byte) error { if len(text) == 0 { *p = AddrPort{} @@ -1188,8 +1207,8 @@ func (p *AddrPort) UnmarshalText(text []byte) error { return err } -// MarshalBinary implements the encoding.BinaryMarshaler interface. -// It returns Addr.MarshalBinary with an additional two bytes appended +// MarshalBinary implements the [encoding.BinaryMarshaler] interface. +// It returns [Addr.MarshalBinary] with an additional two bytes appended // containing the port in little-endian. func (p AddrPort) MarshalBinary() ([]byte, error) { b := p.Addr().marshalBinaryWithTrailingBytes(2) @@ -1197,8 +1216,8 @@ func (p AddrPort) MarshalBinary() ([]byte, error) { return b, nil } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It expects data in the form generated by MarshalBinary. +// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface. +// It expects data in the form generated by [AddrPort.MarshalBinary]. func (p *AddrPort) UnmarshalBinary(b []byte) error { if len(b) < 2 { return errors.New("unexpected slice size") @@ -1214,7 +1233,7 @@ func (p *AddrPort) UnmarshalBinary(b []byte) error { // Prefix is an IP address prefix (CIDR) representing an IP network. // -// The first Bits() of Addr() are specified. The remaining bits match any address. 
+// The first [Prefix.Bits]() of [Addr]() are specified. The remaining bits match any address. // The range of Bits() is [0,32] for IPv4 or [0,128] for IPv6. type Prefix struct { ip Addr @@ -1224,13 +1243,13 @@ type Prefix struct { bitsPlusOne uint8 } -// PrefixFrom returns a Prefix with the provided IP address and bit +// PrefixFrom returns a [Prefix] with the provided IP address and bit // prefix length. // -// It does not allocate. Unlike Addr.Prefix, PrefixFrom does not mask +// It does not allocate. Unlike [Addr.Prefix], [PrefixFrom] does not mask // off the host bits of ip. // -// If bits is less than zero or greater than ip.BitLen, Prefix.Bits +// If bits is less than zero or greater than ip.BitLen, [Prefix.Bits] // will return an invalid value -1. func PrefixFrom(ip Addr, bits int) Prefix { var bitsPlusOne uint8 @@ -1252,8 +1271,8 @@ func (p Prefix) Addr() Addr { return p.ip } func (p Prefix) Bits() int { return int(p.bitsPlusOne) - 1 } // IsValid reports whether p.Bits() has a valid range for p.Addr(). -// If p.Addr() is the zero Addr, IsValid returns false. -// Note that if p is the zero Prefix, then p.IsValid() == false. +// If p.Addr() is the zero [Addr], IsValid returns false. +// Note that if p is the zero [Prefix], then p.IsValid() == false. func (p Prefix) IsValid() bool { return p.bitsPlusOne > 0 } func (p Prefix) isZero() bool { return p == Prefix{} } @@ -1261,12 +1280,15 @@ func (p Prefix) isZero() bool { return p == Prefix{} } // IsSingleIP reports whether p contains exactly one IP. func (p Prefix) IsSingleIP() bool { return p.IsValid() && p.Bits() == p.ip.BitLen() } -// Compare returns an integer comparing two prefixes. +// compare returns an integer comparing two prefixes. // The result will be 0 if p == p2, -1 if p < p2, and +1 if p > p2. // Prefixes sort first by validity (invalid before valid), then // address family (IPv4 before IPv6), then prefix length, then // address. 
-func (p Prefix) Compare(p2 Prefix) int { +// +// Unexported for Go 1.22 because we may want to compare by p.Addr first. +// See post-acceptance discussion on go.dev/issue/61642. +func (p Prefix) compare(p2 Prefix) int { if c := cmp.Compare(p.Addr().BitLen(), p2.Addr().BitLen()); c != 0 { return c } @@ -1276,6 +1298,15 @@ func (p Prefix) Compare(p2 Prefix) int { return p.Addr().Compare(p2.Addr()) } +type parsePrefixError struct { + in string // the string given to ParsePrefix + msg string // an explanation of the parse failure +} + +func (err parsePrefixError) Error() string { + return "netip.ParsePrefix(" + strconv.Quote(err.in) + "): " + err.msg +} + // ParsePrefix parses s as an IP address prefix. // The string can be in the form "192.168.1.0/24" or "2001:db8::/32", // the CIDR notation defined in RFC 4632 and RFC 4291. @@ -1286,39 +1317,39 @@ func (p Prefix) Compare(p2 Prefix) int { func ParsePrefix(s string) (Prefix, error) { i := bytealg.LastIndexByteString(s, '/') if i < 0 { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): no '/'") + return Prefix{}, parsePrefixError{in: s, msg: "no '/'"} } ip, err := ParseAddr(s[:i]) if err != nil { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): " + err.Error()) + return Prefix{}, parsePrefixError{in: s, msg: err.Error()} } // IPv6 zones are not allowed: https://go.dev/issue/51899 if ip.Is6() && ip.z != z6noz { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): IPv6 zones cannot be present in a prefix") + return Prefix{}, parsePrefixError{in: s, msg: "IPv6 zones cannot be present in a prefix"} } bitsStr := s[i+1:] // strconv.Atoi accepts a leading sign and leading zeroes, but we don't want that. 
if len(bitsStr) > 1 && (bitsStr[0] < '1' || bitsStr[0] > '9') { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr)) + return Prefix{}, parsePrefixError{in: s, msg: "bad bits after slash: " + strconv.Quote(bitsStr)} } bits, err := strconv.Atoi(bitsStr) if err != nil { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): bad bits after slash: " + strconv.Quote(bitsStr)) + return Prefix{}, parsePrefixError{in: s, msg: "bad bits after slash: " + strconv.Quote(bitsStr)} } maxBits := 32 if ip.Is6() { maxBits = 128 } if bits < 0 || bits > maxBits { - return Prefix{}, errors.New("netip.ParsePrefix(" + strconv.Quote(s) + "): prefix length out of range") + return Prefix{}, parsePrefixError{in: s, msg: "prefix length out of range"} } return PrefixFrom(ip, bits), nil } -// MustParsePrefix calls ParsePrefix(s) and panics on error. +// MustParsePrefix calls [ParsePrefix](s) and panics on error. // It is intended for use in tests with hard-coded strings. func MustParsePrefix(s string) Prefix { ip, err := ParsePrefix(s) @@ -1331,7 +1362,7 @@ func MustParsePrefix(s string) Prefix { // Masked returns p in its canonical form, with all but the high // p.Bits() bits of p.Addr() masked off. // -// If p is zero or otherwise invalid, Masked returns the zero Prefix. +// If p is zero or otherwise invalid, Masked returns the zero [Prefix]. func (p Prefix) Masked() Prefix { m, _ := p.ip.Prefix(p.Bits()) return m @@ -1408,7 +1439,7 @@ func (p Prefix) Overlaps(o Prefix) bool { } // AppendTo appends a text encoding of p, -// as generated by MarshalText, +// as generated by [Prefix.MarshalText], // to b and returns the extended buffer. 
func (p Prefix) AppendTo(b []byte) []byte { if p.isZero() { @@ -1435,8 +1466,8 @@ func (p Prefix) AppendTo(b []byte) []byte { return b } -// MarshalText implements the encoding.TextMarshaler interface, -// The encoding is the same as returned by String, with one exception: +// MarshalText implements the [encoding.TextMarshaler] interface, +// The encoding is the same as returned by [Prefix.String], with one exception: // If p is the zero value, the encoding is the empty string. func (p Prefix) MarshalText() ([]byte, error) { var max int @@ -1453,8 +1484,8 @@ func (p Prefix) MarshalText() ([]byte, error) { } // UnmarshalText implements the encoding.TextUnmarshaler interface. -// The IP address is expected in a form accepted by ParsePrefix -// or generated by MarshalText. +// The IP address is expected in a form accepted by [ParsePrefix] +// or generated by [Prefix.MarshalText]. func (p *Prefix) UnmarshalText(text []byte) error { if len(text) == 0 { *p = Prefix{} @@ -1465,8 +1496,8 @@ func (p *Prefix) UnmarshalText(text []byte) error { return err } -// MarshalBinary implements the encoding.BinaryMarshaler interface. -// It returns Addr.MarshalBinary with an additional byte appended +// MarshalBinary implements the [encoding.BinaryMarshaler] interface. +// It returns [Addr.MarshalBinary] with an additional byte appended // containing the prefix bits. func (p Prefix) MarshalBinary() ([]byte, error) { b := p.Addr().withoutZone().marshalBinaryWithTrailingBytes(1) @@ -1474,8 +1505,8 @@ func (p Prefix) MarshalBinary() ([]byte, error) { return b, nil } -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It expects data in the form generated by MarshalBinary. +// UnmarshalBinary implements the [encoding.BinaryUnmarshaler] interface. +// It expects data in the form generated by [Prefix.MarshalBinary]. 
func (p *Prefix) UnmarshalBinary(b []byte) error { if len(b) < 1 { return errors.New("unexpected slice size") diff --git a/src/net/netip/netip_test.go b/src/net/netip/netip_test.go index a748ac34f1..e75f07d8c2 100644 --- a/src/net/netip/netip_test.go +++ b/src/net/netip/netip_test.go @@ -60,7 +60,12 @@ func TestParseAddr(t *testing.T) { // 4-in-6 with octet with leading zero { in: "::ffff:1.2.03.4", - wantErr: `ParseAddr("::ffff:1.2.03.4"): ParseAddr("1.2.03.4"): IPv4 field has octet with leading zero (at "1.2.03.4")`, + wantErr: `ParseAddr("::ffff:1.2.03.4"): IPv4 field has octet with leading zero`, + }, + // 4-in-6 with octet with unexpected character + { + in: "::ffff:1.2.3.z", + wantErr: `ParseAddr("::ffff:1.2.3.z"): unexpected character (at "z")`, }, // Basic zero IPv6 address. { @@ -269,6 +274,10 @@ func TestParseAddr(t *testing.T) { "fe80:1?:1", // IPv6 with truncated bytes after single colon. "fe80:", + // IPv6 with 5 zeros in last group + "0:0:0:0:0:ffff:0:00000", + // IPv6 with 5 zeros in one group and embedded IPv4 + "0:0:0:0:00000:ffff:127.1.2.3", } for _, s := range invalidIPs { @@ -1242,7 +1251,6 @@ func TestIs4In6(t *testing.T) { {mustIP("::ffff:127.1.2.3"), true, mustIP("127.1.2.3")}, {mustIP("::ffff:7f01:0203"), true, mustIP("127.1.2.3")}, {mustIP("0:0:0:0:0000:ffff:127.1.2.3"), true, mustIP("127.1.2.3")}, - {mustIP("0:0:0:0:000000:ffff:127.1.2.3"), true, mustIP("127.1.2.3")}, {mustIP("0:0:0:0::ffff:127.1.2.3"), true, mustIP("127.1.2.3")}, {mustIP("::1"), false, mustIP("::1")}, {mustIP("1.2.3.4"), false, mustIP("1.2.3.4")}, @@ -1691,7 +1699,7 @@ func BenchmarkStdParseIP(b *testing.B) { } } -func BenchmarkIPString(b *testing.B) { +func BenchmarkAddrString(b *testing.B) { for _, test := range parseBenchInputs { ip := MustParseAddr(test.ip) b.Run(test.name, func(b *testing.B) { @@ -1715,11 +1723,15 @@ func BenchmarkIPStringExpanded(b *testing.B) { } } -func BenchmarkIPMarshalText(b *testing.B) { - b.ReportAllocs() - ip := MustParseAddr("66.55.44.33") 
- for i := 0; i < b.N; i++ { - sinkBytes, _ = ip.MarshalText() +func BenchmarkAddrMarshalText(b *testing.B) { + for _, test := range parseBenchInputs { + ip := MustParseAddr(test.ip) + b.Run(test.name, func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + sinkBytes, _ = ip.MarshalText() + } + }) } } diff --git a/src/net/netip/slow_test.go b/src/net/netip/slow_test.go index d7c8025164..a05f39de74 100644 --- a/src/net/netip/slow_test.go +++ b/src/net/netip/slow_test.go @@ -182,6 +182,9 @@ func parseIPv4Slow(s string) (Addr, error) { // parseWord converts a 16-bit hex string into its corresponding // two-byte value. func parseWord(s string) (byte, byte, error) { + if(len(s) > 4) { + return 0, 0, fmt.Errorf("parseWord(%q): invalid word", s) + } ret, err := strconv.ParseUint(s, 16, 16) if err != nil { return 0, 0, err diff --git a/src/net/parse_test.go b/src/net/parse_test.go index 7c509a97f2..e58d954154 100644 --- a/src/net/parse_test.go +++ b/src/net/parse_test.go @@ -22,7 +22,8 @@ func TestReadLine(t *testing.T) { fd, err := os.Open(filename) if err != nil { - t.Fatal(err) + // The file is missing even on some Unix systems. + t.Skipf("skipping because failed to open /etc/services: %v", err) } defer fd.Close() br := bufio.NewReader(fd) diff --git a/src/net/pipe.go b/src/net/pipe.go index f1741938b0..69955e4617 100644 --- a/src/net/pipe.go +++ b/src/net/pipe.go @@ -106,7 +106,7 @@ type pipe struct { } // Pipe creates a synchronous, in-memory, full duplex -// network connection; both ends implement the Conn interface. +// network connection; both ends implement the [Conn] interface. // Reads on one end are matched with writes on the other, // copying data directly between the two; there is no internal // buffering. 
diff --git a/src/net/rawconn.go b/src/net/rawconn.go index e49b9fb81b..19228e94ed 100644 --- a/src/net/rawconn.go +++ b/src/net/rawconn.go @@ -63,7 +63,7 @@ func (c *rawConn) Write(f func(uintptr) bool) error { // PollFD returns the poll.FD of the underlying connection. // -// Other packages in std that also import internal/poll (such as os) +// Other packages in std that also import [internal/poll] (such as os) // can use a type assertion to access this extension method so that // they can pass the *poll.FD to functions like poll.Splice. // @@ -79,6 +79,17 @@ func newRawConn(fd *netFD) *rawConn { return &rawConn{fd: fd} } +// Network returns the network type of the underlying connection. +// +// Other packages in std that import internal/poll and are unable to +// import net (such as os) can use a type assertion to access this +// extension method so that they can distinguish different socket types. +// +// Network is not intended for use outside the standard library. +func (c *rawConn) Network() poll.String { + return poll.String(c.fd.net) +} + type rawListener struct { rawConn } diff --git a/src/net/rpc/client.go b/src/net/rpc/client.go index 42d13519b1..ffdc435965 100644 --- a/src/net/rpc/client.go +++ b/src/net/rpc/client.go @@ -53,13 +53,13 @@ type Client struct { // A ClientCodec implements writing of RPC requests and // reading of RPC responses for the client side of an RPC session. -// The client calls WriteRequest to write a request to the connection -// and calls ReadResponseHeader and ReadResponseBody in pairs -// to read responses. The client calls Close when finished with the +// The client calls [ClientCodec.WriteRequest] to write a request to the connection +// and calls [ClientCodec.ReadResponseHeader] and [ClientCodec.ReadResponseBody] in pairs +// to read responses. The client calls [ClientCodec.Close] when finished with the // connection. 
ReadResponseBody may be called with a nil // argument to force the body of the response to be read and then // discarded. -// See NewClient's comment for information about concurrent access. +// See [NewClient]'s comment for information about concurrent access. type ClientCodec interface { WriteRequest(*Request, any) error ReadResponseHeader(*Response) error @@ -181,7 +181,7 @@ func (call *Call) done() { } } -// NewClient returns a new Client to handle requests to the +// NewClient returns a new [Client] to handle requests to the // set of services at the other end of the connection. // It adds a buffer to the write side of the connection so // the header and payload are sent as a unit. @@ -196,7 +196,7 @@ func NewClient(conn io.ReadWriteCloser) *Client { return NewClientWithCodec(client) } -// NewClientWithCodec is like NewClient but uses the specified +// NewClientWithCodec is like [NewClient] but uses the specified // codec to encode requests and decode responses. func NewClientWithCodec(codec ClientCodec) *Client { client := &Client{ @@ -279,7 +279,7 @@ func Dial(network, address string) (*Client, error) { } // Close calls the underlying codec's Close method. If the connection is already -// shutting down, ErrShutdown is returned. +// shutting down, [ErrShutdown] is returned. func (client *Client) Close() error { client.mutex.Lock() if client.closing { @@ -291,7 +291,7 @@ func (client *Client) Close() error { return client.codec.Close() } -// Go invokes the function asynchronously. It returns the Call structure representing +// Go invokes the function asynchronously. It returns the [Call] structure representing // the invocation. The done channel will signal when the call is complete by returning // the same Call object. If done is nil, Go will allocate a new channel. // If non-nil, done must be buffered or Go will deliberately crash. 
diff --git a/src/net/rpc/jsonrpc/client.go b/src/net/rpc/jsonrpc/client.go index c473017d26..1beba0f364 100644 --- a/src/net/rpc/jsonrpc/client.go +++ b/src/net/rpc/jsonrpc/client.go @@ -33,7 +33,7 @@ type clientCodec struct { pending map[uint64]string // map request id to method name } -// NewClientCodec returns a new rpc.ClientCodec using JSON-RPC on conn. +// NewClientCodec returns a new [rpc.ClientCodec] using JSON-RPC on conn. func NewClientCodec(conn io.ReadWriteCloser) rpc.ClientCodec { return &clientCodec{ dec: json.NewDecoder(conn), @@ -108,7 +108,7 @@ func (c *clientCodec) Close() error { return c.c.Close() } -// NewClient returns a new rpc.Client to handle requests to the +// NewClient returns a new [rpc.Client] to handle requests to the // set of services at the other end of the connection. func NewClient(conn io.ReadWriteCloser) *rpc.Client { return rpc.NewClientWithCodec(NewClientCodec(conn)) diff --git a/src/net/rpc/jsonrpc/server.go b/src/net/rpc/jsonrpc/server.go index 3ee4ddfef2..57a4de1d0f 100644 --- a/src/net/rpc/jsonrpc/server.go +++ b/src/net/rpc/jsonrpc/server.go @@ -33,7 +33,7 @@ type serverCodec struct { pending map[uint64]*json.RawMessage } -// NewServerCodec returns a new rpc.ServerCodec using JSON-RPC on conn. +// NewServerCodec returns a new [rpc.ServerCodec] using JSON-RPC on conn. func NewServerCodec(conn io.ReadWriteCloser) rpc.ServerCodec { return &serverCodec{ dec: json.NewDecoder(conn), diff --git a/src/net/rpc/server.go b/src/net/rpc/server.go index fc3ec484ad..1771726a93 100644 --- a/src/net/rpc/server.go +++ b/src/net/rpc/server.go @@ -30,17 +30,17 @@ These requirements apply even if a different codec is used. The method's first argument represents the arguments provided by the caller; the second argument represents the result parameters to be returned to the caller. The method's return value, if non-nil, is passed back as a string that the client -sees as if created by errors.New. 
If an error is returned, the reply parameter +sees as if created by [errors.New]. If an error is returned, the reply parameter will not be sent back to the client. -The server may handle requests on a single connection by calling ServeConn. More -typically it will create a network listener and call Accept or, for an HTTP -listener, HandleHTTP and http.Serve. +The server may handle requests on a single connection by calling [ServeConn]. More +typically it will create a network listener and call [Accept] or, for an HTTP +listener, [HandleHTTP] and [http.Serve]. A client wishing to use the service establishes a connection and then invokes -NewClient on the connection. The convenience function Dial (DialHTTP) performs +[NewClient] on the connection. The convenience function [Dial] ([DialHTTP]) performs both steps for a raw network connection (an HTTP connection). The resulting -Client object has two methods, Call and Go, that specify the service and method to +[Client] object has two methods, [Call] and Go, that specify the service and method to call, a pointer containing the arguments, and a pointer to receive the result parameters. @@ -48,7 +48,7 @@ The Call method waits for the remote call to complete while the Go method launches the call asynchronously and signals completion using the Call structure's Done channel. -Unless an explicit codec is set up, package encoding/gob is used to +Unless an explicit codec is set up, package [encoding/gob] is used to transport the data. Here is a simple example. A server wishes to export an object of type Arith: @@ -192,12 +192,12 @@ type Server struct { freeResp *Response } -// NewServer returns a new Server. +// NewServer returns a new [Server]. func NewServer() *Server { return &Server{} } -// DefaultServer is the default instance of *Server. +// DefaultServer is the default instance of [*Server]. var DefaultServer = NewServer() // Is this type exported or a builtin? 
@@ -225,7 +225,7 @@ func (server *Server) Register(rcvr any) error { return server.register(rcvr, "", false) } -// RegisterName is like Register but uses the provided name for the type +// RegisterName is like [Register] but uses the provided name for the type // instead of the receiver's concrete type. func (server *Server) RegisterName(name string, rcvr any) error { return server.register(rcvr, name, true) @@ -440,8 +440,8 @@ func (c *gobServerCodec) Close() error { // ServeConn blocks, serving the connection until the client hangs up. // The caller typically invokes ServeConn in a go statement. // ServeConn uses the gob wire format (see package gob) on the -// connection. To use an alternate codec, use ServeCodec. -// See NewClient's comment for information about concurrent access. +// connection. To use an alternate codec, use [ServeCodec]. +// See [NewClient]'s comment for information about concurrent access. func (server *Server) ServeConn(conn io.ReadWriteCloser) { buf := bufio.NewWriter(conn) srv := &gobServerCodec{ @@ -453,7 +453,7 @@ func (server *Server) ServeConn(conn io.ReadWriteCloser) { server.ServeCodec(srv) } -// ServeCodec is like ServeConn but uses the specified codec to +// ServeCodec is like [ServeConn] but uses the specified codec to // decode requests and encode responses. func (server *Server) ServeCodec(codec ServerCodec) { sending := new(sync.Mutex) @@ -483,7 +483,7 @@ func (server *Server) ServeCodec(codec ServerCodec) { codec.Close() } -// ServeRequest is like ServeCodec but synchronously serves a single request. +// ServeRequest is like [ServeCodec] but synchronously serves a single request. // It does not close the codec upon completion. func (server *Server) ServeRequest(codec ServerCodec) error { sending := new(sync.Mutex) @@ -635,10 +635,10 @@ func (server *Server) Accept(lis net.Listener) { } } -// Register publishes the receiver's methods in the DefaultServer. +// Register publishes the receiver's methods in the [DefaultServer]. 
func Register(rcvr any) error { return DefaultServer.Register(rcvr) } -// RegisterName is like Register but uses the provided name for the type +// RegisterName is like [Register] but uses the provided name for the type // instead of the receiver's concrete type. func RegisterName(name string, rcvr any) error { return DefaultServer.RegisterName(name, rcvr) @@ -646,12 +646,12 @@ func RegisterName(name string, rcvr any) error { // A ServerCodec implements reading of RPC requests and writing of // RPC responses for the server side of an RPC session. -// The server calls ReadRequestHeader and ReadRequestBody in pairs -// to read requests from the connection, and it calls WriteResponse to -// write a response back. The server calls Close when finished with the +// The server calls [ServerCodec.ReadRequestHeader] and [ServerCodec.ReadRequestBody] in pairs +// to read requests from the connection, and it calls [ServerCodec.WriteResponse] to +// write a response back. The server calls [ServerCodec.Close] when finished with the // connection. ReadRequestBody may be called with a nil // argument to force the body of the request to be read and discarded. -// See NewClient's comment for information about concurrent access. +// See [NewClient]'s comment for information about concurrent access. type ServerCodec interface { ReadRequestHeader(*Request) error ReadRequestBody(any) error @@ -661,37 +661,37 @@ type ServerCodec interface { Close() error } -// ServeConn runs the DefaultServer on a single connection. +// ServeConn runs the [DefaultServer] on a single connection. // ServeConn blocks, serving the connection until the client hangs up. // The caller typically invokes ServeConn in a go statement. // ServeConn uses the gob wire format (see package gob) on the -// connection. To use an alternate codec, use ServeCodec. -// See NewClient's comment for information about concurrent access. +// connection. To use an alternate codec, use [ServeCodec]. 
+// See [NewClient]'s comment for information about concurrent access. func ServeConn(conn io.ReadWriteCloser) { DefaultServer.ServeConn(conn) } -// ServeCodec is like ServeConn but uses the specified codec to +// ServeCodec is like [ServeConn] but uses the specified codec to // decode requests and encode responses. func ServeCodec(codec ServerCodec) { DefaultServer.ServeCodec(codec) } -// ServeRequest is like ServeCodec but synchronously serves a single request. +// ServeRequest is like [ServeCodec] but synchronously serves a single request. // It does not close the codec upon completion. func ServeRequest(codec ServerCodec) error { return DefaultServer.ServeRequest(codec) } // Accept accepts connections on the listener and serves requests -// to DefaultServer for each incoming connection. +// to [DefaultServer] for each incoming connection. // Accept blocks; the caller typically invokes it in a go statement. func Accept(lis net.Listener) { DefaultServer.Accept(lis) } // Can connect to RPC service using HTTP CONNECT to rpcPath. var connected = "200 Connected to Go RPC" -// ServeHTTP implements an http.Handler that answers RPC requests. +// ServeHTTP implements an [http.Handler] that answers RPC requests. func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.Method != "CONNECT" { w.Header().Set("Content-Type", "text/plain; charset=utf-8") @@ -710,15 +710,15 @@ func (server *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { // HandleHTTP registers an HTTP handler for RPC messages on rpcPath, // and a debugging handler on debugPath. -// It is still necessary to invoke http.Serve(), typically in a go statement. +// It is still necessary to invoke [http.Serve](), typically in a go statement. 
func (server *Server) HandleHTTP(rpcPath, debugPath string) { http.Handle(rpcPath, server) http.Handle(debugPath, debugHTTP{server}) } -// HandleHTTP registers an HTTP handler for RPC messages to DefaultServer -// on DefaultRPCPath and a debugging handler on DefaultDebugPath. -// It is still necessary to invoke http.Serve(), typically in a go statement. +// HandleHTTP registers an HTTP handler for RPC messages to [DefaultServer] +// on [DefaultRPCPath] and a debugging handler on [DefaultDebugPath]. +// It is still necessary to invoke [http.Serve](), typically in a go statement. func HandleHTTP() { DefaultServer.HandleHTTP(DefaultRPCPath, DefaultDebugPath) } diff --git a/src/net/sendfile_linux_test.go b/src/net/sendfile_linux_test.go deleted file mode 100644 index 0b5af36cdb..0000000000 --- a/src/net/sendfile_linux_test.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build linux - -package net - -import ( - "io" - "os" - "strconv" - "testing" -) - -func BenchmarkSendFile(b *testing.B) { - for i := 0; i <= 10; i++ { - size := 1 << (i + 10) - bench := sendFileBench{chunkSize: size} - b.Run(strconv.Itoa(size), bench.benchSendFile) - } -} - -type sendFileBench struct { - chunkSize int -} - -func (bench sendFileBench) benchSendFile(b *testing.B) { - fileSize := b.N * bench.chunkSize - f := createTempFile(b, fileSize) - fileName := f.Name() - defer os.Remove(fileName) - defer f.Close() - - client, server := spliceTestSocketPair(b, "tcp") - defer server.Close() - - cleanUp, err := startSpliceClient(client, "r", bench.chunkSize, fileSize) - if err != nil { - b.Fatal(err) - } - defer cleanUp() - - b.ReportAllocs() - b.SetBytes(int64(bench.chunkSize)) - b.ResetTimer() - - // Data go from file to socket via sendfile(2). 
- sent, err := io.Copy(server, f) - if err != nil { - b.Fatalf("failed to copy data with sendfile, error: %v", err) - } - if sent != int64(fileSize) { - b.Fatalf("bytes sent mismatch\n\texpect: %d\n\tgot: %d", fileSize, sent) - } -} - -func createTempFile(b *testing.B, size int) *os.File { - f, err := os.CreateTemp("", "linux-sendfile-test") - if err != nil { - b.Fatalf("failed to create temporary file: %v", err) - } - - data := make([]byte, size) - if _, err := f.Write(data); err != nil { - b.Fatalf("failed to create and feed the file: %v", err) - } - if err := f.Sync(); err != nil { - b.Fatalf("failed to save the file: %v", err) - } - if _, err := f.Seek(0, io.SeekStart); err != nil { - b.Fatalf("failed to rewind the file: %v", err) - } - - return f -} diff --git a/src/net/sendfile_test.go b/src/net/sendfile_test.go index 4cba1ed2b1..8fadb47c15 100644 --- a/src/net/sendfile_test.go +++ b/src/net/sendfile_test.go @@ -14,6 +14,7 @@ import ( "io" "os" "runtime" + "strconv" "sync" "testing" "time" @@ -446,3 +447,81 @@ func BenchmarkSendfileZeroBytes(b *testing.B) { cancel() } + +func BenchmarkSendFile(b *testing.B) { + if runtime.GOOS == "windows" { + // TODO(panjf2000): Windows has not yet implemented FileConn, + // remove this when it's implemented in https://go.dev/issues/9503. 
+ b.Skipf("skipping on %s", runtime.GOOS) + } + + b.Run("file-to-tcp", func(b *testing.B) { benchmarkSendFile(b, "tcp") }) + b.Run("file-to-unix", func(b *testing.B) { benchmarkSendFile(b, "unix") }) +} + +func benchmarkSendFile(b *testing.B, proto string) { + for i := 0; i <= 10; i++ { + size := 1 << (i + 10) + bench := sendFileBench{ + proto: proto, + chunkSize: size, + } + b.Run(strconv.Itoa(size), bench.benchSendFile) + } +} + +type sendFileBench struct { + proto string + chunkSize int +} + +func (bench sendFileBench) benchSendFile(b *testing.B) { + fileSize := b.N * bench.chunkSize + f := createTempFile(b, fileSize) + + client, server := spawnTestSocketPair(b, bench.proto) + defer server.Close() + + cleanUp, err := startTestSocketPeer(b, client, "r", bench.chunkSize, fileSize) + if err != nil { + client.Close() + b.Fatal(err) + } + defer cleanUp(b) + + b.ReportAllocs() + b.SetBytes(int64(bench.chunkSize)) + b.ResetTimer() + + // Data go from file to socket via sendfile(2). + sent, err := io.Copy(server, f) + if err != nil { + b.Fatalf("failed to copy data with sendfile, error: %v", err) + } + if sent != int64(fileSize) { + b.Fatalf("bytes sent mismatch, got: %d, want: %d", sent, fileSize) + } +} + +func createTempFile(b *testing.B, size int) *os.File { + f, err := os.CreateTemp(b.TempDir(), "sendfile-bench") + if err != nil { + b.Fatalf("failed to create temporary file: %v", err) + } + b.Cleanup(func() { + f.Close() + }) + + data := make([]byte, size) + if _, err := f.Write(data); err != nil { + b.Fatalf("failed to create and feed the file: %v", err) + } + if err := f.Sync(); err != nil { + b.Fatalf("failed to save the file: %v", err) + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + b.Fatalf("failed to rewind the file: %v", err) + } + + return f +} diff --git a/src/net/smtp/auth.go b/src/net/smtp/auth.go index 72eb16671f..6d461acc48 100644 --- a/src/net/smtp/auth.go +++ b/src/net/smtp/auth.go @@ -42,7 +42,7 @@ type plainAuth struct { host string } 
-// PlainAuth returns an Auth that implements the PLAIN authentication +// PlainAuth returns an [Auth] that implements the PLAIN authentication // mechanism as defined in RFC 4616. The returned Auth uses the given // username and password to authenticate to host and act as identity. // Usually identity should be the empty string, to act as username. @@ -86,7 +86,7 @@ type cramMD5Auth struct { username, secret string } -// CRAMMD5Auth returns an Auth that implements the CRAM-MD5 authentication +// CRAMMD5Auth returns an [Auth] that implements the CRAM-MD5 authentication // mechanism as defined in RFC 2195. // The returned Auth uses the given username and secret to authenticate // to the server using the challenge-response mechanism. diff --git a/src/net/smtp/smtp.go b/src/net/smtp/smtp.go index b5a025ef2a..b7877936da 100644 --- a/src/net/smtp/smtp.go +++ b/src/net/smtp/smtp.go @@ -48,7 +48,7 @@ type Client struct { helloError error // the error from the hello } -// Dial returns a new Client connected to an SMTP server at addr. +// Dial returns a new [Client] connected to an SMTP server at addr. // The addr must include a port, as in "mail.example.com:smtp". func Dial(addr string) (*Client, error) { conn, err := net.Dial("tcp", addr) @@ -59,7 +59,7 @@ func Dial(addr string) (*Client, error) { return NewClient(conn, host) } -// NewClient returns a new Client using an existing connection and host as a +// NewClient returns a new [Client] using an existing connection and host as a // server name to be used when authenticating. func NewClient(conn net.Conn, host string) (*Client, error) { text := textproto.NewConn(conn) @@ -166,7 +166,7 @@ func (c *Client) StartTLS(config *tls.Config) error { } // TLSConnectionState returns the client's TLS connection state. -// The return values are their zero values if StartTLS did +// The return values are their zero values if [Client.StartTLS] did // not succeed. 
func (c *Client) TLSConnectionState() (state tls.ConnectionState, ok bool) { tc, ok := c.conn.(*tls.Conn) @@ -241,7 +241,7 @@ func (c *Client) Auth(a Auth) error { // If the server supports the 8BITMIME extension, Mail adds the BODY=8BITMIME // parameter. If the server supports the SMTPUTF8 extension, Mail adds the // SMTPUTF8 parameter. -// This initiates a mail transaction and is followed by one or more Rcpt calls. +// This initiates a mail transaction and is followed by one or more [Client.Rcpt] calls. func (c *Client) Mail(from string) error { if err := validateLine(from); err != nil { return err @@ -263,8 +263,8 @@ func (c *Client) Mail(from string) error { } // Rcpt issues a RCPT command to the server using the provided email address. -// A call to Rcpt must be preceded by a call to Mail and may be followed by -// a Data call or another Rcpt call. +// A call to Rcpt must be preceded by a call to [Client.Mail] and may be followed by +// a [Client.Data] call or another Rcpt call. func (c *Client) Rcpt(to string) error { if err := validateLine(to); err != nil { return err @@ -287,7 +287,7 @@ func (d *dataCloser) Close() error { // Data issues a DATA command to the server and returns a writer that // can be used to write the mail headers and body. The caller should // close the writer before calling any more methods on c. A call to -// Data must be preceded by one or more calls to Rcpt. +// Data must be preceded by one or more calls to [Client.Rcpt]. func (c *Client) Data() (io.WriteCloser, error) { _, _, err := c.cmd(354, "DATA") if err != nil { diff --git a/src/net/sock_windows.go b/src/net/sock_windows.go index 5540135a2c..a519909bb0 100644 --- a/src/net/sock_windows.go +++ b/src/net/sock_windows.go @@ -11,8 +11,9 @@ import ( ) func maxListenerBacklog() int { - // TODO: Implement this - // NOTE: Never return a number bigger than 1<<16 - 1. See issue 5030. 
+ // When the socket backlog is SOMAXCONN, Windows will set the backlog to + // "a reasonable maximum value". + // See: https://learn.microsoft.com/en-us/windows/win32/api/winsock2/nf-winsock2-listen return syscall.SOMAXCONN } diff --git a/src/net/splice_linux.go b/src/net/splice_linux.go index ab2ab70b28..9fc26b4c23 100644 --- a/src/net/splice_linux.go +++ b/src/net/splice_linux.go @@ -9,12 +9,14 @@ import ( "io" ) -// splice transfers data from r to c using the splice system call to minimize -// copies from and to userspace. c must be a TCP connection. Currently, splice -// is only enabled if r is a TCP or a stream-oriented Unix connection. +var pollSplice = poll.Splice + +// spliceFrom transfers data from r to c using the splice system call to minimize +// copies from and to userspace. c must be a TCP connection. +// Currently, spliceFrom is only enabled if r is a TCP or a stream-oriented Unix connection. // -// If splice returns handled == false, it has performed no work. -func splice(c *netFD, r io.Reader) (written int64, err error, handled bool) { +// If spliceFrom returns handled == false, it has performed no work. 
+func spliceFrom(c *netFD, r io.Reader) (written int64, err error, handled bool) { var remain int64 = 1<<63 - 1 // by default, copy until EOF lr, ok := r.(*io.LimitedReader) if ok { @@ -25,20 +27,38 @@ func splice(c *netFD, r io.Reader) (written int64, err error, handled bool) { } var s *netFD - if tc, ok := r.(*TCPConn); ok { - s = tc.fd - } else if uc, ok := r.(*UnixConn); ok { - if uc.fd.net != "unix" { + switch v := r.(type) { + case *TCPConn: + s = v.fd + case tcpConnWithoutWriteTo: + s = v.fd + case *UnixConn: + if v.fd.net != "unix" { return 0, nil, false } - s = uc.fd - } else { + s = v.fd + default: return 0, nil, false } - written, handled, sc, err := poll.Splice(&c.pfd, &s.pfd, remain) + written, handled, sc, err := pollSplice(&c.pfd, &s.pfd, remain) if lr != nil { lr.N -= written } return written, wrapSyscallError(sc, err), handled } + +// spliceTo transfers data from c to w using the splice system call to minimize +// copies from and to userspace. c must be a TCP connection. +// Currently, spliceTo is only enabled if w is a stream-oriented Unix connection. +// +// If spliceTo returns handled == false, it has performed no work. 
+func spliceTo(w io.Writer, c *netFD) (written int64, err error, handled bool) { + uc, ok := w.(*UnixConn) + if !ok || uc.fd.net != "unix" { + return + } + + written, handled, sc, err := pollSplice(&uc.fd.pfd, &c.pfd, 1<<63-1) + return written, wrapSyscallError(sc, err), handled +} diff --git a/src/net/splice_test.go b/src/net/splice_linux_test.go similarity index 61% rename from src/net/splice_test.go rename to src/net/splice_linux_test.go index 75a8f274ff..2edd744406 100644 --- a/src/net/splice_test.go +++ b/src/net/splice_linux_test.go @@ -7,14 +7,13 @@ package net import ( + "internal/poll" "io" - "log" "os" - "os/exec" "strconv" "sync" + "syscall" "testing" - "time" ) func TestSplice(t *testing.T) { @@ -23,6 +22,7 @@ func TestSplice(t *testing.T) { t.Skip("skipping unix-to-tcp tests") } t.Run("unix-to-tcp", func(t *testing.T) { testSplice(t, "unix", "tcp") }) + t.Run("tcp-to-unix", func(t *testing.T) { testSplice(t, "tcp", "unix") }) t.Run("tcp-to-file", func(t *testing.T) { testSpliceToFile(t, "tcp", "file") }) t.Run("unix-to-file", func(t *testing.T) { testSpliceToFile(t, "unix", "file") }) t.Run("no-unixpacket", testSpliceNoUnixpacket) @@ -57,29 +57,35 @@ type spliceTestCase struct { } func (tc spliceTestCase) test(t *testing.T) { - clientUp, serverUp := spliceTestSocketPair(t, tc.upNet) - defer serverUp.Close() - cleanup, err := startSpliceClient(clientUp, "w", tc.chunkSize, tc.totalSize) - if err != nil { - t.Fatal(err) - } - defer cleanup() - clientDown, serverDown := spliceTestSocketPair(t, tc.downNet) - defer serverDown.Close() - cleanup, err = startSpliceClient(clientDown, "r", tc.chunkSize, tc.totalSize) - if err != nil { - t.Fatal(err) - } - defer cleanup() - var ( - r io.Reader = serverUp - size = tc.totalSize - ) + hook := hookSplice(t) + + // We need to use the actual size for startTestSocketPeer when testing with LimitedReader, + // otherwise the child process created in startTestSocketPeer will hang infinitely because of + // the mismatch of 
data size to transfer. + size := tc.totalSize if tc.limitReadSize > 0 { if tc.limitReadSize < size { size = tc.limitReadSize } + } + clientUp, serverUp := spawnTestSocketPair(t, tc.upNet) + defer serverUp.Close() + cleanup, err := startTestSocketPeer(t, clientUp, "w", tc.chunkSize, size) + if err != nil { + t.Fatal(err) + } + defer cleanup(t) + clientDown, serverDown := spawnTestSocketPair(t, tc.downNet) + defer serverDown.Close() + cleanup, err = startTestSocketPeer(t, clientDown, "r", tc.chunkSize, size) + if err != nil { + t.Fatal(err) + } + defer cleanup(t) + + var r io.Reader = serverUp + if tc.limitReadSize > 0 { r = &io.LimitedReader{ N: int64(tc.limitReadSize), R: serverUp, @@ -87,10 +93,10 @@ func (tc spliceTestCase) test(t *testing.T) { defer serverUp.Close() } n, err := io.Copy(serverDown, r) - serverDown.Close() if err != nil { t.Fatal(err) } + if want := int64(size); want != n { t.Errorf("want %d bytes spliced, got %d", want, n) } @@ -105,34 +111,90 @@ func (tc spliceTestCase) test(t *testing.T) { t.Errorf("r.N = %d, want %d", n, wantN) } } + + // poll.Splice is expected to be called when the source is not + // a wrapper or the destination is TCPConn. + if tc.limitReadSize == 0 || tc.downNet == "tcp" { + // We should have called poll.Splice with the right file descriptor arguments. + if n > 0 && !hook.called { + t.Fatal("expected poll.Splice to be called") + } + + verifySpliceFds(t, serverDown, hook, "dst") + verifySpliceFds(t, serverUp, hook, "src") + + // poll.Splice is expected to handle the data transmission successfully. + if !hook.handled || hook.written != int64(size) || hook.err != nil { + t.Errorf("expected handled = true, written = %d, err = nil, but got handled = %t, written = %d, err = %v", + size, hook.handled, hook.written, hook.err) + } + } else if hook.called { + // poll.Splice will certainly not be called when the source + // is a wrapper and the destination is not TCPConn. 
+ t.Errorf("expected poll.Splice not be called") + } +} + +func verifySpliceFds(t *testing.T, c Conn, hook *spliceHook, fdType string) { + t.Helper() + + sc, ok := c.(syscall.Conn) + if !ok { + t.Fatalf("expected syscall.Conn") + } + rc, err := sc.SyscallConn() + if err != nil { + t.Fatalf("syscall.Conn.SyscallConn error: %v", err) + } + var hookFd int + switch fdType { + case "src": + hookFd = hook.srcfd + case "dst": + hookFd = hook.dstfd + default: + t.Fatalf("unknown fdType %q", fdType) + } + if err := rc.Control(func(fd uintptr) { + if hook.called && hookFd != int(fd) { + t.Fatalf("wrong %s file descriptor: got %d, want %d", fdType, hook.dstfd, int(fd)) + } + }); err != nil { + t.Fatalf("syscall.RawConn.Control error: %v", err) + } } func (tc spliceTestCase) testFile(t *testing.T) { + hook := hookSplice(t) + + // We need to use the actual size for startTestSocketPeer when testing with LimitedReader, + // otherwise the child process created in startTestSocketPeer will hang infinitely because of + // the mismatch of data size to transfer. 
+ actualSize := tc.totalSize + if tc.limitReadSize > 0 { + if tc.limitReadSize < actualSize { + actualSize = tc.limitReadSize + } + } + f, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0) if err != nil { t.Fatal(err) } defer f.Close() - client, server := spliceTestSocketPair(t, tc.upNet) + client, server := spawnTestSocketPair(t, tc.upNet) defer server.Close() - cleanup, err := startSpliceClient(client, "w", tc.chunkSize, tc.totalSize) + cleanup, err := startTestSocketPeer(t, client, "w", tc.chunkSize, actualSize) if err != nil { client.Close() t.Fatal("failed to start splice client:", err) } - defer cleanup() + defer cleanup(t) - var ( - r io.Reader = server - actualSize = tc.totalSize - ) + var r io.Reader = server if tc.limitReadSize > 0 { - if tc.limitReadSize < actualSize { - actualSize = tc.limitReadSize - } - r = &io.LimitedReader{ N: int64(tc.limitReadSize), R: r, @@ -143,6 +205,13 @@ func (tc spliceTestCase) testFile(t *testing.T) { if err != nil { t.Fatalf("failed to ReadFrom with error: %v", err) } + + // We shouldn't have called poll.Splice in TCPConn.WriteTo, + // it's supposed to be called from File.ReadFrom. + if got > 0 && hook.called { + t.Error("expected not poll.Splice to be called") + } + if want := int64(actualSize); got != want { t.Errorf("got %d bytes, want %d", got, want) } @@ -159,23 +228,33 @@ func (tc spliceTestCase) testFile(t *testing.T) { } func testSpliceReaderAtEOF(t *testing.T, upNet, downNet string) { - clientUp, serverUp := spliceTestSocketPair(t, upNet) + // UnixConn doesn't implement io.ReaderFrom, which will fail + // the following test in asserting a UnixConn to be an io.ReaderFrom, + // so skip this test. 
+ if downNet == "unix" { + t.Skip("skipping test on unix socket") + } + + hook := hookSplice(t) + + clientUp, serverUp := spawnTestSocketPair(t, upNet) defer clientUp.Close() - clientDown, serverDown := spliceTestSocketPair(t, downNet) + clientDown, serverDown := spawnTestSocketPair(t, downNet) defer clientDown.Close() + defer serverDown.Close() serverUp.Close() - // We'd like to call net.splice here and check the handled return + // We'd like to call net.spliceFrom here and check the handled return // value, but we disable splice on old Linux kernels. // - // In that case, poll.Splice and net.splice return a non-nil error + // In that case, poll.Splice and net.spliceFrom return a non-nil error // and handled == false. We'd ideally like to see handled == true // because the source reader is at EOF, but if we're running on an old - // kernel, and splice is disabled, we won't see EOF from net.splice, + // kernel, and splice is disabled, we won't see EOF from net.spliceFrom, // because we won't touch the reader at all. // - // Trying to untangle the errors from net.splice and match them + // Trying to untangle the errors from net.spliceFrom and match them // against the errors created by the poll package would be brittle, // so this is a higher level test. // @@ -186,17 +265,30 @@ func testSpliceReaderAtEOF(t *testing.T, upNet, downNet string) { go func() { serverDown.(io.ReaderFrom).ReadFrom(serverUp) io.WriteString(serverDown, msg) - serverDown.Close() }() buf := make([]byte, 3) - _, err := io.ReadFull(clientDown, buf) + n, err := io.ReadFull(clientDown, buf) if err != nil { t.Errorf("clientDown: %v", err) } if string(buf) != msg { t.Errorf("clientDown got %q, want %q", buf, msg) } + + // We should have called poll.Splice with the right file descriptor arguments. 
+ if n > 0 && !hook.called { + t.Fatal("expected poll.Splice to be called") + } + + verifySpliceFds(t, serverDown, hook, "dst") + + // poll.Splice is expected to handle the data transmission but fail + // when working with a closed endpoint, return an error. + if !hook.handled || hook.written > 0 || hook.err == nil { + t.Errorf("expected handled = true, written = 0, err != nil, but got handled = %t, written = %d, err = %v", + hook.handled, hook.written, hook.err) + } } func testSpliceIssue25985(t *testing.T, upNet, downNet string) { @@ -254,10 +346,10 @@ func testSpliceIssue25985(t *testing.T, upNet, downNet string) { } func testSpliceNoUnixpacket(t *testing.T) { - clientUp, serverUp := spliceTestSocketPair(t, "unixpacket") + clientUp, serverUp := spawnTestSocketPair(t, "unixpacket") defer clientUp.Close() defer serverUp.Close() - clientDown, serverDown := spliceTestSocketPair(t, "tcp") + clientDown, serverDown := spawnTestSocketPair(t, "tcp") defer clientDown.Close() defer serverDown.Close() // If splice called poll.Splice here, we'd get err == syscall.EINVAL @@ -268,7 +360,7 @@ func testSpliceNoUnixpacket(t *testing.T) { // // What we want is err == nil and handled == false, i.e. we never // called poll.Splice, because we know the unix socket's network. - _, err, handled := splice(serverDown.(*TCPConn).fd, serverUp) + _, err, handled := spliceFrom(serverDown.(*TCPConn).fd, serverUp) if err != nil || handled != false { t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled) } @@ -285,11 +377,11 @@ func testSpliceNoUnixgram(t *testing.T) { t.Fatal(err) } defer up.Close() - clientDown, serverDown := spliceTestSocketPair(t, "tcp") + clientDown, serverDown := spawnTestSocketPair(t, "tcp") defer clientDown.Close() defer serverDown.Close() // Analogous to testSpliceNoUnixpacket. 
- _, err, handled := splice(serverDown.(*TCPConn).fd, up) + _, err, handled := spliceFrom(serverDown.(*TCPConn).fd, up) if err != nil || handled != false { t.Fatalf("got err = %v, handled = %t, want nil error, handled == false", err, handled) } @@ -300,6 +392,7 @@ func BenchmarkSplice(b *testing.B) { b.Run("tcp-to-tcp", func(b *testing.B) { benchSplice(b, "tcp", "tcp") }) b.Run("unix-to-tcp", func(b *testing.B) { benchSplice(b, "unix", "tcp") }) + b.Run("tcp-to-unix", func(b *testing.B) { benchSplice(b, "tcp", "unix") }) } func benchSplice(b *testing.B, upNet, downNet string) { @@ -319,23 +412,23 @@ func (tc spliceTestCase) bench(b *testing.B) { // To benchmark the genericReadFrom code path, set this to false. useSplice := true - clientUp, serverUp := spliceTestSocketPair(b, tc.upNet) + clientUp, serverUp := spawnTestSocketPair(b, tc.upNet) defer serverUp.Close() - cleanup, err := startSpliceClient(clientUp, "w", tc.chunkSize, tc.chunkSize*b.N) + cleanup, err := startTestSocketPeer(b, clientUp, "w", tc.chunkSize, tc.chunkSize*b.N) if err != nil { b.Fatal(err) } - defer cleanup() + defer cleanup(b) - clientDown, serverDown := spliceTestSocketPair(b, tc.downNet) + clientDown, serverDown := spawnTestSocketPair(b, tc.downNet) defer serverDown.Close() - cleanup, err = startSpliceClient(clientDown, "r", tc.chunkSize, tc.chunkSize*b.N) + cleanup, err = startTestSocketPeer(b, clientDown, "r", tc.chunkSize, tc.chunkSize*b.N) if err != nil { b.Fatal(err) } - defer cleanup() + defer cleanup(b) b.SetBytes(int64(tc.chunkSize)) b.ResetTimer() @@ -356,128 +449,6 @@ func (tc spliceTestCase) bench(b *testing.B) { } } -func spliceTestSocketPair(t testing.TB, net string) (client, server Conn) { - t.Helper() - ln := newLocalListener(t, net) - defer ln.Close() - var cerr, serr error - acceptDone := make(chan struct{}) - go func() { - server, serr = ln.Accept() - acceptDone <- struct{}{} - }() - client, cerr = Dial(ln.Addr().Network(), ln.Addr().String()) - <-acceptDone - if cerr != nil 
{ - if server != nil { - server.Close() - } - t.Fatal(cerr) - } - if serr != nil { - if client != nil { - client.Close() - } - t.Fatal(serr) - } - return client, server -} - -func startSpliceClient(conn Conn, op string, chunkSize, totalSize int) (func(), error) { - f, err := conn.(interface{ File() (*os.File, error) }).File() - if err != nil { - return nil, err - } - - cmd := exec.Command(os.Args[0], os.Args[1:]...) - cmd.Env = []string{ - "GO_NET_TEST_SPLICE=1", - "GO_NET_TEST_SPLICE_OP=" + op, - "GO_NET_TEST_SPLICE_CHUNK_SIZE=" + strconv.Itoa(chunkSize), - "GO_NET_TEST_SPLICE_TOTAL_SIZE=" + strconv.Itoa(totalSize), - "TMPDIR=" + os.Getenv("TMPDIR"), - } - cmd.ExtraFiles = append(cmd.ExtraFiles, f) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - if err := cmd.Start(); err != nil { - return nil, err - } - - donec := make(chan struct{}) - go func() { - cmd.Wait() - conn.Close() - f.Close() - close(donec) - }() - - return func() { - select { - case <-donec: - case <-time.After(5 * time.Second): - log.Printf("killing splice client after 5 second shutdown timeout") - cmd.Process.Kill() - select { - case <-donec: - case <-time.After(5 * time.Second): - log.Printf("splice client didn't die after 10 seconds") - } - } - }, nil -} - -func init() { - if os.Getenv("GO_NET_TEST_SPLICE") == "" { - return - } - defer os.Exit(0) - - f := os.NewFile(uintptr(3), "splice-test-conn") - defer f.Close() - - conn, err := FileConn(f) - if err != nil { - log.Fatal(err) - } - - var chunkSize int - if chunkSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_SPLICE_CHUNK_SIZE")); err != nil { - log.Fatal(err) - } - buf := make([]byte, chunkSize) - - var totalSize int - if totalSize, err = strconv.Atoi(os.Getenv("GO_NET_TEST_SPLICE_TOTAL_SIZE")); err != nil { - log.Fatal(err) - } - - var fn func([]byte) (int, error) - switch op := os.Getenv("GO_NET_TEST_SPLICE_OP"); op { - case "r": - fn = conn.Read - case "w": - defer conn.Close() - - fn = conn.Write - default: - log.Fatalf("unknown op 
%q", op) - } - - var n int - for count := 0; count < totalSize; count += n { - if count+chunkSize > totalSize { - buf = buf[:totalSize-count] - } - - var err error - if n, err = fn(buf); err != nil { - return - } - } -} - func BenchmarkSpliceFile(b *testing.B) { b.Run("tcp-to-file", func(b *testing.B) { benchmarkSpliceFile(b, "tcp") }) b.Run("unix-to-file", func(b *testing.B) { benchmarkSpliceFile(b, "unix") }) @@ -508,15 +479,15 @@ func (bench spliceFileBench) benchSpliceFile(b *testing.B) { totalSize := b.N * bench.chunkSize - client, server := spliceTestSocketPair(b, bench.proto) + client, server := spawnTestSocketPair(b, bench.proto) defer server.Close() - cleanup, err := startSpliceClient(client, "w", bench.chunkSize, totalSize) + cleanup, err := startTestSocketPeer(b, client, "w", bench.chunkSize, totalSize) if err != nil { client.Close() b.Fatalf("failed to start splice client: %v", err) } - defer cleanup() + defer cleanup(b) b.ReportAllocs() b.SetBytes(int64(bench.chunkSize)) @@ -530,3 +501,42 @@ func (bench spliceFileBench) benchSpliceFile(b *testing.B) { b.Errorf("bytes sent mismatch, got: %d, want: %d", got, want) } } + +func hookSplice(t *testing.T) *spliceHook { + t.Helper() + + h := new(spliceHook) + h.install() + t.Cleanup(h.uninstall) + return h +} + +type spliceHook struct { + called bool + dstfd int + srcfd int + remain int64 + + written int64 + handled bool + sc string + err error + + original func(dst, src *poll.FD, remain int64) (int64, bool, string, error) +} + +func (h *spliceHook) install() { + h.original = pollSplice + pollSplice = func(dst, src *poll.FD, remain int64) (int64, bool, string, error) { + h.called = true + h.dstfd = dst.Sysfd + h.srcfd = src.Sysfd + h.remain = remain + h.written, h.handled, h.sc, h.err = h.original(dst, src, remain) + return h.written, h.handled, h.sc, h.err + } +} + +func (h *spliceHook) uninstall() { + pollSplice = h.original +} diff --git a/src/net/splice_stub.go b/src/net/splice_stub.go index 
3cdadb11c5..239227ff88 100644 --- a/src/net/splice_stub.go +++ b/src/net/splice_stub.go @@ -8,6 +8,10 @@ package net import "io" -func splice(c *netFD, r io.Reader) (int64, error, bool) { +func spliceFrom(_ *netFD, _ io.Reader) (int64, error, bool) { + return 0, nil, false +} + +func spliceTo(_ io.Writer, _ *netFD) (int64, error, bool) { return 0, nil, false } diff --git a/src/net/tcpconn_keepalive_conf_unix_test.go b/src/net/tcpconn_keepalive_conf_unix_test.go new file mode 100644 index 0000000000..7c397083f9 --- /dev/null +++ b/src/net/tcpconn_keepalive_conf_unix_test.go @@ -0,0 +1,102 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || freebsd || linux || netbsd || darwin || dragonfly + +package net + +import "time" + +var testConfigs = []KeepAliveConfig{ + { + Enable: true, + Idle: 5 * time.Second, + Interval: 3 * time.Second, + Count: 10, + }, + { + Enable: true, + Idle: 0, + Interval: 0, + Count: 0, + }, + { + Enable: true, + Idle: -1, + Interval: -1, + Count: -1, + }, + { + Enable: true, + Idle: -1, + Interval: 3 * time.Second, + Count: 10, + }, + { + Enable: true, + Idle: 5 * time.Second, + Interval: -1, + Count: 10, + }, + { + Enable: true, + Idle: 5 * time.Second, + Interval: 3 * time.Second, + Count: -1, + }, + { + Enable: true, + Idle: -1, + Interval: -1, + Count: 10, + }, + { + Enable: true, + Idle: -1, + Interval: 3 * time.Second, + Count: -1, + }, + { + Enable: true, + Idle: 5 * time.Second, + Interval: -1, + Count: -1, + }, + { + Enable: true, + Idle: 0, + Interval: 3 * time.Second, + Count: 10, + }, + { + Enable: true, + Idle: 5 * time.Second, + Interval: 0, + Count: 10, + }, + { + Enable: true, + Idle: 5 * time.Second, + Interval: 3 * time.Second, + Count: 0, + }, + { + Enable: true, + Idle: 0, + Interval: 0, + Count: 10, + }, + { + Enable: true, + Idle: 0, + Interval: 3 * time.Second, + Count: 0, + }, + { + Enable: 
true, + Idle: 5 * time.Second, + Interval: 0, + Count: 0, + }, +} diff --git a/src/net/tcpconn_keepalive_darwin_test.go b/src/net/tcpconn_keepalive_darwin_test.go new file mode 100644 index 0000000000..147e08cff1 --- /dev/null +++ b/src/net/tcpconn_keepalive_darwin_test.go @@ -0,0 +1,92 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build darwin + +package net + +import ( + "syscall" + "testing" + "time" +) + +func getCurrentKeepAliveSettings(fd int) (cfg KeepAliveConfig, err error) { + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + return + } + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE) + if err != nil { + return + } + tcpKeepAliveInterval, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, sysTCP_KEEPINTVL) + if err != nil { + return + } + tcpKeepAliveCount, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, sysTCP_KEEPCNT) + if err != nil { + return + } + cfg = KeepAliveConfig{ + Enable: tcpKeepAlive != 0, + Idle: time.Duration(tcpKeepAliveIdle) * time.Second, + Interval: time.Duration(tcpKeepAliveInterval) * time.Second, + Count: tcpKeepAliveCount, + } + return +} + +func verifyKeepAliveSettings(t *testing.T, fd int, oldCfg, cfg KeepAliveConfig) { + if cfg.Idle == 0 { + cfg.Idle = defaultTCPKeepAliveIdle + } + if cfg.Interval == 0 { + cfg.Interval = defaultTCPKeepAliveInterval + } + if cfg.Count == 0 { + cfg.Count = defaultTCPKeepAliveCount + } + if cfg.Idle == -1 { + cfg.Idle = oldCfg.Idle + } + if cfg.Interval == -1 { + cfg.Interval = oldCfg.Interval + } + if cfg.Count == -1 { + cfg.Count = oldCfg.Count + } + + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + t.Fatal(err) + } + if (tcpKeepAlive != 0) != cfg.Enable { + t.Fatalf("SO_KEEPALIVE: got %t; want %t", 
tcpKeepAlive != 0, cfg.Enable) + } + + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE) + if err != nil { + t.Fatal(err) + } + if time.Duration(tcpKeepAliveIdle)*time.Second != cfg.Idle { + t.Fatalf("TCP_KEEPIDLE: got %ds; want %v", tcpKeepAliveIdle, cfg.Idle) + } + + tcpKeepAliveInterval, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, sysTCP_KEEPINTVL) + if err != nil { + t.Fatal(err) + } + if time.Duration(tcpKeepAliveInterval)*time.Second != cfg.Interval { + t.Fatalf("TCP_KEEPINTVL: got %ds; want %v", tcpKeepAliveInterval, cfg.Interval) + } + + tcpKeepAliveCount, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, sysTCP_KEEPCNT) + if err != nil { + t.Fatal(err) + } + if tcpKeepAliveCount != cfg.Count { + t.Fatalf("TCP_KEEPCNT: got %d; want %d", tcpKeepAliveCount, cfg.Count) + } +} diff --git a/src/net/tcpconn_keepalive_solaris_test.go b/src/net/tcpconn_keepalive_solaris_test.go new file mode 100644 index 0000000000..c6456c47a9 --- /dev/null +++ b/src/net/tcpconn_keepalive_solaris_test.go @@ -0,0 +1,89 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build solaris + +package net + +import ( + "syscall" + "testing" + "time" +) + +var testConfigs = []KeepAliveConfig{ + { + Enable: true, + Idle: 2 * time.Second, + Interval: -1, + Count: -1, + }, + { + Enable: true, + Idle: 0, + Interval: -1, + Count: -1, + }, + { + Enable: true, + Idle: -1, + Interval: -1, + Count: -1, + }, +} + +func getCurrentKeepAliveSettings(fd int) (cfg KeepAliveConfig, err error) { + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + return + } + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE_THRESHOLD) + if err != nil { + return + } + cfg = KeepAliveConfig{ + Enable: tcpKeepAlive != 0, + Idle: time.Duration(tcpKeepAliveIdle) * time.Millisecond, + Interval: -1, + Count: -1, + } + return +} + +func verifyKeepAliveSettings(t *testing.T, fd int, oldCfg, cfg KeepAliveConfig) { + if cfg.Idle == 0 { + cfg.Idle = defaultTCPKeepAliveIdle + } + if cfg.Interval == 0 { + cfg.Interval = defaultTCPKeepAliveInterval + } + if cfg.Count == 0 { + cfg.Count = defaultTCPKeepAliveCount + } + if cfg.Idle == -1 { + cfg.Idle = oldCfg.Idle + } + if cfg.Interval == -1 { + cfg.Interval = oldCfg.Interval + } + if cfg.Count == -1 { + cfg.Count = oldCfg.Count + } + + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + t.Fatal(err) + } + if (tcpKeepAlive != 0) != cfg.Enable { + t.Fatalf("SO_KEEPALIVE: got %t; want %t", tcpKeepAlive != 0, cfg.Enable) + } + + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE_THRESHOLD) + if err != nil { + t.Fatal(err) + } + if time.Duration(tcpKeepAliveIdle)*time.Millisecond != cfg.Idle { + t.Fatalf("TCP_KEEPIDLE: got %dms; want %v", tcpKeepAliveIdle, cfg.Idle) + } +} diff --git a/src/net/tcpconn_keepalive_test.go b/src/net/tcpconn_keepalive_test.go new file mode 100644 index 0000000000..f858d995f0 --- /dev/null +++ 
b/src/net/tcpconn_keepalive_test.go @@ -0,0 +1,195 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || freebsd || linux || netbsd || dragonfly || darwin || solaris || windows + +package net + +import ( + "runtime" + "testing" +) + +func TestTCPConnDialerKeepAliveConfig(t *testing.T) { + // TODO(panjf2000): stop skipping this test on Solaris + // when https://go.dev/issue/64251 is fixed. + if runtime.GOOS == "solaris" { + t.Skip("skipping on solaris for now") + } + + t.Cleanup(func() { + testPreHookSetKeepAlive = func(*netFD) {} + }) + var ( + errHook error + oldCfg KeepAliveConfig + ) + testPreHookSetKeepAlive = func(nfd *netFD) { + oldCfg, errHook = getCurrentKeepAliveSettings(int(nfd.pfd.Sysfd)) + } + + handler := func(ls *localServer, ln Listener) { + for { + c, err := ln.Accept() + if err != nil { + return + } + c.Close() + } + } + ln := newLocalListener(t, "tcp", &ListenConfig{ + KeepAlive: -1, // prevent calling hook from accepting + }) + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + + for _, cfg := range testConfigs { + d := Dialer{ + KeepAlive: defaultTCPKeepAliveIdle, // should be ignored + KeepAliveConfig: cfg} + c, err := d.Dial("tcp", ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + if errHook != nil { + t.Fatal(errHook) + } + + sc, err := c.(*TCPConn).SyscallConn() + if err != nil { + t.Fatal(err) + } + if err := sc.Control(func(fd uintptr) { + verifyKeepAliveSettings(t, int(fd), oldCfg, cfg) + }); err != nil { + t.Fatal(err) + } + } +} + +func TestTCPConnListenerKeepAliveConfig(t *testing.T) { + // TODO(panjf2000): stop skipping this test on Solaris + // when https://go.dev/issue/64251 is fixed. 
+ if runtime.GOOS == "solaris" { + t.Skip("skipping on solaris for now") + } + + t.Cleanup(func() { + testPreHookSetKeepAlive = func(*netFD) {} + }) + var ( + errHook error + oldCfg KeepAliveConfig + ) + testPreHookSetKeepAlive = func(nfd *netFD) { + oldCfg, errHook = getCurrentKeepAliveSettings(int(nfd.pfd.Sysfd)) + } + + ch := make(chan Conn, 1) + handler := func(ls *localServer, ln Listener) { + c, err := ln.Accept() + if err != nil { + return + } + ch <- c + } + for _, cfg := range testConfigs { + ln := newLocalListener(t, "tcp", &ListenConfig{ + KeepAlive: defaultTCPKeepAliveIdle, // should be ignored + KeepAliveConfig: cfg}) + ls := (&streamListener{Listener: ln}).newLocalServer() + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + d := Dialer{KeepAlive: -1} // prevent calling hook from dialing + c, err := d.Dial("tcp", ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + cc := <-ch + defer cc.Close() + if errHook != nil { + t.Fatal(errHook) + } + sc, err := cc.(*TCPConn).SyscallConn() + if err != nil { + t.Fatal(err) + } + if err := sc.Control(func(fd uintptr) { + verifyKeepAliveSettings(t, int(fd), oldCfg, cfg) + }); err != nil { + t.Fatal(err) + } + } +} + +func TestTCPConnSetKeepAliveConfig(t *testing.T) { + // TODO(panjf2000): stop skipping this test on Solaris + // when https://go.dev/issue/64251 is fixed. 
+ if runtime.GOOS == "solaris" { + t.Skip("skipping on solaris for now") + } + + handler := func(ls *localServer, ln Listener) { + for { + c, err := ln.Accept() + if err != nil { + return + } + c.Close() + } + } + ls := newLocalServer(t, "tcp") + defer ls.teardown() + if err := ls.buildup(handler); err != nil { + t.Fatal(err) + } + ra, err := ResolveTCPAddr("tcp", ls.Listener.Addr().String()) + if err != nil { + t.Fatal(err) + } + for _, cfg := range testConfigs { + c, err := DialTCP("tcp", nil, ra) + if err != nil { + t.Fatal(err) + } + defer c.Close() + + sc, err := c.SyscallConn() + if err != nil { + t.Fatal(err) + } + + var ( + errHook error + oldCfg KeepAliveConfig + ) + if err := sc.Control(func(fd uintptr) { + oldCfg, errHook = getCurrentKeepAliveSettings(int(fd)) + }); err != nil { + t.Fatal(err) + } + if errHook != nil { + t.Fatal(errHook) + } + + if err := c.SetKeepAliveConfig(cfg); err != nil { + t.Fatal(err) + } + + if err := sc.Control(func(fd uintptr) { + verifyKeepAliveSettings(t, int(fd), oldCfg, cfg) + }); err != nil { + t.Fatal(err) + } + } +} diff --git a/src/net/tcpconn_keepalive_unix_test.go b/src/net/tcpconn_keepalive_unix_test.go new file mode 100644 index 0000000000..74555c9c5b --- /dev/null +++ b/src/net/tcpconn_keepalive_unix_test.go @@ -0,0 +1,92 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build aix || dragonfly || freebsd || linux || netbsd + +package net + +import ( + "syscall" + "testing" + "time" +) + +func getCurrentKeepAliveSettings(fd int) (cfg KeepAliveConfig, err error) { + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + return + } + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE) + if err != nil { + return + } + tcpKeepAliveInterval, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL) + if err != nil { + return + } + tcpKeepAliveCount, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT) + if err != nil { + return + } + cfg = KeepAliveConfig{ + Enable: tcpKeepAlive != 0, + Idle: time.Duration(tcpKeepAliveIdle) * time.Second, + Interval: time.Duration(tcpKeepAliveInterval) * time.Second, + Count: tcpKeepAliveCount, + } + return +} + +func verifyKeepAliveSettings(t *testing.T, fd int, oldCfg, cfg KeepAliveConfig) { + if cfg.Idle == 0 { + cfg.Idle = defaultTCPKeepAliveIdle + } + if cfg.Interval == 0 { + cfg.Interval = defaultTCPKeepAliveInterval + } + if cfg.Count == 0 { + cfg.Count = defaultTCPKeepAliveCount + } + if cfg.Idle == -1 { + cfg.Idle = oldCfg.Idle + } + if cfg.Interval == -1 { + cfg.Interval = oldCfg.Interval + } + if cfg.Count == -1 { + cfg.Count = oldCfg.Count + } + + tcpKeepAlive, err := syscall.GetsockoptInt(fd, syscall.SOL_SOCKET, syscall.SO_KEEPALIVE) + if err != nil { + t.Fatal(err) + } + if (tcpKeepAlive != 0) != cfg.Enable { + t.Fatalf("SO_KEEPALIVE: got %t; want %t", tcpKeepAlive != 0, cfg.Enable) + } + + tcpKeepAliveIdle, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE) + if err != nil { + t.Fatal(err) + } + if time.Duration(tcpKeepAliveIdle)*time.Second != cfg.Idle { + t.Fatalf("TCP_KEEPIDLE: got %ds; want %v", tcpKeepAliveIdle, cfg.Idle) + } + + tcpKeepAliveInterval, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, 
syscall.TCP_KEEPINTVL) + if err != nil { + t.Fatal(err) + } + if time.Duration(tcpKeepAliveInterval)*time.Second != cfg.Interval { + t.Fatalf("TCP_KEEPINTVL: got %ds; want %v", tcpKeepAliveInterval, cfg.Interval) + } + + tcpKeepAliveCount, err := syscall.GetsockoptInt(fd, syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT) + if err != nil { + t.Fatal(err) + } + if tcpKeepAliveCount != cfg.Count { + t.Fatalf("TCP_KEEPCNT: got %d; want %d", tcpKeepAliveCount, cfg.Count) + } +} diff --git a/src/net/tcpconn_keepalive_windows_test.go b/src/net/tcpconn_keepalive_windows_test.go new file mode 100644 index 0000000000..c3d6366c62 --- /dev/null +++ b/src/net/tcpconn_keepalive_windows_test.go @@ -0,0 +1,33 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build windows + +package net + +import ( + "testing" + "time" +) + +var testConfigs = []KeepAliveConfig{ + { + Enable: true, + Idle: 2 * time.Second, + Interval: time.Second, + Count: -1, + }, +} + +func getCurrentKeepAliveSettings(_ int) (cfg KeepAliveConfig, err error) { + // TODO(panjf2000): same as verifyKeepAliveSettings. + return +} + +func verifyKeepAliveSettings(_ *testing.T, _ int, _, _ KeepAliveConfig) { + // TODO(panjf2000): Unlike Unix-like OS's, Windows doesn't provide + // any ways to retrieve the current TCP keep-alive settings, therefore + // we're not able to run the test suite similar to Unix-like OS's on Windows. + // Try to find another proper approach to test the keep-alive settings on Windows. +} diff --git a/src/net/tcpsock.go b/src/net/tcpsock.go index 1528353cba..5ffdbb0359 100644 --- a/src/net/tcpsock.go +++ b/src/net/tcpsock.go @@ -24,7 +24,7 @@ type TCPAddr struct { Zone string // IPv6 scoped addressing zone } -// AddrPort returns the TCPAddr a as a netip.AddrPort. +// AddrPort returns the [TCPAddr] a as a [netip.AddrPort]. 
// // If a.Port does not fit in a uint16, it's silently truncated. // @@ -79,7 +79,7 @@ func (a *TCPAddr) opAddr() Addr { // recommended, because it will return at most one of the host name's // IP addresses. // -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. func ResolveTCPAddr(network, address string) (*TCPAddr, error) { switch network { @@ -96,7 +96,7 @@ func ResolveTCPAddr(network, address string) (*TCPAddr, error) { return addrs.forResolve(network, address).(*TCPAddr), nil } -// TCPAddrFromAddrPort returns addr as a TCPAddr. If addr.IsValid() is false, +// TCPAddrFromAddrPort returns addr as a [TCPAddr]. If addr.IsValid() is false, // then the returned TCPAddr will contain a nil IP field, indicating an // address family-agnostic unspecified address. func TCPAddrFromAddrPort(addr netip.AddrPort) *TCPAddr { @@ -107,14 +107,44 @@ func TCPAddrFromAddrPort(addr netip.AddrPort) *TCPAddr { } } -// TCPConn is an implementation of the Conn interface for TCP network +// TCPConn is an implementation of the [Conn] interface for TCP network // connections. type TCPConn struct { conn } +// KeepAliveConfig contains TCP keep-alive options. +// +// If the Idle, Interval, or Count fields are zero, a default value is chosen. +// If a field is negative, the corresponding socket-level option will be left unchanged. +// +// Note that Windows doesn't support setting the KeepAliveIdle and KeepAliveInterval separately. +// It's recommended to set both Idle and Interval to non-negative values on Windows if you +// intend to customize the TCP keep-alive settings. +// By contrast, if only one of Idle and Interval is set to a non-negative value, the other will +// be set to the system default value, and ultimately, set both Idle and Interval to negative +// values if you want to leave them unchanged. +type KeepAliveConfig struct { + // If Enable is true, keep-alive probes are enabled. 
+ Enable bool + + // Idle is the time that the connection must be idle before + // the first keep-alive probe is sent. + // If zero, a default value of 15 seconds is used. + Idle time.Duration + + // Interval is the time between keep-alive probes. + // If zero, a default value of 15 seconds is used. + Interval time.Duration + + // Count is the maximum number of keep-alive probes that + // can go unanswered before dropping a connection. + // If zero, a default value of 9 is used. + Count int +} + // SyscallConn returns a raw network connection. -// This implements the syscall.Conn interface. +// This implements the [syscall.Conn] interface. func (c *TCPConn) SyscallConn() (syscall.RawConn, error) { if !c.ok() { return nil, syscall.EINVAL @@ -122,7 +152,7 @@ func (c *TCPConn) SyscallConn() (syscall.RawConn, error) { return newRawConn(c.fd), nil } -// ReadFrom implements the io.ReaderFrom ReadFrom method. +// ReadFrom implements the [io.ReaderFrom] ReadFrom method. func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) { if !c.ok() { return 0, syscall.EINVAL @@ -134,6 +164,18 @@ func (c *TCPConn) ReadFrom(r io.Reader) (int64, error) { return n, err } +// WriteTo implements the io.WriterTo WriteTo method. +func (c *TCPConn) WriteTo(w io.Writer) (int64, error) { + if !c.ok() { + return 0, syscall.EINVAL + } + n, err := c.writeTo(w) + if err != nil && err != io.EOF { + err = &OpError{Op: "writeto", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + return n, err +} + // CloseRead shuts down the reading side of the TCP connection. // Most callers should just use Close. func (c *TCPConn) CloseRead() error { @@ -194,12 +236,16 @@ func (c *TCPConn) SetKeepAlive(keepalive bool) error { return nil } -// SetKeepAlivePeriod sets period between keep-alives. +// SetKeepAlivePeriod sets the idle duration the connection +// needs to remain idle before TCP starts sending keepalive probes. 
+// +// Note that calling this method on Windows will reset the KeepAliveInterval +// to the default system value, which is normally 1 second. func (c *TCPConn) SetKeepAlivePeriod(d time.Duration) error { if !c.ok() { return syscall.EINVAL } - if err := setKeepAlivePeriod(c.fd, d); err != nil { + if err := setKeepAliveIdle(c.fd, d); err != nil { return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} } return nil @@ -235,22 +281,28 @@ func (c *TCPConn) MultipathTCP() (bool, error) { return isUsingMultipathTCP(c.fd), nil } -func newTCPConn(fd *netFD, keepAlive time.Duration, keepAliveHook func(time.Duration)) *TCPConn { +func newTCPConn(fd *netFD, keepAliveIdle time.Duration, keepAliveCfg KeepAliveConfig, preKeepAliveHook func(*netFD), keepAliveHook func(KeepAliveConfig)) *TCPConn { setNoDelay(fd, true) - if keepAlive == 0 { - keepAlive = defaultTCPKeepAlive - } - if keepAlive > 0 { - setKeepAlive(fd, true) - setKeepAlivePeriod(fd, keepAlive) - if keepAliveHook != nil { - keepAliveHook(keepAlive) + if !keepAliveCfg.Enable && keepAliveIdle >= 0 { + keepAliveCfg = KeepAliveConfig{ + Enable: true, + Idle: keepAliveIdle, } } - return &TCPConn{conn{fd}} + c := &TCPConn{conn{fd}} + if keepAliveCfg.Enable { + if preKeepAliveHook != nil { + preKeepAliveHook(fd) + } + c.SetKeepAliveConfig(keepAliveCfg) + if keepAliveHook != nil { + keepAliveHook(keepAliveCfg) + } + } + return c } -// DialTCP acts like Dial for TCP networks. +// DialTCP acts like [Dial] for TCP networks. // // The network must be a TCP network name; see func Dial for details. // @@ -275,14 +327,14 @@ func DialTCP(network string, laddr, raddr *TCPAddr) (*TCPConn, error) { } // TCPListener is a TCP network listener. Clients should typically -// use variables of type Listener instead of assuming TCP. +// use variables of type [Listener] instead of assuming TCP. type TCPListener struct { fd *netFD lc ListenConfig } // SyscallConn returns a raw network connection. 
-// This implements the syscall.Conn interface. +// This implements the [syscall.Conn] interface. // // The returned RawConn only supports calling Control. Read and // Write return an error. @@ -306,8 +358,8 @@ func (l *TCPListener) AcceptTCP() (*TCPConn, error) { return c, nil } -// Accept implements the Accept method in the Listener interface; it -// waits for the next call and returns a generic Conn. +// Accept implements the Accept method in the [Listener] interface; it +// waits for the next call and returns a generic [Conn]. func (l *TCPListener) Accept() (Conn, error) { if !l.ok() { return nil, syscall.EINVAL @@ -331,7 +383,7 @@ func (l *TCPListener) Close() error { return nil } -// Addr returns the listener's network address, a *TCPAddr. +// Addr returns the listener's network address, a [*TCPAddr]. // The Addr returned is shared by all invocations of Addr, so // do not modify it. func (l *TCPListener) Addr() Addr { return l.fd.laddr } @@ -345,7 +397,7 @@ func (l *TCPListener) SetDeadline(t time.Time) error { return l.fd.SetDeadline(t) } -// File returns a copy of the underlying os.File. +// File returns a copy of the underlying [os.File]. // It is the caller's responsibility to close f when finished. // Closing l does not affect f, and closing f does not affect l. // @@ -363,7 +415,7 @@ func (l *TCPListener) File() (f *os.File, err error) { return } -// ListenTCP acts like Listen for TCP networks. +// ListenTCP acts like [Listen] for TCP networks. // // The network must be a TCP network name; see func Dial for details. 
// diff --git a/src/net/tcpsock_plan9.go b/src/net/tcpsock_plan9.go index d55948f69e..430ed29ed4 100644 --- a/src/net/tcpsock_plan9.go +++ b/src/net/tcpsock_plan9.go @@ -14,6 +14,10 @@ func (c *TCPConn) readFrom(r io.Reader) (int64, error) { return genericReadFrom(c, r) } +func (c *TCPConn) writeTo(w io.Writer) (int64, error) { + return genericWriteTo(c, w) +} + func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { if h := sd.testHookDialTCP; h != nil { return h(ctx, sd.network, laddr, raddr) @@ -42,7 +46,7 @@ func (sd *sysDialer) doDialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCP if err != nil { return nil, err } - return newTCPConn(fd, sd.Dialer.KeepAlive, testHookSetKeepAlive), nil + return newTCPConn(fd, sd.Dialer.KeepAlive, sd.Dialer.KeepAliveConfig, testPreHookSetKeepAlive, testHookSetKeepAlive), nil } func (ln *TCPListener) ok() bool { return ln != nil && ln.fd != nil && ln.fd.ctl != nil } @@ -52,7 +56,7 @@ func (ln *TCPListener) accept() (*TCPConn, error) { if err != nil { return nil, err } - return newTCPConn(fd, ln.lc.KeepAlive, nil), nil + return newTCPConn(fd, ln.lc.KeepAlive, ln.lc.KeepAliveConfig, testPreHookSetKeepAlive, testHookSetKeepAlive), nil } func (ln *TCPListener) close() error { diff --git a/src/net/tcpsock_posix.go b/src/net/tcpsock_posix.go index 83cee7c789..a25494d9c0 100644 --- a/src/net/tcpsock_posix.go +++ b/src/net/tcpsock_posix.go @@ -45,7 +45,7 @@ func (a *TCPAddr) toLocal(net string) sockaddr { } func (c *TCPConn) readFrom(r io.Reader) (int64, error) { - if n, err, handled := splice(c.fd, r); handled { + if n, err, handled := spliceFrom(c.fd, r); handled { return n, err } if n, err, handled := sendFile(c.fd, r); handled { @@ -54,6 +54,13 @@ func (c *TCPConn) readFrom(r io.Reader) (int64, error) { return genericReadFrom(c, r) } +func (c *TCPConn) writeTo(w io.Writer) (int64, error) { + if n, err, handled := spliceTo(w, c.fd); handled { + return n, err + } + return genericWriteTo(c, w) 
+} + func (sd *sysDialer) dialTCP(ctx context.Context, laddr, raddr *TCPAddr) (*TCPConn, error) { if h := sd.testHookDialTCP; h != nil { return h(ctx, sd.network, laddr, raddr) @@ -111,7 +118,7 @@ func (sd *sysDialer) doDialTCPProto(ctx context.Context, laddr, raddr *TCPAddr, if err != nil { return nil, err } - return newTCPConn(fd, sd.Dialer.KeepAlive, testHookSetKeepAlive), nil + return newTCPConn(fd, sd.Dialer.KeepAlive, sd.Dialer.KeepAliveConfig, testPreHookSetKeepAlive, testHookSetKeepAlive), nil } func selfConnect(fd *netFD, err error) bool { @@ -153,7 +160,7 @@ func (ln *TCPListener) accept() (*TCPConn, error) { if err != nil { return nil, err } - return newTCPConn(fd, ln.lc.KeepAlive, nil), nil + return newTCPConn(fd, ln.lc.KeepAlive, ln.lc.KeepAliveConfig, testPreHookSetKeepAlive, testHookSetKeepAlive), nil } func (ln *TCPListener) close() error { diff --git a/src/net/tcpsock_test.go b/src/net/tcpsock_test.go index b37e936ff8..9ed49a925b 100644 --- a/src/net/tcpsock_test.go +++ b/src/net/tcpsock_test.go @@ -775,8 +775,8 @@ func TestDialTCPDefaultKeepAlive(t *testing.T) { defer ln.Close() got := time.Duration(-1) - testHookSetKeepAlive = func(d time.Duration) { got = d } - defer func() { testHookSetKeepAlive = func(time.Duration) {} }() + testHookSetKeepAlive = func(cfg KeepAliveConfig) { got = cfg.Idle } + defer func() { testHookSetKeepAlive = func(KeepAliveConfig) {} }() c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr)) if err != nil { @@ -784,8 +784,8 @@ func TestDialTCPDefaultKeepAlive(t *testing.T) { } defer c.Close() - if got != defaultTCPKeepAlive { - t.Errorf("got keepalive %v; want %v", got, defaultTCPKeepAlive) + if got != 0 { + t.Errorf("got keepalive %v; want %v", got, defaultTCPKeepAliveIdle) } } diff --git a/src/net/tcpsock_unix.go b/src/net/tcpsock_unix.go new file mode 100644 index 0000000000..b5c05f4ead --- /dev/null +++ b/src/net/tcpsock_unix.go @@ -0,0 +1,31 @@ +// Copyright 2023 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !windows + +package net + +import "syscall" + +// SetKeepAliveConfig configures keep-alive messages sent by the operating system. +func (c *TCPConn) SetKeepAliveConfig(config KeepAliveConfig) error { + if !c.ok() { + return syscall.EINVAL + } + + if err := setKeepAlive(c.fd, config.Enable); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if err := setKeepAliveIdle(c.fd, config.Idle); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if err := setKeepAliveInterval(c.fd, config.Interval); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if err := setKeepAliveCount(c.fd, config.Count); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + + return nil +} diff --git a/src/net/tcpsock_windows.go b/src/net/tcpsock_windows.go new file mode 100644 index 0000000000..8ec71ab3ad --- /dev/null +++ b/src/net/tcpsock_windows.go @@ -0,0 +1,26 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package net + +import "syscall" + +// SetKeepAliveConfig configures keep-alive messages sent by the operating system. 
+func (c *TCPConn) SetKeepAliveConfig(config KeepAliveConfig) error { + if !c.ok() { + return syscall.EINVAL + } + + if err := setKeepAlive(c.fd, config.Enable); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if err := setKeepAliveIdleAndInterval(c.fd, config.Idle, config.Interval); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + if err := setKeepAliveCount(c.fd, config.Count); err != nil { + return &OpError{Op: "set", Net: c.fd.net, Source: c.fd.laddr, Addr: c.fd.raddr, Err: err} + } + + return nil +} diff --git a/src/net/tcpsockopt_darwin.go b/src/net/tcpsockopt_darwin.go index 53c6756e33..efe7f63323 100644 --- a/src/net/tcpsockopt_darwin.go +++ b/src/net/tcpsockopt_darwin.go @@ -10,16 +10,48 @@ import ( "time" ) -// syscall.TCP_KEEPINTVL is missing on some darwin architectures. -const sysTCP_KEEPINTVL = 0x101 +// syscall.TCP_KEEPINTVL and syscall.TCP_KEEPCNT might be missing on some darwin architectures. +const ( + sysTCP_KEEPINTVL = 0x101 + sysTCP_KEEPCNT = 0x102 +) + +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + if d == 0 { + d = defaultTCPKeepAliveIdle + } else if d < 0 { + return nil + } -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { // The kernel expects seconds so round to next highest second. secs := int(roundDurationUp(d, time.Second)) - if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs); err != nil { - return wrapSyscallError("setsockopt", err) - } err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE, secs) runtime.KeepAlive(fd) return wrapSyscallError("setsockopt", err) } + +func setKeepAliveInterval(fd *netFD, d time.Duration) error { + if d == 0 { + d = defaultTCPKeepAliveInterval + } else if d < 0 { + return nil + } + + // The kernel expects seconds so round to next highest second. 
+ secs := int(roundDurationUp(d, time.Second)) + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, sysTCP_KEEPINTVL, secs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setKeepAliveCount(fd *netFD, n int) error { + if n == 0 { + n = defaultTCPKeepAliveCount + } else if n < 0 { + return nil + } + + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, sysTCP_KEEPCNT, n) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/src/net/tcpsockopt_dragonfly.go b/src/net/tcpsockopt_dragonfly.go deleted file mode 100644 index b473c02b68..0000000000 --- a/src/net/tcpsockopt_dragonfly.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package net - -import ( - "runtime" - "syscall" - "time" -) - -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { - // The kernel expects milliseconds so round to next highest - // millisecond. - msecs := int(roundDurationUp(d, time.Millisecond)) - if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, msecs); err != nil { - return wrapSyscallError("setsockopt", err) - } - err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, msecs) - runtime.KeepAlive(fd) - return wrapSyscallError("setsockopt", err) -} diff --git a/src/net/tcpsockopt_openbsd.go b/src/net/tcpsockopt_openbsd.go index 10e1bef3e5..d21b77c406 100644 --- a/src/net/tcpsockopt_openbsd.go +++ b/src/net/tcpsockopt_openbsd.go @@ -9,7 +9,28 @@ import ( "time" ) -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +func setKeepAliveIdle(_ *netFD, d time.Duration) error { + if d < 0 { + return nil + } + // OpenBSD has no user-settable per-socket TCP keepalive + // options. 
+ return syscall.ENOPROTOOPT +} + +func setKeepAliveInterval(_ *netFD, d time.Duration) error { + if d < 0 { + return nil + } + // OpenBSD has no user-settable per-socket TCP keepalive + // options. + return syscall.ENOPROTOOPT +} + +func setKeepAliveCount(_ *netFD, n int) error { + if n < 0 { + return nil + } // OpenBSD has no user-settable per-socket TCP keepalive // options. return syscall.ENOPROTOOPT diff --git a/src/net/tcpsockopt_plan9.go b/src/net/tcpsockopt_plan9.go index 264359dcf3..017e87518a 100644 --- a/src/net/tcpsockopt_plan9.go +++ b/src/net/tcpsockopt_plan9.go @@ -12,13 +12,31 @@ import ( "time" ) -func setNoDelay(fd *netFD, noDelay bool) error { +func setNoDelay(_ *netFD, _ bool) error { return syscall.EPLAN9 } // Set keep alive period. -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + if d < 0 { + return nil + } + cmd := "keepalive " + itoa.Itoa(int(d/time.Millisecond)) _, e := fd.ctl.WriteAt([]byte(cmd), 0) return e } + +func setKeepAliveInterval(_ *netFD, d time.Duration) error { + if d < 0 { + return nil + } + return syscall.EPLAN9 +} + +func setKeepAliveCount(_ *netFD, n int) error { + if n < 0 { + return nil + } + return syscall.EPLAN9 +} diff --git a/src/net/tcpsockopt_solaris.go b/src/net/tcpsockopt_solaris.go index f15e589dc0..44eb9cd09e 100644 --- a/src/net/tcpsockopt_solaris.go +++ b/src/net/tcpsockopt_solaris.go @@ -10,11 +10,31 @@ import ( "time" ) -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + if d == 0 { + d = defaultTCPKeepAliveIdle + } else if d < 0 { + return nil + } + // The kernel expects milliseconds so round to next highest // millisecond. 
msecs := int(roundDurationUp(d, time.Millisecond)) + // TODO(panjf2000): the system call here always returns an error of invalid argument, + // this was never discovered due to the lack of tests for TCP keep-alive on various + // platforms in Go's test suite. Try to dive deep and figure out the reason later. + // Check out https://go.dev/issue/64251 for more details. + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE_THRESHOLD, msecs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setKeepAliveInterval(_ *netFD, d time.Duration) error { + if d < 0 { + return nil + } + // Normally we'd do // syscall.SetsockoptInt(fd.sysfd, syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs) // here, but we can't because Solaris does not have TCP_KEEPINTVL. @@ -25,8 +45,12 @@ func setKeepAlivePeriod(fd *netFD, d time.Duration) error { // and do it anyway, like on Darwin, because Solaris might eventually // allocate a constant with a different meaning for the value of // TCP_KEEPINTVL on illumos. 
- - err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPALIVE_THRESHOLD, msecs) - runtime.KeepAlive(fd) - return wrapSyscallError("setsockopt", err) + return syscall.ENOPROTOOPT +} + +func setKeepAliveCount(_ *netFD, n int) error { + if n < 0 { + return nil + } + return syscall.ENOPROTOOPT } diff --git a/src/net/tcpsockopt_stub.go b/src/net/tcpsockopt_stub.go index cef07cd648..b789e0ae93 100644 --- a/src/net/tcpsockopt_stub.go +++ b/src/net/tcpsockopt_stub.go @@ -15,6 +15,14 @@ func setNoDelay(fd *netFD, noDelay bool) error { return syscall.ENOPROTOOPT } -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + return syscall.ENOPROTOOPT +} + +func setKeepAliveInterval(fd *netFD, d time.Duration) error { + return syscall.ENOPROTOOPT +} + +func setKeepAliveCount(fd *netFD, n int) error { return syscall.ENOPROTOOPT } diff --git a/src/net/tcpsockopt_unix.go b/src/net/tcpsockopt_unix.go index bdcdc40239..f3526e4962 100644 --- a/src/net/tcpsockopt_unix.go +++ b/src/net/tcpsockopt_unix.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -//go:build aix || freebsd || linux || netbsd +//go:build aix || dragonfly || freebsd || linux || netbsd package net @@ -12,13 +12,42 @@ import ( "time" ) -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + if d == 0 { + d = defaultTCPKeepAliveIdle + } else if d < 0 { + return nil + } + // The kernel expects seconds so round to next highest second. 
secs := int(roundDurationUp(d, time.Second)) - if err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs); err != nil { - return wrapSyscallError("setsockopt", err) - } err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPIDLE, secs) runtime.KeepAlive(fd) return wrapSyscallError("setsockopt", err) } + +func setKeepAliveInterval(fd *netFD, d time.Duration) error { + if d == 0 { + d = defaultTCPKeepAliveInterval + } else if d < 0 { + return nil + } + + // The kernel expects seconds so round to next highest second. + secs := int(roundDurationUp(d, time.Second)) + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPINTVL, secs) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} + +func setKeepAliveCount(fd *netFD, n int) error { + if n == 0 { + n = defaultTCPKeepAliveCount + } else if n < 0 { + return nil + } + + err := fd.pfd.SetsockoptInt(syscall.IPPROTO_TCP, syscall.TCP_KEEPCNT, n) + runtime.KeepAlive(fd) + return wrapSyscallError("setsockopt", err) +} diff --git a/src/net/tcpsockopt_windows.go b/src/net/tcpsockopt_windows.go index 4a0b09465e..274fc4d9c4 100644 --- a/src/net/tcpsockopt_windows.go +++ b/src/net/tcpsockopt_windows.go @@ -12,14 +12,72 @@ import ( "unsafe" ) -func setKeepAlivePeriod(fd *netFD, d time.Duration) error { +// Default values of KeepAliveTime and KeepAliveInterval on Windows, +// check out https://learn.microsoft.com/en-us/windows/win32/winsock/sio-keepalive-vals#remarks for details. +const ( + defaultKeepAliveIdle = 2 * time.Hour + defaultKeepAliveInterval = time.Second +) + +func setKeepAliveIdle(fd *netFD, d time.Duration) error { + return setKeepAliveIdleAndInterval(fd, d, -1) +} + +func setKeepAliveInterval(fd *netFD, d time.Duration) error { + return setKeepAliveIdleAndInterval(fd, -1, d) +} + +func setKeepAliveCount(_ *netFD, n int) error { + if n < 0 { + return nil + } + + // This value is not capable to be changed on Windows. 
+	return syscall.WSAENOPROTOOPT
+}
+
+func setKeepAliveIdleAndInterval(fd *netFD, idle, interval time.Duration) error {
+	// WSAIoctl with SIO_KEEPALIVE_VALS control code requires all fields in
+	// `tcp_keepalive` struct to be provided.
+	// Otherwise, if any of the fields were not provided, just leaving them
+	// zero will knock off any existing values of keep-alive.
+	// Unfortunately, Windows doesn't support retrieving current keep-alive
+	// settings in any form programmatically, which prevents us from first
+	// retrieving the current keep-alive settings and then setting them
+	// without unwanted corruption.
+	switch {
+	case idle < 0 && interval >= 0:
+		// Given that we can't set KeepAliveInterval alone, and this code path
+		// is new (it didn't exist before), we just return an error.
+		return syscall.WSAENOPROTOOPT
+	case idle >= 0 && interval < 0:
+		// Although we can't set KeepAliveTime alone either, this existing code
+		// path had been backing up [SetKeepAlivePeriod] which used to set both
+		// KeepAliveTime and KeepAliveInterval to 15 seconds.
+		// Now we will use the default of KeepAliveInterval on Windows if the user
+		// doesn't provide one.
+		interval = defaultKeepAliveInterval
+	case idle < 0 && interval < 0:
+		// Nothing to do, just bail out.
+		return nil
+	case idle >= 0 && interval >= 0:
+		// Go ahead.
+	}
+
+	if idle == 0 {
+		idle = defaultTCPKeepAliveIdle
+	}
+	if interval == 0 {
+		interval = defaultTCPKeepAliveInterval
+	}
+
+	// The kernel expects milliseconds so round to next highest
+	// millisecond.
- msecs := uint32(roundDurationUp(d, time.Millisecond)) + tcpKeepAliveIdle := uint32(roundDurationUp(idle, time.Millisecond)) + tcpKeepAliveInterval := uint32(roundDurationUp(interval, time.Millisecond)) ka := syscall.TCPKeepalive{ OnOff: 1, - Time: msecs, - Interval: msecs, + Time: tcpKeepAliveIdle, + Interval: tcpKeepAliveInterval, } ret := uint32(0) size := uint32(unsafe.Sizeof(ka)) diff --git a/src/net/textproto/header.go b/src/net/textproto/header.go index a58df7aebc..689a6827b9 100644 --- a/src/net/textproto/header.go +++ b/src/net/textproto/header.go @@ -23,7 +23,7 @@ func (h MIMEHeader) Set(key, value string) { } // Get gets the first value associated with the given key. -// It is case insensitive; CanonicalMIMEHeaderKey is used +// It is case insensitive; [CanonicalMIMEHeaderKey] is used // to canonicalize the provided key. // If there are no values associated with the key, Get returns "". // To use non-canonical keys, access the map directly. @@ -39,7 +39,7 @@ func (h MIMEHeader) Get(key string) string { } // Values returns all values associated with the given key. -// It is case insensitive; CanonicalMIMEHeaderKey is +// It is case insensitive; [CanonicalMIMEHeaderKey] is // used to canonicalize the provided key. To use non-canonical // keys, access the map directly. // The returned slice is not a copy. diff --git a/src/net/textproto/reader.go b/src/net/textproto/reader.go index fc2590b1cd..1a81453559 100644 --- a/src/net/textproto/reader.go +++ b/src/net/textproto/reader.go @@ -16,6 +16,10 @@ import ( "sync" ) +// TODO: This should be a distinguishable error (ErrMessageTooLarge) +// to allow mime/multipart to detect it. +var errMessageTooLarge = errors.New("message too large") + // A Reader implements convenience methods for reading requests // or responses from a text protocol network connection. 
type Reader struct { @@ -24,10 +28,10 @@ type Reader struct { buf []byte // a re-usable buffer for readContinuedLineSlice } -// NewReader returns a new Reader reading from r. +// NewReader returns a new [Reader] reading from r. // -// To avoid denial of service attacks, the provided bufio.Reader -// should be reading from an io.LimitReader or similar Reader to bound +// To avoid denial of service attacks, the provided [bufio.Reader] +// should be reading from an [io.LimitReader] or similar Reader to bound // the size of responses. func NewReader(r *bufio.Reader) *Reader { return &Reader{R: r} @@ -36,20 +40,23 @@ func NewReader(r *bufio.Reader) *Reader { // ReadLine reads a single line from r, // eliding the final \n or \r\n from the returned string. func (r *Reader) ReadLine() (string, error) { - line, err := r.readLineSlice() + line, err := r.readLineSlice(-1) return string(line), err } -// ReadLineBytes is like ReadLine but returns a []byte instead of a string. +// ReadLineBytes is like [Reader.ReadLine] but returns a []byte instead of a string. func (r *Reader) ReadLineBytes() ([]byte, error) { - line, err := r.readLineSlice() + line, err := r.readLineSlice(-1) if line != nil { line = bytes.Clone(line) } return line, err } -func (r *Reader) readLineSlice() ([]byte, error) { +// readLineSlice reads a single line from r, +// up to lim bytes long (or unlimited if lim is less than 0), +// eliding the final \r or \r\n from the returned string. +func (r *Reader) readLineSlice(lim int64) ([]byte, error) { r.closeDot() var line []byte for { @@ -57,6 +64,9 @@ func (r *Reader) readLineSlice() ([]byte, error) { if err != nil { return nil, err } + if lim >= 0 && int64(len(line))+int64(len(l)) > lim { + return nil, errMessageTooLarge + } // Avoid the copy if the first call produced a full line. if line == nil && !more { return l, nil @@ -88,7 +98,7 @@ func (r *Reader) readLineSlice() ([]byte, error) { // // Empty lines are never continued. 
func (r *Reader) ReadContinuedLine() (string, error) { - line, err := r.readContinuedLineSlice(noValidation) + line, err := r.readContinuedLineSlice(-1, noValidation) return string(line), err } @@ -106,10 +116,10 @@ func trim(s []byte) []byte { return s[i:n] } -// ReadContinuedLineBytes is like ReadContinuedLine but +// ReadContinuedLineBytes is like [Reader.ReadContinuedLine] but // returns a []byte instead of a string. func (r *Reader) ReadContinuedLineBytes() ([]byte, error) { - line, err := r.readContinuedLineSlice(noValidation) + line, err := r.readContinuedLineSlice(-1, noValidation) if line != nil { line = bytes.Clone(line) } @@ -120,13 +130,14 @@ func (r *Reader) ReadContinuedLineBytes() ([]byte, error) { // returning a byte slice with all lines. The validateFirstLine function // is run on the first read line, and if it returns an error then this // error is returned from readContinuedLineSlice. -func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([]byte, error) { +// It reads up to lim bytes of data (or unlimited if lim is less than 0). +func (r *Reader) readContinuedLineSlice(lim int64, validateFirstLine func([]byte) error) ([]byte, error) { if validateFirstLine == nil { return nil, fmt.Errorf("missing validateFirstLine func") } // Read the first line. - line, err := r.readLineSlice() + line, err := r.readLineSlice(lim) if err != nil { return nil, err } @@ -154,13 +165,21 @@ func (r *Reader) readContinuedLineSlice(validateFirstLine func([]byte) error) ([ // copy the slice into buf. r.buf = append(r.buf[:0], trim(line)...) + if lim < 0 { + lim = math.MaxInt64 + } + lim -= int64(len(r.buf)) + // Read continuation lines. for r.skipSpace() > 0 { - line, err := r.readLineSlice() + r.buf = append(r.buf, ' ') + if int64(len(r.buf)) >= lim { + return nil, errMessageTooLarge + } + line, err := r.readLineSlice(lim - int64(len(r.buf))) if err != nil { break } - r.buf = append(r.buf, ' ') r.buf = append(r.buf, trim(line)...) 
} return r.buf, nil @@ -289,7 +308,7 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err return } -// DotReader returns a new Reader that satisfies Reads using the +// DotReader returns a new [Reader] that satisfies Reads using the // decoded text of a dot-encoded block read from r. // The returned Reader is only valid until the next call // to a method on r. @@ -303,7 +322,7 @@ func (r *Reader) ReadResponse(expectCode int) (code int, message string, err err // // The decoded form returned by the Reader's Read method // rewrites the "\r\n" line endings into the simpler "\n", -// removes leading dot escapes if present, and stops with error io.EOF +// removes leading dot escapes if present, and stops with error [io.EOF] // after consuming (and discarding) the end-of-sequence line. func (r *Reader) DotReader() io.Reader { r.closeDot() @@ -420,7 +439,7 @@ func (r *Reader) closeDot() { // ReadDotBytes reads a dot-encoding and returns the decoded data. // -// See the documentation for the DotReader method for details about dot-encoding. +// See the documentation for the [Reader.DotReader] method for details about dot-encoding. func (r *Reader) ReadDotBytes() ([]byte, error) { return io.ReadAll(r.DotReader()) } @@ -428,7 +447,7 @@ func (r *Reader) ReadDotBytes() ([]byte, error) { // ReadDotLines reads a dot-encoding and returns a slice // containing the decoded lines, with the final \r\n or \n elided from each. // -// See the documentation for the DotReader method for details about dot-encoding. +// See the documentation for the [Reader.DotReader] method for details about dot-encoding. func (r *Reader) ReadDotLines() ([]string, error) { // We could use ReadDotBytes and then Split it, // but reading a line at a time avoids needing a @@ -462,7 +481,7 @@ var colon = []byte(":") // ReadMIMEHeader reads a MIME-style header from r. // The header is a sequence of possibly continued Key: Value lines // ending in a blank line. 
-// The returned map m maps CanonicalMIMEHeaderKey(key) to a +// The returned map m maps [CanonicalMIMEHeaderKey](key) to a // sequence of values in the same order encountered in the input. // // For example, consider this input: @@ -507,7 +526,8 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) // The first line cannot start with a leading space. if buf, err := r.R.Peek(1); err == nil && (buf[0] == ' ' || buf[0] == '\t') { - line, err := r.readLineSlice() + const errorLimit = 80 // arbitrary limit on how much of the line we'll quote + line, err := r.readLineSlice(errorLimit) if err != nil { return m, err } @@ -515,7 +535,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) } for { - kv, err := r.readContinuedLineSlice(mustHaveFieldNameColon) + kv, err := r.readContinuedLineSlice(maxMemory, mustHaveFieldNameColon) if len(kv) == 0 { return m, err } @@ -535,16 +555,9 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) } } - // As per RFC 7230 field-name is a token, tokens consist of one or more chars. - // We could return a ProtocolError here, but better to be liberal in what we - // accept, so if we get an empty key, skip it. - if key == "" { - continue - } - maxHeaders-- if maxHeaders < 0 { - return nil, errors.New("message too large") + return nil, errMessageTooLarge } // Skip initial spaces in value. @@ -557,9 +570,7 @@ func readMIMEHeader(r *Reader, maxMemory, maxHeaders int64) (MIMEHeader, error) } maxMemory -= int64(len(value)) if maxMemory < 0 { - // TODO: This should be a distinguishable error (ErrMessageTooLarge) - // to allow mime/multipart to detect it. - return m, errors.New("message too large") + return m, errMessageTooLarge } if vv == nil && len(strs) > 0 { // More than likely this will be a single-element key. 
@@ -725,6 +736,10 @@ func validHeaderValueByte(c byte) bool { // ReadMIMEHeader accepts header keys containing spaces, but does not // canonicalize them. func canonicalMIMEHeaderKey(a []byte) (_ string, ok bool) { + if len(a) == 0 { + return "", false + } + // See if a looks like a header key. If not, return it unchanged. noCanon := false for _, c := range a { diff --git a/src/net/textproto/reader_test.go b/src/net/textproto/reader_test.go index 696ae406f3..f794879bd7 100644 --- a/src/net/textproto/reader_test.go +++ b/src/net/textproto/reader_test.go @@ -36,6 +36,18 @@ func TestReadLine(t *testing.T) { } } +func TestReadLineLongLine(t *testing.T) { + line := strings.Repeat("12345", 10000) + r := reader(line + "\r\n") + s, err := r.ReadLine() + if err != nil { + t.Fatalf("Line 1: %v", err) + } + if s != line { + t.Fatalf("%v-byte line does not match expected %v-byte line", len(s), len(line)) + } +} + func TestReadContinuedLine(t *testing.T) { r := reader("line1\nline\n 2\nline3\n") s, err := r.ReadContinuedLine() @@ -169,8 +181,8 @@ func TestReaderUpcomingHeaderKeys(t *testing.T) { func TestReadMIMEHeaderNoKey(t *testing.T) { r := reader(": bar\ntest-1: 1\n\n") m, err := r.ReadMIMEHeader() - want := MIMEHeader{"Test-1": {"1"}} - if !reflect.DeepEqual(m, want) || err != nil { + want := MIMEHeader{} + if !reflect.DeepEqual(m, want) || err == nil { t.Fatalf("ReadMIMEHeader: %v, %v; want %v", m, err, want) } } @@ -227,6 +239,7 @@ func TestReadMIMEHeaderMalformed(t *testing.T) { "Foo\r\n\t: foo\r\n\r\n", "Foo-\n\tBar", "Foo \tBar: foo\r\n\r\n", + ": empty key\r\n\r\n", } for _, input := range inputs { r := reader(input) diff --git a/src/net/textproto/textproto.go b/src/net/textproto/textproto.go index 70038d5888..4ae3ecff74 100644 --- a/src/net/textproto/textproto.go +++ b/src/net/textproto/textproto.go @@ -7,20 +7,20 @@ // // The package provides: // -// Error, which represents a numeric error response from +// [Error], which represents a numeric error response from // 
a server. // -// Pipeline, to manage pipelined requests and responses +// [Pipeline], to manage pipelined requests and responses // in a client. // -// Reader, to read numeric response code lines, +// [Reader], to read numeric response code lines, // key: value headers, lines wrapped with leading spaces // on continuation lines, and whole text blocks ending // with a dot on a line by itself. // -// Writer, to write dot-encoded text blocks. +// [Writer], to write dot-encoded text blocks. // -// Conn, a convenient packaging of Reader, Writer, and Pipeline for use +// [Conn], a convenient packaging of [Reader], [Writer], and [Pipeline] for use // with a single network connection. package textproto @@ -50,8 +50,8 @@ func (p ProtocolError) Error() string { } // A Conn represents a textual network protocol connection. -// It consists of a Reader and Writer to manage I/O -// and a Pipeline to sequence concurrent requests on the connection. +// It consists of a [Reader] and [Writer] to manage I/O +// and a [Pipeline] to sequence concurrent requests on the connection. // These embedded types carry methods with them; // see the documentation of those types for details. type Conn struct { @@ -61,7 +61,7 @@ type Conn struct { conn io.ReadWriteCloser } -// NewConn returns a new Conn using conn for I/O. +// NewConn returns a new [Conn] using conn for I/O. func NewConn(conn io.ReadWriteCloser) *Conn { return &Conn{ Reader: Reader{R: bufio.NewReader(conn)}, @@ -75,8 +75,8 @@ func (c *Conn) Close() error { return c.conn.Close() } -// Dial connects to the given address on the given network using net.Dial -// and then returns a new Conn for the connection. +// Dial connects to the given address on the given network using [net.Dial] +// and then returns a new [Conn] for the connection. 
func Dial(network, addr string) (*Conn, error) { c, err := net.Dial(network, addr) if err != nil { diff --git a/src/net/textproto/writer.go b/src/net/textproto/writer.go index 2ece3f511b..662515fb2c 100644 --- a/src/net/textproto/writer.go +++ b/src/net/textproto/writer.go @@ -17,7 +17,7 @@ type Writer struct { dot *dotWriter } -// NewWriter returns a new Writer writing to w. +// NewWriter returns a new [Writer] writing to w. func NewWriter(w *bufio.Writer) *Writer { return &Writer{W: w} } @@ -39,7 +39,7 @@ func (w *Writer) PrintfLine(format string, args ...any) error { // when the DotWriter is closed. The caller should close the // DotWriter before the next call to a method on w. // -// See the documentation for Reader's DotReader method for details about dot-encoding. +// See the documentation for the [Reader.DotReader] method for details about dot-encoding. func (w *Writer) DotWriter() io.WriteCloser { w.closeDot() w.dot = &dotWriter{w: w} diff --git a/src/net/timeout_test.go b/src/net/timeout_test.go index 563a842cf9..09adb9bdca 100644 --- a/src/net/timeout_test.go +++ b/src/net/timeout_test.go @@ -5,9 +5,9 @@ package net import ( + "context" "errors" "fmt" - "internal/testenv" "io" "os" "runtime" @@ -166,19 +166,7 @@ func TestDialTimeoutMaxDuration(t *testing.T) { } } -var acceptTimeoutTests = []struct { - timeout time.Duration - xerrs [2]error // expected errors in transition -}{ - // Tests that accept deadlines in the past work, even if - // there's incoming connections available. 
- {-5 * time.Second, [2]error{os.ErrDeadlineExceeded, os.ErrDeadlineExceeded}}, - - {50 * time.Millisecond, [2]error{nil, os.ErrDeadlineExceeded}}, -} - func TestAcceptTimeout(t *testing.T) { - testenv.SkipFlaky(t, 17948) t.Parallel() switch runtime.GOOS { @@ -186,49 +174,79 @@ func TestAcceptTimeout(t *testing.T) { t.Skipf("not supported on %s", runtime.GOOS) } - ln := newLocalListener(t, "tcp") - defer ln.Close() - - var wg sync.WaitGroup - for i, tt := range acceptTimeoutTests { - if tt.timeout < 0 { - wg.Add(1) - go func() { - defer wg.Done() - d := Dialer{Timeout: 100 * time.Millisecond} - c, err := d.Dial(ln.Addr().Network(), ln.Addr().String()) - if err != nil { - t.Error(err) - return - } - c.Close() - }() - } - - if err := ln.(*TCPListener).SetDeadline(time.Now().Add(tt.timeout)); err != nil { - t.Fatalf("$%d: %v", i, err) - } - for j, xerr := range tt.xerrs { - for { - c, err := ln.Accept() - if xerr != nil { - if perr := parseAcceptError(err); perr != nil { - t.Errorf("#%d/%d: %v", i, j, perr) - } - if !isDeadlineExceeded(err) { - t.Fatalf("#%d/%d: %v", i, j, err) - } - } - if err == nil { - c.Close() - time.Sleep(10 * time.Millisecond) - continue - } - break - } - } + timeouts := []time.Duration{ + -5 * time.Second, + 10 * time.Millisecond, + } + + for _, timeout := range timeouts { + timeout := timeout + t.Run(fmt.Sprintf("%v", timeout), func(t *testing.T) { + t.Parallel() + + ln := newLocalListener(t, "tcp") + defer ln.Close() + + if timeout >= 0 { + // Don't dial the listener at all, so that Accept will hang. + } else { + // A deadline in the past should cause Accept to fail even if there are + // incoming connections available. Try to make one available before the + // call to Accept happens. (It's ok if the timing doesn't always work + // out that way, though: the test should pass regardless.) 
+ ctx, cancel := context.WithCancel(context.Background()) + dialDone := make(chan struct{}) + + // Ensure that our background Dial returns before we close the listener. + // Otherwise, the listener's port could be reused immediately and we + // might spuriously Dial some completely unrelated socket, causing some + // other test to see an unexpected extra connection. + defer func() { + cancel() + <-dialDone + }() + + go func() { + defer close(dialDone) + d := Dialer{} + c, err := d.DialContext(ctx, ln.Addr().Network(), ln.Addr().String()) + if err != nil { + // If the timing didn't work out, it is possible for this Dial + // to return an error (depending on the kernel's buffering behavior). + // In https://go.dev/issue/65240 we saw failures with ECONNREFUSED + // and ECONNRESET. + // + // What this test really cares about is the behavior of Accept, not + // Dial, so just log the error and ignore it. + t.Logf("DialContext: %v", err) + return + } + t.Logf("Dialed %v -> %v", c.LocalAddr(), c.RemoteAddr()) + c.Close() + }() + + time.Sleep(10 * time.Millisecond) + } + + if err := ln.(*TCPListener).SetDeadline(time.Now().Add(timeout)); err != nil { + t.Fatal(err) + } + t.Logf("ln.SetDeadline(time.Now().Add(%v))", timeout) + + c, err := ln.Accept() + if err == nil { + c.Close() + } + t.Logf("ln.Accept: %v", err) + + if perr := parseAcceptError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Error("wanted deadline exceeded") + } + }) } - wg.Wait() } func TestAcceptTimeoutMustReturn(t *testing.T) { @@ -242,35 +260,22 @@ func TestAcceptTimeoutMustReturn(t *testing.T) { ln := newLocalListener(t, "tcp") defer ln.Close() - max := time.NewTimer(time.Second) - defer max.Stop() - ch := make(chan error) - go func() { - if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil { - t.Error(err) - } - if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { - t.Error(err) - } - c, err := ln.Accept() - if err == nil { 
- c.Close() - } - ch <- err - }() + if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil { + t.Error(err) + } + if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil { + t.Error(err) + } + c, err := ln.Accept() + if err == nil { + c.Close() + } - select { - case <-max.C: - ln.Close() - <-ch // wait for tester goroutine to stop - t.Fatal("Accept didn't return in an expected time") - case err := <-ch: - if perr := parseAcceptError(err); perr != nil { - t.Error(perr) - } - if !isDeadlineExceeded(err) { - t.Fatal(err) - } + if perr := parseAcceptError(err); perr != nil { + t.Error(perr) + } + if !isDeadlineExceeded(err) { + t.Fatal(err) } } diff --git a/src/net/unixsock.go b/src/net/unixsock.go index 7e5ffa036a..821be7bf74 100644 --- a/src/net/unixsock.go +++ b/src/net/unixsock.go @@ -52,7 +52,7 @@ func (a *UnixAddr) opAddr() Addr { // // The network must be a Unix network name. // -// See func Dial for a description of the network and address +// See func [Dial] for a description of the network and address // parameters. func ResolveUnixAddr(network, address string) (*UnixAddr, error) { switch network { @@ -63,14 +63,14 @@ func ResolveUnixAddr(network, address string) (*UnixAddr, error) { } } -// UnixConn is an implementation of the Conn interface for connections +// UnixConn is an implementation of the [Conn] interface for connections // to Unix domain sockets. type UnixConn struct { conn } // SyscallConn returns a raw network connection. -// This implements the syscall.Conn interface. +// This implements the [syscall.Conn] interface. func (c *UnixConn) SyscallConn() (syscall.RawConn, error) { if !c.ok() { return nil, syscall.EINVAL @@ -102,7 +102,7 @@ func (c *UnixConn) CloseWrite() error { return nil } -// ReadFromUnix acts like ReadFrom but returns a UnixAddr. +// ReadFromUnix acts like [UnixConn.ReadFrom] but returns a [UnixAddr]. 
func (c *UnixConn) ReadFromUnix(b []byte) (int, *UnixAddr, error) { if !c.ok() { return 0, nil, syscall.EINVAL @@ -114,7 +114,7 @@ func (c *UnixConn) ReadFromUnix(b []byte) (int, *UnixAddr, error) { return n, addr, err } -// ReadFrom implements the PacketConn ReadFrom method. +// ReadFrom implements the [PacketConn] ReadFrom method. func (c *UnixConn) ReadFrom(b []byte) (int, Addr, error) { if !c.ok() { return 0, nil, syscall.EINVAL @@ -147,7 +147,7 @@ func (c *UnixConn) ReadMsgUnix(b, oob []byte) (n, oobn, flags int, addr *UnixAdd return } -// WriteToUnix acts like WriteTo but takes a UnixAddr. +// WriteToUnix acts like [UnixConn.WriteTo] but takes a [UnixAddr]. func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (int, error) { if !c.ok() { return 0, syscall.EINVAL @@ -159,7 +159,7 @@ func (c *UnixConn) WriteToUnix(b []byte, addr *UnixAddr) (int, error) { return n, err } -// WriteTo implements the PacketConn WriteTo method. +// WriteTo implements the [PacketConn] WriteTo method. func (c *UnixConn) WriteTo(b []byte, addr Addr) (int, error) { if !c.ok() { return 0, syscall.EINVAL @@ -194,7 +194,7 @@ func (c *UnixConn) WriteMsgUnix(b, oob []byte, addr *UnixAddr) (n, oobn int, err func newUnixConn(fd *netFD) *UnixConn { return &UnixConn{conn{fd}} } -// DialUnix acts like Dial for Unix networks. +// DialUnix acts like [Dial] for Unix networks. // // The network must be a Unix network name; see func Dial for details. // @@ -215,7 +215,7 @@ func DialUnix(network string, laddr, raddr *UnixAddr) (*UnixConn, error) { } // UnixListener is a Unix domain socket listener. Clients should -// typically use variables of type Listener instead of assuming Unix +// typically use variables of type [Listener] instead of assuming Unix // domain sockets. type UnixListener struct { fd *netFD @@ -227,7 +227,7 @@ type UnixListener struct { func (ln *UnixListener) ok() bool { return ln != nil && ln.fd != nil } // SyscallConn returns a raw network connection. 
-// This implements the syscall.Conn interface. +// This implements the [syscall.Conn] interface. // // The returned RawConn only supports calling Control. Read and // Write return an error. @@ -251,8 +251,8 @@ func (l *UnixListener) AcceptUnix() (*UnixConn, error) { return c, nil } -// Accept implements the Accept method in the Listener interface. -// Returned connections will be of type *UnixConn. +// Accept implements the Accept method in the [Listener] interface. +// Returned connections will be of type [*UnixConn]. func (l *UnixListener) Accept() (Conn, error) { if !l.ok() { return nil, syscall.EINVAL @@ -290,7 +290,7 @@ func (l *UnixListener) SetDeadline(t time.Time) error { return l.fd.SetDeadline(t) } -// File returns a copy of the underlying os.File. +// File returns a copy of the underlying [os.File]. // It is the caller's responsibility to close f when finished. // Closing l does not affect f, and closing f does not affect l. // @@ -308,7 +308,7 @@ func (l *UnixListener) File() (f *os.File, err error) { return } -// ListenUnix acts like Listen for Unix networks. +// ListenUnix acts like [Listen] for Unix networks. // // The network must be "unix" or "unixpacket". func ListenUnix(network string, laddr *UnixAddr) (*UnixListener, error) { @@ -328,7 +328,7 @@ func ListenUnix(network string, laddr *UnixAddr) (*UnixListener, error) { return ln, nil } -// ListenUnixgram acts like ListenPacket for Unix networks. +// ListenUnixgram acts like [ListenPacket] for Unix networks. // // The network must be "unixgram". 
func ListenUnixgram(network string, laddr *UnixAddr) (*UnixConn, error) { diff --git a/src/net/unixsock_windows_test.go b/src/net/unixsock_windows_test.go index d541d89f78..1e54d6171a 100644 --- a/src/net/unixsock_windows_test.go +++ b/src/net/unixsock_windows_test.go @@ -33,7 +33,9 @@ func isBuild17063() bool { return ver >= 17063 } -func TestUnixConnLocalWindows(t *testing.T) { +func skipIfUnixSocketNotSupported(t *testing.T) { + // TODO: the isBuild17063 check should be enough, investigate why 386 and arm + // can't run these tests on newer Windows. switch runtime.GOARCH { case "386": t.Skip("not supported on windows/386, see golang.org/issue/27943") @@ -43,7 +45,10 @@ func TestUnixConnLocalWindows(t *testing.T) { if !isBuild17063() { t.Skip("unix test") } +} +func TestUnixConnLocalWindows(t *testing.T) { + skipIfUnixSocketNotSupported(t) handler := func(ls *localServer, ln Listener) {} for _, laddr := range []string{"", testUnixAddr(t)} { laddr := laddr @@ -95,3 +100,24 @@ func TestUnixConnLocalWindows(t *testing.T) { } } } + +func TestModeSocket(t *testing.T) { + skipIfUnixSocketNotSupported(t) + addr := testUnixAddr(t) + + l, err := Listen("unix", addr) + if err != nil { + t.Fatal(err) + } + defer l.Close() + + stat, err := os.Stat(addr) + if err != nil { + t.Fatal(err) + } + + mode := stat.Mode() + if mode&os.ModeSocket == 0 { + t.Fatalf("%v should have ModeSocket", mode) + } +} diff --git a/src/net/url/url.go b/src/net/url/url.go index 902310c244..f362958edd 100644 --- a/src/net/url/url.go +++ b/src/net/url/url.go @@ -175,7 +175,7 @@ func shouldEscape(c byte, mode encoding) bool { return true } -// QueryUnescape does the inverse transformation of QueryEscape, +// QueryUnescape does the inverse transformation of [QueryEscape], // converting each 3-byte encoded substring of the form "%AB" into the // hex-decoded byte 0xAB. 
// It returns an error if any % is not followed by two hexadecimal @@ -184,12 +184,12 @@ func QueryUnescape(s string) (string, error) { return unescape(s, encodeQueryComponent) } -// PathUnescape does the inverse transformation of PathEscape, +// PathUnescape does the inverse transformation of [PathEscape], // converting each 3-byte encoded substring of the form "%AB" into the // hex-decoded byte 0xAB. It returns an error if any % is not followed // by two hexadecimal digits. // -// PathUnescape is identical to QueryUnescape except that it does not +// PathUnescape is identical to [QueryUnescape] except that it does not // unescape '+' to ' ' (space). func PathUnescape(s string) (string, error) { return unescape(s, encodePathSegment) @@ -271,12 +271,12 @@ func unescape(s string, mode encoding) (string, error) { } // QueryEscape escapes the string so it can be safely placed -// inside a URL query. +// inside a [URL] query. func QueryEscape(s string) string { return escape(s, encodeQueryComponent) } -// PathEscape escapes the string so it can be safely placed inside a URL path segment, +// PathEscape escapes the string so it can be safely placed inside a [URL] path segment, // replacing special characters (including /) with %XX sequences as needed. func PathEscape(s string) string { return escape(s, encodePathSegment) @@ -358,7 +358,7 @@ func escape(s string, mode encoding) string { // Note that the Path field is stored in decoded form: /%47%6f%2f becomes /Go/. // A consequence is that it is impossible to tell which slashes in the Path were // slashes in the raw URL and which were %2f. This distinction is rarely important, -// but when it is, the code should use the EscapedPath method, which preserves +// but when it is, the code should use the [URL.EscapedPath] method, which preserves // the original encoding of Path. 
// // The RawPath field is an optional field which is only set when the default @@ -380,13 +380,13 @@ type URL struct { RawFragment string // encoded fragment hint (see EscapedFragment method) } -// User returns a Userinfo containing the provided username +// User returns a [Userinfo] containing the provided username // and no password set. func User(username string) *Userinfo { return &Userinfo{username, "", false} } -// UserPassword returns a Userinfo containing the provided username +// UserPassword returns a [Userinfo] containing the provided username // and password. // // This functionality should only be used with legacy web sites. @@ -399,7 +399,7 @@ func UserPassword(username, password string) *Userinfo { } // The Userinfo type is an immutable encapsulation of username and -// password details for a URL. An existing Userinfo value is guaranteed +// password details for a [URL]. An existing Userinfo value is guaranteed // to have a username set (potentially empty, as allowed by RFC 2396), // and optionally a password. type Userinfo struct { @@ -464,7 +464,7 @@ func getScheme(rawURL string) (scheme, path string, err error) { return "", rawURL, nil } -// Parse parses a raw url into a URL structure. +// Parse parses a raw url into a [URL] structure. // // The url may be relative (a path, without a host) or absolute // (starting with a scheme). Trying to parse a hostname and path @@ -486,7 +486,7 @@ func Parse(rawURL string) (*URL, error) { return url, nil } -// ParseRequestURI parses a raw url into a URL structure. It assumes that +// ParseRequestURI parses a raw url into a [URL] structure. It assumes that // url was received in an HTTP request, so the url is interpreted // only as an absolute URI or an absolute path. // The string url is assumed not to have a #fragment suffix. @@ -697,7 +697,7 @@ func (u *URL) setPath(p string) error { // EscapedPath returns u.RawPath when it is a valid escaping of u.Path. 
// Otherwise EscapedPath ignores u.RawPath and computes an escaped // form on its own. -// The String and RequestURI methods use EscapedPath to construct +// The [URL.String] and [URL.RequestURI] methods use EscapedPath to construct // their results. // In general, code should call EscapedPath instead of // reading u.RawPath directly. @@ -761,7 +761,7 @@ func (u *URL) setFragment(f string) error { // EscapedFragment returns u.RawFragment when it is a valid escaping of u.Fragment. // Otherwise EscapedFragment ignores u.RawFragment and computes an escaped // form on its own. -// The String method uses EscapedFragment to construct its result. +// The [URL.String] method uses EscapedFragment to construct its result. // In general, code should call EscapedFragment instead of // reading u.RawFragment directly. func (u *URL) EscapedFragment() string { @@ -791,7 +791,7 @@ func validOptionalPort(port string) bool { return true } -// String reassembles the URL into a valid URL string. +// String reassembles the [URL] into a valid URL string. // The general form of the result is one of: // // scheme:opaque?query#fragment @@ -865,7 +865,7 @@ func (u *URL) String() string { return buf.String() } -// Redacted is like String but replaces any password with "xxxxx". +// Redacted is like [URL.String] but replaces any password with "xxxxx". // Only the password in u.User is redacted. func (u *URL) Redacted() string { if u == nil { @@ -1060,15 +1060,15 @@ func resolvePath(base, ref string) string { return r } -// IsAbs reports whether the URL is absolute. +// IsAbs reports whether the [URL] is absolute. // Absolute means that it has a non-empty scheme. func (u *URL) IsAbs() bool { return u.Scheme != "" } -// Parse parses a URL in the context of the receiver. The provided URL +// Parse parses a [URL] in the context of the receiver. The provided URL // may be relative or absolute. Parse returns nil, err on parse -// failure, otherwise its return value is the same as ResolveReference. 
+// failure, otherwise its return value is the same as [URL.ResolveReference]. func (u *URL) Parse(ref string) (*URL, error) { refURL, err := Parse(ref) if err != nil { @@ -1080,7 +1080,7 @@ func (u *URL) Parse(ref string) (*URL, error) { // ResolveReference resolves a URI reference to an absolute URI from // an absolute base URI u, per RFC 3986 Section 5.2. The URI reference // may be relative or absolute. ResolveReference always returns a new -// URL instance, even if the returned URL is identical to either the +// [URL] instance, even if the returned URL is identical to either the // base or reference. If ref is an absolute URL, then ResolveReference // ignores base and returns a copy of ref. func (u *URL) ResolveReference(ref *URL) *URL { @@ -1117,7 +1117,7 @@ func (u *URL) ResolveReference(ref *URL) *URL { // Query parses RawQuery and returns the corresponding values. // It silently discards malformed value pairs. -// To check errors use ParseQuery. +// To check errors use [ParseQuery]. func (u *URL) Query() Values { v, _ := ParseQuery(u.RawQuery) return v @@ -1194,7 +1194,7 @@ func (u *URL) UnmarshalBinary(text []byte) error { return nil } -// JoinPath returns a new URL with the provided path elements joined to +// JoinPath returns a new [URL] with the provided path elements joined to // any existing path and the resulting path cleaned of any ./ or ../ elements. // Any sequences of multiple / characters will be reduced to a single /. func (u *URL) JoinPath(elem ...string) *URL { @@ -1260,7 +1260,7 @@ func stringContainsCTLByte(s string) bool { return false } -// JoinPath returns a URL string with the provided path elements joined to +// JoinPath returns a [URL] string with the provided path elements joined to // the existing path of base and the resulting path cleaned of any ./ or ../ elements. 
func JoinPath(base string, elem ...string) (result string, err error) { url, err := Parse(base) diff --git a/src/os/dir.go b/src/os/dir.go index 5306bcb3ba..9124de29e8 100644 --- a/src/os/dir.go +++ b/src/os/dir.go @@ -5,6 +5,8 @@ package os import ( + "internal/safefilepath" + "io" "io/fs" "sort" ) @@ -18,13 +20,13 @@ const ( ) // Readdir reads the contents of the directory associated with file and -// returns a slice of up to n FileInfo values, as would be returned -// by Lstat, in directory order. Subsequent calls on the same file will yield +// returns a slice of up to n [FileInfo] values, as would be returned +// by [Lstat], in directory order. Subsequent calls on the same file will yield // further FileInfos. // // If n > 0, Readdir returns at most n FileInfo structures. In this case, if // Readdir returns an empty slice, it will return a non-nil error -// explaining why. At the end of a directory, the error is io.EOF. +// explaining why. At the end of a directory, the error is [io.EOF]. // // If n <= 0, Readdir returns all the FileInfo from the directory in // a single slice. In this case, if Readdir succeeds (reads all @@ -55,7 +57,7 @@ func (f *File) Readdir(n int) ([]FileInfo, error) { // // If n > 0, Readdirnames returns at most n names. In this case, if // Readdirnames returns an empty slice, it will return a non-nil error -// explaining why. At the end of a directory, the error is io.EOF. +// explaining why. At the end of a directory, the error is [io.EOF]. // // If n <= 0, Readdirnames returns all the names from the directory in // a single slice. In this case, if Readdirnames succeeds (reads all @@ -78,16 +80,16 @@ func (f *File) Readdirnames(n int) (names []string, err error) { } // A DirEntry is an entry read from a directory -// (using the ReadDir function or a File's ReadDir method). +// (using the [ReadDir] function or a [File.ReadDir] method). 
type DirEntry = fs.DirEntry // ReadDir reads the contents of the directory associated with the file f -// and returns a slice of DirEntry values in directory order. +// and returns a slice of [DirEntry] values in directory order. // Subsequent calls on the same file will yield later DirEntry records in the directory. // // If n > 0, ReadDir returns at most n DirEntry records. // In this case, if ReadDir returns an empty slice, it will return an error explaining why. -// At the end of a directory, the error is io.EOF. +// At the end of a directory, the error is [io.EOF]. // // If n <= 0, ReadDir returns all the DirEntry records remaining in the directory. // When it succeeds, it returns a nil error (not io.EOF). @@ -123,3 +125,61 @@ func ReadDir(name string) ([]DirEntry, error) { sort.Slice(dirs, func(i, j int) bool { return dirs[i].Name() < dirs[j].Name() }) return dirs, err } + +// CopyFS copies the file system fsys into the directory dir, +// creating dir if necessary. +// +// Newly created directories and files have their default modes +// where any bits from the file in fsys that are not part of the +// standard read, write, and execute permissions will be zeroed +// out, and standard read and write permissions are set for owner, +// group, and others while retaining any existing execute bits from +// the file in fsys. +// +// Symbolic links in fsys are not supported, a *PathError with Err set +// to ErrInvalid is returned on symlink. +// +// Copying stops at and returns the first error encountered. +func CopyFS(dir string, fsys fs.FS) error { + return fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + fpath, err := safefilepath.Localize(path) + if err != nil { + return err + } + newPath := joinPath(dir, fpath) + if d.IsDir() { + return MkdirAll(newPath, 0777) + } + + // TODO(panjf2000): handle symlinks with the help of fs.ReadLinkFS + // once https://go.dev/issue/49580 is done. 
+ // we also need safefilepath.IsLocal from https://go.dev/cl/564295. + if !d.Type().IsRegular() { + return &PathError{Op: "CopyFS", Path: path, Err: ErrInvalid} + } + + r, err := fsys.Open(path) + if err != nil { + return err + } + defer r.Close() + info, err := r.Stat() + if err != nil { + return err + } + w, err := OpenFile(newPath, O_CREATE|O_TRUNC|O_WRONLY, 0666|info.Mode()&0777) + if err != nil { + return err + } + + if _, err := io.Copy(w, r); err != nil { + w.Close() + return &PathError{Op: "Copy", Path: newPath, Err: err} + } + return w.Close() + }) +} diff --git a/src/os/env.go b/src/os/env.go index 63ad5ab4bd..9ac62451ae 100644 --- a/src/os/env.go +++ b/src/os/env.go @@ -12,7 +12,7 @@ import ( ) // Expand replaces ${var} or $var in the string based on the mapping function. -// For example, os.ExpandEnv(s) is equivalent to os.Expand(s, os.Getenv). +// For example, [os.ExpandEnv](s) is equivalent to [os.Expand](s, [os.Getenv]). func Expand(s string, mapping func(string) string) string { var buf []byte // ${} is all ASCII, so bytes are fine for this operation. @@ -97,7 +97,7 @@ func getShellName(s string) (string, int) { // Getenv retrieves the value of the environment variable named by the key. // It returns the value, which will be empty if the variable is not present. -// To distinguish between an empty value and an unset value, use LookupEnv. +// To distinguish between an empty value and an unset value, use [LookupEnv]. func Getenv(key string) string { testlog.Getenv(key) v, _ := syscall.Getenv(key) diff --git a/src/os/error.go b/src/os/error.go index 62ede9ded3..5a824a9e0e 100644 --- a/src/os/error.go +++ b/src/os/error.go @@ -12,7 +12,7 @@ import ( // Portable analogs of some common system call errors. // // Errors returned from this package may be tested against these errors -// with errors.Is. +// with [errors.Is]. var ( // ErrInvalid indicates an invalid argument. // Methods on File will return this error when the receiver is nil. 
@@ -61,7 +61,7 @@ func (e *SyscallError) Timeout() bool { return ok && t.Timeout() } -// NewSyscallError returns, as an error, a new SyscallError +// NewSyscallError returns, as an error, a new [SyscallError] // with the given system call name and error details. // As a convenience, if err is nil, NewSyscallError returns nil. func NewSyscallError(syscall string, err error) error { @@ -72,10 +72,10 @@ func NewSyscallError(syscall string, err error) error { } // IsExist returns a boolean indicating whether the error is known to report -// that a file or directory already exists. It is satisfied by ErrExist as +// that a file or directory already exists. It is satisfied by [ErrExist] as // well as some syscall errors. // -// This function predates errors.Is. It only supports errors returned by +// This function predates [errors.Is]. It only supports errors returned by // the os package. New code should use errors.Is(err, fs.ErrExist). func IsExist(err error) bool { return underlyingErrorIs(err, ErrExist) @@ -83,19 +83,19 @@ func IsExist(err error) bool { // IsNotExist returns a boolean indicating whether the error is known to // report that a file or directory does not exist. It is satisfied by -// ErrNotExist as well as some syscall errors. +// [ErrNotExist] as well as some syscall errors. // -// This function predates errors.Is. It only supports errors returned by +// This function predates [errors.Is]. It only supports errors returned by // the os package. New code should use errors.Is(err, fs.ErrNotExist). func IsNotExist(err error) bool { return underlyingErrorIs(err, ErrNotExist) } // IsPermission returns a boolean indicating whether the error is known to -// report that permission is denied. It is satisfied by ErrPermission as well +// report that permission is denied. It is satisfied by [ErrPermission] as well // as some syscall errors. // -// This function predates errors.Is. It only supports errors returned by +// This function predates [errors.Is]. 
It only supports errors returned by // the os package. New code should use errors.Is(err, fs.ErrPermission). func IsPermission(err error) bool { return underlyingErrorIs(err, ErrPermission) @@ -104,11 +104,11 @@ func IsPermission(err error) bool { // IsTimeout returns a boolean indicating whether the error is known // to report that a timeout occurred. // -// This function predates errors.Is, and the notion of whether an +// This function predates [errors.Is], and the notion of whether an // error indicates a timeout can be ambiguous. For example, the Unix // error EWOULDBLOCK sometimes indicates a timeout and sometimes does not. // New code should use errors.Is with a value appropriate to the call -// returning the error, such as os.ErrDeadlineExceeded. +// returning the error, such as [os.ErrDeadlineExceeded]. func IsTimeout(err error) bool { terr, ok := underlyingError(err).(timeout) return ok && terr.Timeout() diff --git a/src/os/example_test.go b/src/os/example_test.go index e9657ed1fc..7437a74cd0 100644 --- a/src/os/example_test.go +++ b/src/os/example_test.go @@ -5,12 +5,14 @@ package os_test import ( + "bytes" "errors" "fmt" "io/fs" "log" "os" "path/filepath" + "sync" "time" ) @@ -263,3 +265,131 @@ func ExampleMkdirAll() { log.Fatal(err) } } + +func ExampleReadlink() { + // First, we create a relative symlink to a file. + d, err := os.MkdirTemp("", "") + if err != nil { + log.Fatal(err) + } + defer os.RemoveAll(d) + targetPath := filepath.Join(d, "hello.txt") + if err := os.WriteFile(targetPath, []byte("Hello, Gophers!"), 0644); err != nil { + log.Fatal(err) + } + linkPath := filepath.Join(d, "hello.link") + if err := os.Symlink("hello.txt", filepath.Join(d, "hello.link")); err != nil { + if errors.Is(err, errors.ErrUnsupported) { + // Allow the example to run on platforms that do not support symbolic links. 
+ fmt.Printf("%s links to %s\n", filepath.Base(linkPath), "hello.txt") + return + } + log.Fatal(err) + } + + // Readlink returns the relative path as passed to os.Symlink. + dst, err := os.Readlink(linkPath) + if err != nil { + log.Fatal(err) + } + fmt.Printf("%s links to %s\n", filepath.Base(linkPath), dst) + + var dstAbs string + if filepath.IsAbs(dst) { + dstAbs = dst + } else { + // Symlink targets are relative to the directory containing the link. + dstAbs = filepath.Join(filepath.Dir(linkPath), dst) + } + + // Check that the target is correct by comparing it with os.Stat + // on the original target path. + dstInfo, err := os.Stat(dstAbs) + if err != nil { + log.Fatal(err) + } + targetInfo, err := os.Stat(targetPath) + if err != nil { + log.Fatal(err) + } + if !os.SameFile(dstInfo, targetInfo) { + log.Fatalf("link destination (%s) is not the same file as %s", dstAbs, targetPath) + } + + // Output: + // hello.link links to hello.txt +} + +func ExampleUserCacheDir() { + dir, dirErr := os.UserCacheDir() + if dirErr == nil { + dir = filepath.Join(dir, "ExampleUserCacheDir") + } + + getCache := func(name string) ([]byte, error) { + if dirErr != nil { + return nil, &os.PathError{Op: "getCache", Path: name, Err: os.ErrNotExist} + } + return os.ReadFile(filepath.Join(dir, name)) + } + + var mkdirOnce sync.Once + putCache := func(name string, b []byte) error { + if dirErr != nil { + return &os.PathError{Op: "putCache", Path: name, Err: dirErr} + } + mkdirOnce.Do(func() { + if err := os.MkdirAll(dir, 0700); err != nil { + log.Printf("can't create user cache dir: %v", err) + } + }) + return os.WriteFile(filepath.Join(dir, name), b, 0600) + } + + // Read and store cached data. 
+ // … + _ = getCache + _ = putCache + + // Output: +} + +func ExampleUserConfigDir() { + dir, dirErr := os.UserConfigDir() + + var ( + configPath string + origConfig []byte + ) + if dirErr == nil { + configPath = filepath.Join(dir, "ExampleUserConfigDir", "example.conf") + var err error + origConfig, err = os.ReadFile(configPath) + if err != nil && !os.IsNotExist(err) { + // The user has a config file but we couldn't read it. + // Report the error instead of ignoring their configuration. + log.Fatal(err) + } + } + + // Use and perhaps make changes to the config. + config := bytes.Clone(origConfig) + // … + + // Save changes. + if !bytes.Equal(config, origConfig) { + if configPath == "" { + log.Printf("not saving config changes: %v", dirErr) + } else { + err := os.MkdirAll(filepath.Dir(configPath), 0700) + if err == nil { + err = os.WriteFile(configPath, config, 0600) + } + if err != nil { + log.Printf("error saving config changes: %v", err) + } + } + } + + // Output: +} diff --git a/src/os/exec.go b/src/os/exec.go index ed5a75c4d1..12ba293a8d 100644 --- a/src/os/exec.go +++ b/src/os/exec.go @@ -14,19 +14,20 @@ import ( "time" ) -// ErrProcessDone indicates a Process has finished. +// ErrProcessDone indicates a [Process] has finished. var ErrProcessDone = errors.New("os: process already finished") -// Process stores the information about a process created by StartProcess. +// Process stores the information about a process created by [StartProcess]. 
type Process struct { Pid int - handle uintptr // handle is accessed atomically on Windows + handle atomic.Uintptr isdone atomic.Bool // process has been successfully waited on sigMu sync.RWMutex // avoid race between wait and signal } func newProcess(pid int, handle uintptr) *Process { - p := &Process{Pid: pid, handle: handle} + p := &Process{Pid: pid} + p.handle.Store(handle) runtime.SetFinalizer(p, (*Process).Release) return p } @@ -82,7 +83,7 @@ func Getppid() int { return syscall.Getppid() } // FindProcess looks for a running process by its pid. // -// The Process it returns can be used to obtain information +// The [Process] it returns can be used to obtain information // about the underlying operating system process. // // On Unix systems, FindProcess always succeeds and returns a Process @@ -94,38 +95,38 @@ func FindProcess(pid int) (*Process, error) { } // StartProcess starts a new process with the program, arguments and attributes -// specified by name, argv and attr. The argv slice will become os.Args in the +// specified by name, argv and attr. The argv slice will become [os.Args] in the // new process, so it normally starts with the program name. // // If the calling goroutine has locked the operating system thread -// with runtime.LockOSThread and modified any inheritable OS-level +// with [runtime.LockOSThread] and modified any inheritable OS-level // thread state (for example, Linux or Plan 9 name spaces), the new // process will inherit the caller's thread state. // -// StartProcess is a low-level interface. The os/exec package provides +// StartProcess is a low-level interface. The [os/exec] package provides // higher-level interfaces. // -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. 
func StartProcess(name string, argv []string, attr *ProcAttr) (*Process, error) { testlog.Open(name) return startProcess(name, argv, attr) } -// Release releases any resources associated with the Process p, +// Release releases any resources associated with the [Process] p, // rendering it unusable in the future. -// Release only needs to be called if Wait is not. +// Release only needs to be called if [Process.Wait] is not. func (p *Process) Release() error { return p.release() } -// Kill causes the Process to exit immediately. Kill does not wait until +// Kill causes the [Process] to exit immediately. Kill does not wait until // the Process has actually exited. This only kills the Process itself, // not any other processes it may have started. func (p *Process) Kill() error { return p.kill() } -// Wait waits for the Process to exit, and then returns a +// Wait waits for the [Process] to exit, and then returns a // ProcessState describing its status and an error, if any. // Wait releases any resources associated with the Process. // On most operating systems, the Process must be a child @@ -134,8 +135,8 @@ func (p *Process) Wait() (*ProcessState, error) { return p.wait() } -// Signal sends a signal to the Process. -// Sending Interrupt on Windows is not implemented. +// Signal sends a signal to the [Process]. +// Sending [Interrupt] on Windows is not implemented. func (p *Process) Signal(sig Signal) error { return p.signal(sig) } @@ -165,14 +166,14 @@ func (p *ProcessState) Success() bool { // Sys returns system-dependent exit information about // the process. Convert it to the appropriate underlying -// type, such as syscall.WaitStatus on Unix, to access its contents. +// type, such as [syscall.WaitStatus] on Unix, to access its contents. func (p *ProcessState) Sys() any { return p.sys() } // SysUsage returns system-dependent resource usage information about // the exited process. 
Convert it to the appropriate underlying -// type, such as *syscall.Rusage on Unix, to access its contents. +// type, such as [*syscall.Rusage] on Unix, to access its contents. // (On Unix, *syscall.Rusage matches struct rusage as defined in the // getrusage(2) manual page.) func (p *ProcessState) SysUsage() any { diff --git a/src/os/exec/exec.go b/src/os/exec/exec.go index c88ee7f52c..ee57ac4771 100644 --- a/src/os/exec/exec.go +++ b/src/os/exec/exec.go @@ -12,7 +12,7 @@ // pipelines, or redirections typically done by shells. The package // behaves more like C's "exec" family of functions. To expand glob // patterns, either call the shell directly, taking care to escape any -// dangerous input, or use the path/filepath package's Glob function. +// dangerous input, or use the [path/filepath] package's Glob function. // To expand environment variables, use package os's ExpandEnv. // // Note that the examples in this package assume a Unix system. @@ -21,7 +21,7 @@ // // # Executables in the current directory // -// The functions Command and LookPath look for a program +// The functions [Command] and [LookPath] look for a program // in the directories listed in the current path, following the // conventions of the host operating system. // Operating systems have for decades included the current @@ -32,10 +32,10 @@ // // To avoid those security problems, as of Go 1.19, this package will not resolve a program // using an implicit or explicit path entry relative to the current directory. -// That is, if you run exec.LookPath("go"), it will not successfully return +// That is, if you run [LookPath]("go"), it will not successfully return // ./go on Unix nor .\go.exe on Windows, no matter how the path is configured. // Instead, if the usual path algorithms would result in that answer, -// these functions return an error err satisfying errors.Is(err, ErrDot). +// these functions return an error err satisfying [errors.Is](err, [ErrDot]). 
// // For example, consider these two program snippets: // @@ -106,7 +106,7 @@ import ( "time" ) -// Error is returned by LookPath when it fails to classify a file as an +// Error is returned by [LookPath] when it fails to classify a file as an // executable. type Error struct { // Name is the file name for which the error occurred. @@ -121,7 +121,7 @@ func (e *Error) Error() string { func (e *Error) Unwrap() error { return e.Err } -// ErrWaitDelay is returned by (*Cmd).Wait if the process exits with a +// ErrWaitDelay is returned by [Cmd.Wait] if the process exits with a // successful status code but its output pipes are not closed before the // command's WaitDelay expires. var ErrWaitDelay = errors.New("exec: WaitDelay expired before I/O complete") @@ -142,7 +142,7 @@ func (w wrappedError) Unwrap() error { // Cmd represents an external command being prepared or run. // -// A Cmd cannot be reused after calling its Run, Output or CombinedOutput +// A Cmd cannot be reused after calling its [Cmd.Run], [Cmd.Output] or [Cmd.CombinedOutput] // methods. type Cmd struct { // Path is the path of the command to run. @@ -351,12 +351,12 @@ type ctxResult struct { var execwait = godebug.New("#execwait") var execerrdot = godebug.New("execerrdot") -// Command returns the Cmd struct to execute the named program with +// Command returns the [Cmd] struct to execute the named program with // the given arguments. // // It sets only the Path and Args in the returned structure. // -// If name contains no path separators, Command uses LookPath to +// If name contains no path separators, Command uses [LookPath] to // resolve name to a complete path if possible. Otherwise it uses name // directly as Path. // @@ -447,10 +447,10 @@ func Command(name string, arg ...string) *Cmd { return cmd } -// CommandContext is like Command but includes a context. +// CommandContext is like [Command] but includes a context. 
// // The provided context is used to interrupt the process -// (by calling cmd.Cancel or os.Process.Kill) +// (by calling cmd.Cancel or [os.Process.Kill]) // if the context becomes done before the command completes on its own. // // CommandContext sets the command's Cancel function to invoke the Kill method @@ -594,10 +594,10 @@ func closeDescriptors(closers []io.Closer) { // status. // // If the command starts but does not complete successfully, the error is of -// type *ExitError. Other error types may be returned for other situations. +// type [*ExitError]. Other error types may be returned for other situations. // // If the calling goroutine has locked the operating system thread -// with runtime.LockOSThread and modified any inheritable OS-level +// with [runtime.LockOSThread] and modified any inheritable OS-level // thread state (for example, Linux or Plan 9 name spaces), the new // process will inherit the caller's thread state. func (c *Cmd) Run() error { @@ -611,7 +611,7 @@ func (c *Cmd) Run() error { // // If Start returns successfully, the c.Process field will be set. // -// After a successful call to Start the Wait method must be called in +// After a successful call to Start the [Cmd.Wait] method must be called in // order to release associated system resources. func (c *Cmd) Start() error { // Check for doubled Start calls before we defer failure cleanup. If the prior @@ -872,20 +872,20 @@ func (e *ExitError) Error() string { // Wait waits for the command to exit and waits for any copying to // stdin or copying from stdout or stderr to complete. // -// The command must have been started by Start. +// The command must have been started by [Cmd.Start]. // // The returned error is nil if the command runs, has no problems // copying stdin, stdout, and stderr, and exits with a zero exit // status. // // If the command fails to run or doesn't complete successfully, the -// error is of type *ExitError. 
Other error types may be +// error is of type [*ExitError]. Other error types may be // returned for I/O problems. // -// If any of c.Stdin, c.Stdout or c.Stderr are not an *os.File, Wait also waits +// If any of c.Stdin, c.Stdout or c.Stderr are not an [*os.File], Wait also waits // for the respective I/O loop copying to or from the process to complete. // -// Wait releases any resources associated with the Cmd. +// Wait releases any resources associated with the [Cmd]. func (c *Cmd) Wait() error { if c.Process == nil { return errors.New("exec: not started") @@ -974,8 +974,8 @@ func (c *Cmd) awaitGoroutines(timer *time.Timer) error { } // Output runs the command and returns its standard output. -// Any returned error will usually be of type *ExitError. -// If c.Stderr was nil, Output populates ExitError.Stderr. +// Any returned error will usually be of type [*ExitError]. +// If c.Stderr was nil, Output populates [ExitError.Stderr]. func (c *Cmd) Output() ([]byte, error) { if c.Stdout != nil { return nil, errors.New("exec: Stdout already set") @@ -1015,7 +1015,7 @@ func (c *Cmd) CombinedOutput() ([]byte, error) { // StdinPipe returns a pipe that will be connected to the command's // standard input when the command starts. -// The pipe will be closed automatically after Wait sees the command exit. +// The pipe will be closed automatically after [Cmd.Wait] sees the command exit. // A caller need only call Close to force the pipe to close sooner. // For example, if the command being run will not exit until standard input // is closed, the caller must close the pipe. @@ -1039,10 +1039,10 @@ func (c *Cmd) StdinPipe() (io.WriteCloser, error) { // StdoutPipe returns a pipe that will be connected to the command's // standard output when the command starts. // -// Wait will close the pipe after seeing the command exit, so most callers +// [Cmd.Wait] will close the pipe after seeing the command exit, so most callers // need not close the pipe themselves. 
It is thus incorrect to call Wait // before all reads from the pipe have completed. -// For the same reason, it is incorrect to call Run when using StdoutPipe. +// For the same reason, it is incorrect to call [Cmd.Run] when using StdoutPipe. // See the example for idiomatic usage. func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { if c.Stdout != nil { @@ -1064,10 +1064,10 @@ func (c *Cmd) StdoutPipe() (io.ReadCloser, error) { // StderrPipe returns a pipe that will be connected to the command's // standard error when the command starts. // -// Wait will close the pipe after seeing the command exit, so most callers +// [Cmd.Wait] will close the pipe after seeing the command exit, so most callers // need not close the pipe themselves. It is thus incorrect to call Wait // before all reads from the pipe have completed. -// For the same reason, it is incorrect to use Run when using StderrPipe. +// For the same reason, it is incorrect to use [Cmd.Run] when using StderrPipe. // See the StdoutPipe example for idiomatic usage. func (c *Cmd) StderrPipe() (io.ReadCloser, error) { if c.Stderr != nil { diff --git a/src/os/exec/exec_test.go b/src/os/exec/exec_test.go index 71a00494ad..c4b89e0199 100644 --- a/src/os/exec/exec_test.go +++ b/src/os/exec/exec_test.go @@ -304,7 +304,7 @@ func cmdExit(args ...string) { } func cmdDescribeFiles(args ...string) { - f := os.NewFile(3, fmt.Sprintf("fd3")) + f := os.NewFile(3, "fd3") ln, err := net.FileListener(f) if err == nil { fmt.Printf("fd3: listener %s\n", ln.Addr()) @@ -1659,8 +1659,8 @@ func TestCancelErrors(t *testing.T) { // This test should kill the child process after 1ms, // To maximize compatibility with existing uses of exec.CommandContext, the // resulting error should be an exec.ExitError without additional wrapping. 
- if ee, ok := err.(*exec.ExitError); !ok { - t.Errorf("Wait error = %v; want %T", err, *ee) + if _, ok := err.(*exec.ExitError); !ok { + t.Errorf("Wait error = %v; want *exec.ExitError", err) } }) diff --git a/src/os/exec/lp_plan9.go b/src/os/exec/lp_plan9.go index dffdbac35f..87359b3551 100644 --- a/src/os/exec/lp_plan9.go +++ b/src/os/exec/lp_plan9.go @@ -34,7 +34,7 @@ func findExecutable(file string) error { // // In older versions of Go, LookPath could return a path relative to the current directory. // As of Go 1.19, LookPath will instead return that path along with an error satisfying -// errors.Is(err, ErrDot). See the package documentation for more details. +// [errors.Is](err, [ErrDot]). See the package documentation for more details. func LookPath(file string) (string, error) { // skip the path lookup for these prefixes skip := []string{"/", "#", "./", "../"} diff --git a/src/os/exec/lp_unix.go b/src/os/exec/lp_unix.go index 3787132078..8617d45e98 100644 --- a/src/os/exec/lp_unix.go +++ b/src/os/exec/lp_unix.go @@ -48,7 +48,7 @@ func findExecutable(file string) error { // // In older versions of Go, LookPath could return a path relative to the current directory. // As of Go 1.19, LookPath will instead return that path along with an error satisfying -// errors.Is(err, ErrDot). See the package documentation for more details. +// [errors.Is](err, [ErrDot]). See the package documentation for more details. func LookPath(file string) (string, error) { // NOTE(rsc): I wish we could use the Plan 9 behavior here // (only bypass the path if file begins with / or ./ or ../) diff --git a/src/os/exec/lp_windows.go b/src/os/exec/lp_windows.go index 698a97c40f..0e058d41b0 100644 --- a/src/os/exec/lp_windows.go +++ b/src/os/exec/lp_windows.go @@ -66,7 +66,7 @@ func findExecutable(file string, exts []string) (string, error) { // // In older versions of Go, LookPath could return a path relative to the current directory. 
// As of Go 1.19, LookPath will instead return that path along with an error satisfying -// errors.Is(err, ErrDot). See the package documentation for more details. +// [errors.Is](err, [ErrDot]). See the package documentation for more details. func LookPath(file string) (string, error) { return lookPath(file, pathExt()) } diff --git a/src/os/exec_plan9.go b/src/os/exec_plan9.go index 69714ff798..a1e74df8a5 100644 --- a/src/os/exec_plan9.go +++ b/src/os/exec_plan9.go @@ -14,7 +14,7 @@ import ( // The only signal values guaranteed to be present in the os package // on all systems are Interrupt (send the process an interrupt) and // Kill (force the process to exit). Interrupt is not implemented on -// Windows; using it with os.Process.Signal will return an error. +// Windows; using it with [os.Process.Signal] will return an error. var ( Interrupt Signal = syscall.Note("interrupt") Kill Signal = syscall.Note("kill") diff --git a/src/os/exec_unix.go b/src/os/exec_unix.go index f9063b4db4..36b320df18 100644 --- a/src/os/exec_unix.go +++ b/src/os/exec_unix.go @@ -48,9 +48,7 @@ func (p *Process) wait() (ps *ProcessState, err error) { if e != nil { return nil, NewSyscallError("wait", e) } - if pid1 != 0 { - p.setDone() - } + p.setDone() ps = &ProcessState{ pid: pid1, status: status, diff --git a/src/os/exec_windows.go b/src/os/exec_windows.go index 061a12b10f..9aa5b147c9 100644 --- a/src/os/exec_windows.go +++ b/src/os/exec_windows.go @@ -8,13 +8,12 @@ import ( "errors" "internal/syscall/windows" "runtime" - "sync/atomic" "syscall" "time" ) func (p *Process) wait() (ps *ProcessState, err error) { - handle := atomic.LoadUintptr(&p.handle) + handle := p.handle.Load() s, e := syscall.WaitForSingleObject(syscall.Handle(handle), syscall.INFINITE) switch s { case syscall.WAIT_OBJECT_0: @@ -40,7 +39,7 @@ func (p *Process) wait() (ps *ProcessState, err error) { } func (p *Process) signal(sig Signal) error { - handle := atomic.LoadUintptr(&p.handle) + handle := p.handle.Load() if 
handle == uintptr(syscall.InvalidHandle) { return syscall.EINVAL } @@ -63,7 +62,7 @@ func (p *Process) signal(sig Signal) error { } func (p *Process) release() error { - handle := atomic.SwapUintptr(&p.handle, uintptr(syscall.InvalidHandle)) + handle := p.handle.Swap(uintptr(syscall.InvalidHandle)) if handle == uintptr(syscall.InvalidHandle) { return syscall.EINVAL } diff --git a/src/os/executable.go b/src/os/executable.go index cc3134af1c..ae7ec139c3 100644 --- a/src/os/executable.go +++ b/src/os/executable.go @@ -9,7 +9,7 @@ package os // pointing to the correct executable. If a symlink was used to start // the process, depending on the operating system, the result might // be the symlink or the path it pointed to. If a stable result is -// needed, path/filepath.EvalSymlinks might help. +// needed, [path/filepath.EvalSymlinks] might help. // // Executable returns an absolute path unless an error occurred. // diff --git a/src/os/export_linux_test.go b/src/os/export_linux_test.go index 3fd5e61de7..942b48a17d 100644 --- a/src/os/export_linux_test.go +++ b/src/os/export_linux_test.go @@ -5,7 +5,8 @@ package os var ( - PollCopyFileRangeP = &pollCopyFileRange - PollSpliceFile = &pollSplice - GetPollFDForTest = getPollFD + PollCopyFileRangeP = &pollCopyFileRange + PollSpliceFile = &pollSplice + PollSendFile = &pollSendFile + GetPollFDAndNetwork = getPollFDAndNetwork ) diff --git a/src/os/export_windows_test.go b/src/os/export_windows_test.go index 6e1188816b..2e5904b3f5 100644 --- a/src/os/export_windows_test.go +++ b/src/os/export_windows_test.go @@ -8,7 +8,6 @@ package os var ( FixLongPath = fixLongPath - CanUseLongPaths = canUseLongPaths NewConsoleFile = newConsoleFile CommandLineToArgv = commandLineToArgv AllowReadDirFileID = &allowReadDirFileID diff --git a/src/os/file.go b/src/os/file.go index 82be00a834..c0c972bbd7 100644 --- a/src/os/file.go +++ b/src/os/file.go @@ -6,9 +6,9 @@ // functionality. 
The design is Unix-like, although the error handling is // Go-like; failing calls return values of type error rather than error numbers. // Often, more information is available within the error. For example, -// if a call that takes a file name fails, such as Open or Stat, the error +// if a call that takes a file name fails, such as [Open] or [Stat], the error // will include the failing file name when printed and will be of type -// *PathError, which may be unpacked for more information. +// [*PathError], which may be unpacked for more information. // // The os interface is intended to be uniform across all operating systems. // Features not generally available appear in the system-specific package syscall. @@ -157,20 +157,26 @@ func (f *File) ReadFrom(r io.Reader) (n int64, err error) { return n, f.wrapErr("write", e) } -func genericReadFrom(f *File, r io.Reader) (int64, error) { - return io.Copy(fileWithoutReadFrom{f}, r) +// noReadFrom can be embedded alongside another type to +// hide the ReadFrom method of that other type. +type noReadFrom struct{} + +// ReadFrom hides another ReadFrom method. +// It should never be called. +func (noReadFrom) ReadFrom(io.Reader) (int64, error) { + panic("can't happen") } // fileWithoutReadFrom implements all the methods of *File other // than ReadFrom. This is used to permit ReadFrom to call io.Copy // without leading to a recursive call to ReadFrom. type fileWithoutReadFrom struct { + noReadFrom *File } -// This ReadFrom method hides the *File ReadFrom method. -func (fileWithoutReadFrom) ReadFrom(fileWithoutReadFrom) { - panic("unreachable") +func genericReadFrom(f *File, r io.Reader) (int64, error) { + return io.Copy(fileWithoutReadFrom{File: f}, r) } // Write writes len(b) bytes from b to the File. @@ -229,6 +235,40 @@ func (f *File) WriteAt(b []byte, off int64) (n int, err error) { return } +// WriteTo implements io.WriterTo. 
+func (f *File) WriteTo(w io.Writer) (n int64, err error) { + if err := f.checkValid("read"); err != nil { + return 0, err + } + n, handled, e := f.writeTo(w) + if handled { + return n, f.wrapErr("read", e) + } + return genericWriteTo(f, w) // without wrapping +} + +// noWriteTo can be embedded alongside another type to +// hide the WriteTo method of that other type. +type noWriteTo struct{} + +// WriteTo hides another WriteTo method. +// It should never be called. +func (noWriteTo) WriteTo(io.Writer) (int64, error) { + panic("can't happen") +} + +// fileWithoutWriteTo implements all the methods of *File other +// than WriteTo. This is used to permit WriteTo to call io.Copy +// without leading to a recursive call to WriteTo. +type fileWithoutWriteTo struct { + noWriteTo + *File +} + +func genericWriteTo(f *File, w io.Writer) (int64, error) { + return io.Copy(w, fileWithoutWriteTo{File: f}) +} + // Seek sets the offset for the next Read or Write on file to offset, interpreted // according to whence: 0 means relative to the origin of the file, 1 means // relative to the current offset, and 2 means relative to the end. @@ -352,6 +392,15 @@ func Rename(oldpath, newpath string) error { return rename(oldpath, newpath) } +// Readlink returns the destination of the named symbolic link. +// If there is an error, it will be of type *PathError. +// +// If the link destination is relative, Readlink returns the relative path +// without resolving it to an absolute one. +func Readlink(name string) (string, error) { + return readlink(name) +} + // Many functions in package syscall return a count of -1 instead of 0. // Using fixCount(call()) instead of call() corrects the count. 
func fixCount(n int, err error) (int, error) { @@ -650,7 +699,15 @@ func (dir dirFS) ReadFile(name string) ([]byte, error) { if err != nil { return nil, &PathError{Op: "readfile", Path: name, Err: err} } - return ReadFile(fullname) + b, err := ReadFile(fullname) + if err != nil { + if e, ok := err.(*PathError); ok { + // See comment in dirFS.Open. + e.Path = name + } + return nil, err + } + return b, nil } // ReadDir reads the named directory, returning all its directory entries sorted @@ -660,7 +717,15 @@ func (dir dirFS) ReadDir(name string) ([]DirEntry, error) { if err != nil { return nil, &PathError{Op: "readdir", Path: name, Err: err} } - return ReadDir(fullname) + entries, err := ReadDir(fullname) + if err != nil { + if e, ok := err.(*PathError); ok { + // See comment in dirFS.Open. + e.Path = name + } + return nil, err + } + return entries, nil } func (dir dirFS) Stat(name string) (fs.FileInfo, error) { @@ -682,10 +747,7 @@ func (dir dirFS) join(name string) (string, error) { if dir == "" { return "", errors.New("os: DirFS with empty root") } - if !fs.ValidPath(name) { - return "", ErrInvalid - } - name, err := safefilepath.FromFS(name) + name, err := safefilepath.Localize(name) if err != nil { return "", ErrInvalid } diff --git a/src/os/file_plan9.go b/src/os/file_plan9.go index 03cdb5be4a..69a24316e6 100644 --- a/src/os/file_plan9.go +++ b/src/os/file_plan9.go @@ -33,8 +33,8 @@ type file struct { // Fd returns the integer Plan 9 file descriptor referencing the open file. // If f is closed, the file descriptor becomes invalid. // If f is garbage collected, a finalizer may close the file descriptor, -// making it invalid; see runtime.SetFinalizer for more information on when -// a finalizer might be run. On Unix systems this will cause the SetDeadline +// making it invalid; see [runtime.SetFinalizer] for more information on when +// a finalizer might be run. On Unix systems this will cause the [File.SetDeadline] // methods to stop working. 
// // As an alternative, see the f.SyscallConn method. @@ -505,9 +505,7 @@ func Symlink(oldname, newname string) error { return &LinkError{"symlink", oldname, newname, syscall.EPLAN9} } -// Readlink returns the destination of the named symbolic link. -// If there is an error, it will be of type *PathError. -func Readlink(name string) (string, error) { +func readlink(name string) (string, error) { return "", &PathError{Op: "readlink", Path: name, Err: syscall.EPLAN9} } @@ -544,7 +542,6 @@ func tempDir() string { dir = "/tmp" } return dir - } // Chdir changes the current working directory to the file, diff --git a/src/os/file_posix.go b/src/os/file_posix.go index 5692657753..8ff0ada462 100644 --- a/src/os/file_posix.go +++ b/src/os/file_posix.go @@ -12,9 +12,9 @@ import ( "time" ) -// Close closes the File, rendering it unusable for I/O. -// On files that support SetDeadline, any pending I/O operations will -// be canceled and return immediately with an ErrClosed error. +// Close closes the [File], rendering it unusable for I/O. +// On files that support [File.SetDeadline], any pending I/O operations will +// be canceled and return immediately with an [ErrClosed] error. // Close will return an error if it has already been called. func (f *File) Close() error { if f == nil { @@ -98,9 +98,9 @@ func (f *File) chmod(mode FileMode) error { // Chown changes the numeric uid and gid of the named file. // If the file is a symbolic link, it changes the uid and gid of the link's target. // A uid or gid of -1 means to not change that value. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. // -// On Windows or Plan 9, Chown always returns the syscall.EWINDOWS or +// On Windows or Plan 9, Chown always returns the [syscall.EWINDOWS] or // EPLAN9 error, wrapped in *PathError. 
func Chown(name string, uid, gid int) error { e := ignoringEINTR(func() error { @@ -114,9 +114,9 @@ func Chown(name string, uid, gid int) error { // Lchown changes the numeric uid and gid of the named file. // If the file is a symbolic link, it changes the uid and gid of the link itself. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. // -// On Windows, it always returns the syscall.EWINDOWS error, wrapped +// On Windows, it always returns the [syscall.EWINDOWS] error, wrapped // in *PathError. func Lchown(name string, uid, gid int) error { e := ignoringEINTR(func() error { @@ -129,9 +129,9 @@ func Lchown(name string, uid, gid int) error { } // Chown changes the numeric uid and gid of the named file. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. // -// On Windows, it always returns the syscall.EWINDOWS error, wrapped +// On Windows, it always returns the [syscall.EWINDOWS] error, wrapped // in *PathError. func (f *File) Chown(uid, gid int) error { if err := f.checkValid("chown"); err != nil { @@ -145,7 +145,7 @@ func (f *File) Chown(uid, gid int) error { // Truncate changes the size of the file. // It does not change the I/O offset. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. func (f *File) Truncate(size int64) error { if err := f.checkValid("truncate"); err != nil { return err @@ -171,11 +171,11 @@ func (f *File) Sync() error { // Chtimes changes the access and modification times of the named // file, similar to the Unix utime() or utimes() functions. -// A zero time.Time value will leave the corresponding file time unchanged. +// A zero [time.Time] value will leave the corresponding file time unchanged. // // The underlying filesystem may truncate or round the values to a // less precise time unit. -// If there is an error, it will be of type *PathError. 
+// If there is an error, it will be of type [*PathError]. func Chtimes(name string, atime time.Time, mtime time.Time) error { var utimes [2]syscall.Timespec set := func(i int, t time.Time) { @@ -195,7 +195,7 @@ func Chtimes(name string, atime time.Time, mtime time.Time) error { // Chdir changes the current working directory to the file, // which must be a directory. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. func (f *File) Chdir() error { if err := f.checkValid("chdir"); err != nil { return err diff --git a/src/os/file_unix.go b/src/os/file_unix.go index 533a48404b..6597186486 100644 --- a/src/os/file_unix.go +++ b/src/os/file_unix.go @@ -67,11 +67,11 @@ type file struct { // Fd returns the integer Unix file descriptor referencing the open file. // If f is closed, the file descriptor becomes invalid. // If f is garbage collected, a finalizer may close the file descriptor, -// making it invalid; see runtime.SetFinalizer for more information on when -// a finalizer might be run. On Unix systems this will cause the SetDeadline +// making it invalid; see [runtime.SetFinalizer] for more information on when +// a finalizer might be run. On Unix systems this will cause the [File.SetDeadline] // methods to stop working. // Because file descriptors can be reused, the returned file descriptor may -// only be closed through the Close method of f, or by its finalizer during +// only be closed through the [File.Close] method of f, or by its finalizer during // garbage collection. Otherwise, during garbage collection the finalizer // may close an unrelated file descriptor with the same (reused) number. // @@ -426,9 +426,7 @@ func Symlink(oldname, newname string) error { return nil } -// Readlink returns the destination of the named symbolic link. -// If there is an error, it will be of type *PathError. 
-func Readlink(name string) (string, error) { +func readlink(name string) (string, error) { for len := 128; ; len *= 2 { b := make([]byte, len) var ( diff --git a/src/os/file_windows.go b/src/os/file_windows.go index 63d53a1df8..49fdd8d44d 100644 --- a/src/os/file_windows.go +++ b/src/os/file_windows.go @@ -6,6 +6,7 @@ package os import ( "errors" + "internal/godebug" "internal/poll" "internal/syscall/windows" "runtime" @@ -31,8 +32,8 @@ type file struct { // Fd returns the Windows handle referencing the open file. // If f is closed, the file descriptor becomes invalid. // If f is garbage collected, a finalizer may close the file descriptor, -// making it invalid; see runtime.SetFinalizer for more information on when -// a finalizer might be run. On Unix systems this will cause the SetDeadline +// making it invalid; see [runtime.SetFinalizer] for more information on when +// a finalizer might be run. On Unix systems this will cause the [File.SetDeadline] // methods to stop working. func (file *File) Fd() uintptr { if file == nil { @@ -115,11 +116,7 @@ func openFileNolog(name string, flag int, perm FileMode) (*File, error) { } return nil, &PathError{Op: "open", Path: name, Err: e} } - f, e := newFile(r, name, "file"), nil - if e != nil { - return nil, &PathError{Op: "open", Path: name, Err: e} - } - return f, nil + return newFile(r, name, "file"), nil } func (file *file) close() error { @@ -353,6 +350,8 @@ func openSymlink(path string) (syscall.Handle, error) { return h, nil } +var winreadlinkvolume = godebug.New("winreadlinkvolume") + // normaliseLinkPath converts absolute paths returned by // DeviceIoControl(h, FSCTL_GET_REPARSE_POINT, ...) // into paths acceptable by all Windows APIs. 
@@ -360,7 +359,7 @@ func openSymlink(path string) (syscall.Handle, error) { // // \??\C:\foo\bar into C:\foo\bar // \??\UNC\foo\bar into \\foo\bar -// \??\Volume{abc}\ into C:\ +// \??\Volume{abc}\ into \\?\Volume{abc}\ func normaliseLinkPath(path string) (string, error) { if len(path) < 4 || path[:4] != `\??\` { // unexpected path, return it as is @@ -375,7 +374,10 @@ func normaliseLinkPath(path string) (string, error) { return `\\` + s[4:], nil } - // handle paths, like \??\Volume{abc}\... + // \??\Volume{abc}\ + if winreadlinkvolume.Value() != "0" { + return `\\?\` + path[4:], nil + } h, err := openSymlink(path) if err != nil { @@ -406,7 +408,7 @@ func normaliseLinkPath(path string) (string, error) { return "", errors.New("GetFinalPathNameByHandle returned unexpected path: " + s) } -func readlink(path string) (string, error) { +func readReparseLink(path string) (string, error) { h, err := openSymlink(path) if err != nil { return "", err @@ -438,10 +440,8 @@ func readlink(path string) (string, error) { } } -// Readlink returns the destination of the named symbolic link. -// If there is an error, it will be of type *PathError. -func Readlink(name string) (string, error) { - s, err := readlink(fixLongPath(name)) +func readlink(name string) (string, error) { + s, err := readReparseLink(fixLongPath(name)) if err != nil { return "", &PathError{Op: "readlink", Path: name, Err: err} } diff --git a/src/os/os_test.go b/src/os/os_test.go index ae12b9ce1b..68a3a03e2a 100644 --- a/src/os/os_test.go +++ b/src/os/os_test.go @@ -5,12 +5,14 @@ package os_test import ( + "bytes" "errors" "flag" "fmt" "internal/testenv" "io" "io/fs" + "log" . 
"os" "os/exec" "path/filepath" @@ -33,6 +35,8 @@ func TestMain(m *testing.M) { Exit(0) } + log.SetFlags(log.LstdFlags | log.Lshortfile) + Exit(m.Run()) } @@ -1620,8 +1624,17 @@ func TestFileChdir(t *testing.T) { if err != nil { t.Fatalf("Getwd: %s", err) } - if !equal(wdNew, wd) { - t.Fatalf("fd.Chdir failed, got %s, want %s", wdNew, wd) + + wdInfo, err := fd.Stat() + if err != nil { + t.Fatal(err) + } + newInfo, err := Stat(wdNew) + if err != nil { + t.Fatal(err) + } + if !SameFile(wdInfo, newInfo) { + t.Fatalf("fd.Chdir failed: got %s, want %s", wdNew, wd) } } @@ -2838,16 +2851,17 @@ func TestUserCacheDir(t *testing.T) { t.Fatalf("UserCacheDir returned %q; want non-empty path or error", dir) } - if err := MkdirAll(dir, 0777); err != nil { - t.Fatalf("could not create UserCacheDir: %v", err) - } - d, err := MkdirTemp(dir, "TestUserCacheDir") + fi, err := Stat(dir) if err != nil { - t.Fatalf("could not create a directory in UserCacheDir: %v", err) - } - if err := Remove(d); err != nil { + if IsNotExist(err) { + t.Log(err) + return + } t.Fatal(err) } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } } func TestUserConfigDir(t *testing.T) { @@ -2861,17 +2875,17 @@ func TestUserConfigDir(t *testing.T) { t.Fatalf("UserConfigDir returned %q; want non-empty path or error", dir) } - if err := MkdirAll(dir, 0777); err != nil { - t.Fatalf("could not create UserConfigDir: %v", err) - } - - d, err := MkdirTemp(dir, "TestUserConfigDir") + fi, err := Stat(dir) if err != nil { - t.Fatalf("could not create a directory in UserConfigDir: %v", err) - } - if err := Remove(d); err != nil { + if IsNotExist(err) { + t.Log(err) + return + } t.Fatal(err) } + if !fi.IsDir() { + t.Fatalf("dir %s is not directory; type = %v", dir, fi.Mode()) + } } func TestUserHomeDir(t *testing.T) { @@ -3017,35 +3031,44 @@ func TestOpenFileKeepsPermissions(t *testing.T) { } } -func TestDirFS(t *testing.T) { - t.Parallel() +func forceMFTUpdateOnWindows(t *testing.T, path 
string) { + t.Helper() + + if runtime.GOOS != "windows" { + return + } // On Windows, we force the MFT to update by reading the actual metadata from GetFileInformationByHandle and then // explicitly setting that. Otherwise it might get out of sync with FindFirstFile. See golang.org/issues/42637. - if runtime.GOOS == "windows" { - if err := filepath.WalkDir("./testdata/dirfs", func(path string, d fs.DirEntry, err error) error { - if err != nil { - t.Fatal(err) - } - info, err := d.Info() - if err != nil { - t.Fatal(err) - } - stat, err := Stat(path) // This uses GetFileInformationByHandle internally. - if err != nil { - t.Fatal(err) - } - if stat.ModTime() == info.ModTime() { - return nil - } - if err := Chtimes(path, stat.ModTime(), stat.ModTime()); err != nil { - t.Log(err) // We only log, not die, in case the test directory is not writable. - } - return nil - }); err != nil { + if err := filepath.WalkDir(path, func(path string, d fs.DirEntry, err error) error { + if err != nil { t.Fatal(err) } + info, err := d.Info() + if err != nil { + t.Fatal(err) + } + stat, err := Stat(path) // This uses GetFileInformationByHandle internally. + if err != nil { + t.Fatal(err) + } + if stat.ModTime() == info.ModTime() { + return nil + } + if err := Chtimes(path, stat.ModTime(), stat.ModTime()); err != nil { + t.Log(err) // We only log, not die, in case the test directory is not writable. 
+ } + return nil + }); err != nil { + t.Fatal(err) } +} + +func TestDirFS(t *testing.T) { + t.Parallel() + + forceMFTUpdateOnWindows(t, "./testdata/dirfs") + fsys := DirFS("./testdata/dirfs") if err := fstest.TestFS(fsys, "a", "b", "dir/x"); err != nil { t.Fatal(err) @@ -3321,3 +3344,242 @@ func TestPipeCloseRace(t *testing.T) { t.Errorf("got nils %d errs %d, want 2 2", nils, errs) } } + +func TestRandomLen(t *testing.T) { + for range 5 { + dir, err := MkdirTemp(t.TempDir(), "*") + if err != nil { + t.Fatal(err) + } + base := filepath.Base(dir) + if len(base) > 10 { + t.Errorf("MkdirTemp returned len %d: %s", len(base), base) + } + } + for range 5 { + f, err := CreateTemp(t.TempDir(), "*") + if err != nil { + t.Fatal(err) + } + base := filepath.Base(f.Name()) + f.Close() + if len(base) > 10 { + t.Errorf("CreateTemp returned len %d: %s", len(base), base) + } + } +} + +func TestCopyFS(t *testing.T) { + t.Parallel() + + // Test with disk filesystem. + forceMFTUpdateOnWindows(t, "./testdata/dirfs") + fsys := DirFS("./testdata/dirfs") + tmpDir := t.TempDir() + if err := CopyFS(tmpDir, fsys); err != nil { + t.Fatal("CopyFS:", err) + } + forceMFTUpdateOnWindows(t, tmpDir) + tmpFsys := DirFS(tmpDir) + if err := fstest.TestFS(tmpFsys, "a", "b", "dir/x"); err != nil { + t.Fatal("TestFS:", err) + } + if err := fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + return nil + } + + data, err := fs.ReadFile(fsys, path) + if err != nil { + return err + } + newData, err := fs.ReadFile(tmpFsys, path) + if err != nil { + return err + } + if !bytes.Equal(data, newData) { + return errors.New("file " + path + " contents differ") + } + return nil + }); err != nil { + t.Fatal("comparing two directories:", err) + } + + // Test with memory filesystem. 
+ fsys = fstest.MapFS{ + "william": {Data: []byte("Shakespeare\n")}, + "carl": {Data: []byte("Gauss\n")}, + "daVinci": {Data: []byte("Leonardo\n")}, + "einstein": {Data: []byte("Albert\n")}, + "dir/newton": {Data: []byte("Sir Isaac\n")}, + } + tmpDir = t.TempDir() + if err := CopyFS(tmpDir, fsys); err != nil { + t.Fatal("CopyFS:", err) + } + forceMFTUpdateOnWindows(t, tmpDir) + tmpFsys = DirFS(tmpDir) + if err := fstest.TestFS(tmpFsys, "william", "carl", "daVinci", "einstein", "dir/newton"); err != nil { + t.Fatal("TestFS:", err) + } + if err := fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + return nil + } + + data, err := fs.ReadFile(fsys, path) + if err != nil { + return err + } + newData, err := fs.ReadFile(tmpFsys, path) + if err != nil { + return err + } + if !bytes.Equal(data, newData) { + return errors.New("file " + path + " contents differ") + } + return nil + }); err != nil { + t.Fatal("comparing two directories:", err) + } +} + +func TestCopyFSWithSymlinks(t *testing.T) { + // Test it with absolute and relative symlinks that point inside and outside the tree. + testenv.MustHaveSymlink(t) + + // Create a directory and file outside. + tmpDir := t.TempDir() + outsideDir, err := MkdirTemp(tmpDir, "copyfs_out_") + if err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + outsideFile := filepath.Join(outsideDir, "file.out.txt") + + if err := WriteFile(outsideFile, []byte("Testing CopyFS outside"), 0644); err != nil { + t.Fatalf("WriteFile: %v", err) + } + + // Create a directory and file inside. + insideDir, err := MkdirTemp(tmpDir, "copyfs_in_") + if err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + insideFile := filepath.Join(insideDir, "file.in.txt") + if err := WriteFile(insideFile, []byte("Testing CopyFS inside"), 0644); err != nil { + t.Fatalf("WriteFile: %v", err) + } + + // Create directories for symlinks. 
+ linkInDir := filepath.Join(insideDir, "in_symlinks") + if err := Mkdir(linkInDir, 0755); err != nil { + t.Fatalf("Mkdir: %v", err) + } + linkOutDir := filepath.Join(insideDir, "out_symlinks") + if err := Mkdir(linkOutDir, 0755); err != nil { + t.Fatalf("Mkdir: %v", err) + } + + // First, we create the absolute symlink pointing outside. + outLinkFile := filepath.Join(linkOutDir, "file.abs.out.link") + if err := Symlink(outsideFile, outLinkFile); err != nil { + t.Fatalf("Symlink: %v", err) + } + + // Then, we create the relative symlink pointing outside. + relOutsideFile, err := filepath.Rel(filepath.Join(linkOutDir, "."), outsideFile) + if err != nil { + t.Fatalf("filepath.Rel: %v", err) + } + relOutLinkFile := filepath.Join(linkOutDir, "file.rel.out.link") + if err := Symlink(relOutsideFile, relOutLinkFile); err != nil { + t.Fatalf("Symlink: %v", err) + } + + // Last, we create the relative symlink pointing inside. + relInsideFile, err := filepath.Rel(filepath.Join(linkInDir, "."), insideFile) + if err != nil { + t.Fatalf("filepath.Rel: %v", err) + } + relInLinkFile := filepath.Join(linkInDir, "file.rel.in.link") + if err := Symlink(relInsideFile, relInLinkFile); err != nil { + t.Fatalf("Symlink: %v", err) + } + + // Copy the directory tree and verify. + forceMFTUpdateOnWindows(t, insideDir) + fsys := DirFS(insideDir) + tmpDupDir, err := MkdirTemp(tmpDir, "copyfs_dup_") + if err != nil { + t.Fatalf("MkdirTemp: %v", err) + } + + // TODO(panjf2000): symlinks are currently not supported, and a specific error + // will be returned. Verify that error and skip the subsequent test, + // revisit this once #49580 is closed. 
+ if err := CopyFS(tmpDupDir, fsys); !errors.Is(err, ErrInvalid) { + t.Fatalf("got %v, want ErrInvalid", err) + } + t.Skip("skip the subsequent test and wait for #49580") + + forceMFTUpdateOnWindows(t, tmpDupDir) + tmpFsys := DirFS(tmpDupDir) + if err := fstest.TestFS(tmpFsys, "file.in.txt", "out_symlinks/file.abs.out.link", "out_symlinks/file.rel.out.link", "in_symlinks/file.rel.in.link"); err != nil { + t.Fatal("TestFS:", err) + } + if err := fs.WalkDir(fsys, ".", func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + return nil + } + + fi, err := d.Info() + if err != nil { + return err + } + if filepath.Ext(path) == ".link" { + if fi.Mode()&ModeSymlink == 0 { + return errors.New("original file " + path + " should be a symlink") + } + tmpfi, err := fs.Stat(tmpFsys, path) + if err != nil { + return err + } + if tmpfi.Mode()&ModeSymlink != 0 { + return errors.New("copied file " + path + " should not be a symlink") + } + } + + data, err := fs.ReadFile(fsys, path) + if err != nil { + return err + } + newData, err := fs.ReadFile(tmpFsys, path) + if err != nil { + return err + } + if !bytes.Equal(data, newData) { + return errors.New("file " + path + " contents differ") + } + + var target string + switch fileName := filepath.Base(path); fileName { + case "file.abs.out.link", "file.rel.out.link": + target = outsideFile + case "file.rel.in.link": + target = insideFile + } + if len(target) > 0 { + targetData, err := ReadFile(target) + if err != nil { + return err + } + if !bytes.Equal(targetData, newData) { + return errors.New("file " + path + " contents differ from target") + } + } + + return nil + }); err != nil { + t.Fatal("comparing two directories:", err) + } +} diff --git a/src/os/os_windows_test.go b/src/os/os_windows_test.go index f8edaeb876..956ab07f51 100644 --- a/src/os/os_windows_test.go +++ b/src/os/os_windows_test.go @@ -7,6 +7,7 @@ package os_test import ( "errors" "fmt" + "internal/godebug" "internal/poll" "internal/syscall/windows" 
"internal/syscall/windows/registry" @@ -27,6 +28,9 @@ import ( "unsafe" ) +var winsymlink = godebug.New("winsymlink") +var winreadlinkvolume = godebug.New("winreadlinkvolume") + // For TestRawConnReadWrite. type syscallDescriptor = syscall.Handle @@ -90,9 +94,10 @@ func TestSameWindowsFile(t *testing.T) { } type dirLinkTest struct { - name string - mklink func(link, target string) error - issueNo int // correspondent issue number (for broken tests) + name string + mklink func(link, target string) error + issueNo int // correspondent issue number (for broken tests) + isMountPoint bool } func testDirLinks(t *testing.T, tests []dirLinkTest) { @@ -140,8 +145,8 @@ func testDirLinks(t *testing.T, tests []dirLinkTest) { t.Errorf("failed to stat link %v: %v", link, err) continue } - if !fi1.IsDir() { - t.Errorf("%q should be a directory", link) + if tp := fi1.Mode().Type(); tp != fs.ModeDir { + t.Errorf("Stat(%q) is type %v; want %v", link, tp, fs.ModeDir) continue } if fi1.Name() != filepath.Base(link) { @@ -158,13 +163,16 @@ func testDirLinks(t *testing.T, tests []dirLinkTest) { t.Errorf("failed to lstat link %v: %v", link, err) continue } - if m := fi2.Mode(); m&fs.ModeSymlink == 0 { - t.Errorf("%q should be a link, but is not (mode=0x%x)", link, uint32(m)) - continue + var wantType fs.FileMode + if test.isMountPoint && winsymlink.Value() != "0" { + // Mount points are reparse points, and we no longer treat them as symlinks. + wantType = fs.ModeIrregular + } else { + // This is either a real symlink, or a mount point treated as a symlink. 
+ wantType = fs.ModeSymlink } - if m := fi2.Mode(); m&fs.ModeDir != 0 { - t.Errorf("%q should be a link, not a directory (mode=0x%x)", link, uint32(m)) - continue + if tp := fi2.Mode().Type(); tp != wantType { + t.Errorf("Lstat(%q) is type %v; want %v", link, tp, wantType) } } } @@ -272,7 +280,8 @@ func TestDirectoryJunction(t *testing.T) { var tests = []dirLinkTest{ { // Create link similar to what mklink does, by inserting \??\ at the front of absolute target. - name: "standard", + name: "standard", + isMountPoint: true, mklink: func(link, target string) error { var t reparseData t.addSubstituteName(`\??\` + target) @@ -282,7 +291,8 @@ func TestDirectoryJunction(t *testing.T) { }, { // Do as junction utility https://learn.microsoft.com/en-us/sysinternals/downloads/junction does - set PrintNameLength to 0. - name: "have_blank_print_name", + name: "have_blank_print_name", + isMountPoint: true, mklink: func(link, target string) error { var t reparseData t.addSubstituteName(`\??\` + target) @@ -296,7 +306,8 @@ func TestDirectoryJunction(t *testing.T) { if mklinkSupportsJunctionLinks { tests = append(tests, dirLinkTest{ - name: "use_mklink_cmd", + name: "use_mklink_cmd", + isMountPoint: true, mklink: func(link, target string) error { output, err := testenv.Command(t, "cmd", "/c", "mklink", "/J", link, target).CombinedOutput() if err != nil { @@ -581,7 +592,7 @@ func TestStatLxSymLink(t *testing.T) { } if m := fi.Mode(); m&fs.ModeSymlink != 0 { // This can happen depending on newer WSL versions when running as admin or in developer mode. - t.Skip("skipping: WSL created reparse tag IO_REPARSE_TAG_SYMLINK instead of a IO_REPARSE_TAG_LX_SYMLINK") + t.Skip("skipping: WSL created reparse tag IO_REPARSE_TAG_SYMLINK instead of an IO_REPARSE_TAG_LX_SYMLINK") } // Stat'ing a IO_REPARSE_TAG_LX_SYMLINK from outside WSL always return ERROR_CANT_ACCESS_FILE. // We check this condition to validate that os.Stat has tried to follow the link. 
@@ -1242,112 +1253,125 @@ func TestRootDirAsTemp(t *testing.T) { } } -func testReadlink(t *testing.T, path, want string) { - got, err := os.Readlink(path) +// replaceDriveWithVolumeID returns path with its volume name replaced with +// the mounted volume ID. E.g. C:\foo -> \\?\Volume{GUID}\foo. +func replaceDriveWithVolumeID(t *testing.T, path string) string { + t.Helper() + cmd := testenv.Command(t, "cmd", "/c", "mountvol", filepath.VolumeName(path), "/L") + out, err := cmd.CombinedOutput() if err != nil { - t.Error(err) - return - } - if got != want { - t.Errorf(`Readlink(%q): got %q, want %q`, path, got, want) + t.Fatalf("%v: %v\n%s", cmd, err, out) } + vol := strings.Trim(string(out), " \n\r") + return filepath.Join(vol, path[len(filepath.VolumeName(path)):]) } -func mklink(t *testing.T, link, target string) { - output, err := testenv.Command(t, "cmd", "/c", "mklink", link, target).CombinedOutput() - if err != nil { - t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) +func TestReadlink(t *testing.T) { + tests := []struct { + junction bool + dir bool + drive bool + relative bool + }{ + {junction: true, dir: true, drive: true, relative: false}, + {junction: true, dir: true, drive: false, relative: false}, + {junction: true, dir: true, drive: false, relative: true}, + {junction: false, dir: true, drive: true, relative: false}, + {junction: false, dir: true, drive: false, relative: false}, + {junction: false, dir: true, drive: false, relative: true}, + {junction: false, dir: false, drive: true, relative: false}, + {junction: false, dir: false, drive: false, relative: false}, + {junction: false, dir: false, drive: false, relative: true}, } -} + for _, tt := range tests { + tt := tt + var name string + if tt.junction { + name = "junction" + } else { + name = "symlink" + } + if tt.dir { + name += "_dir" + } else { + name += "_file" + } + if tt.drive { + name += "_drive" + } else { + name += "_volume" + } + if tt.relative { + name += "_relative" 
+ } else { + name += "_absolute" + } -func mklinkj(t *testing.T, link, target string) { - output, err := testenv.Command(t, "cmd", "/c", "mklink", "/J", link, target).CombinedOutput() - if err != nil { - t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) + t.Run(name, func(t *testing.T) { + if !tt.relative { + t.Parallel() + } + // Make sure tmpdir is not a symlink, otherwise tests will fail. + tmpdir, err := filepath.EvalSymlinks(t.TempDir()) + if err != nil { + t.Fatal(err) + } + link := filepath.Join(tmpdir, "link") + target := filepath.Join(tmpdir, "target") + if tt.dir { + if err := os.MkdirAll(target, 0777); err != nil { + t.Fatal(err) + } + } else { + if err := os.WriteFile(target, nil, 0666); err != nil { + t.Fatal(err) + } + } + var want string + if tt.relative { + relTarget := filepath.Base(target) + if tt.junction { + want = target // relative directory junction resolves to absolute path + } else { + want = relTarget + } + chdir(t, tmpdir) + link = filepath.Base(link) + target = relTarget + } else { + if tt.drive { + want = target + } else { + volTarget := replaceDriveWithVolumeID(t, target) + if winreadlinkvolume.Value() == "0" { + want = target + } else { + want = volTarget + } + target = volTarget + } + } + if tt.junction { + cmd := testenv.Command(t, "cmd", "/c", "mklink", "/J", link, target) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("%v: %v\n%s", cmd, err, out) + } + } else { + if err := os.Symlink(target, link); err != nil { + t.Fatalf("Symlink(%#q, %#q): %v", target, link, err) + } + } + got, err := os.Readlink(link) + if err != nil { + t.Fatal(err) + } + if got != want { + t.Fatalf("Readlink(%#q) = %#q; want %#q", target, got, want) + } + }) } } -func mklinkd(t *testing.T, link, target string) { - output, err := testenv.Command(t, "cmd", "/c", "mklink", "/D", link, target).CombinedOutput() - if err != nil { - t.Fatalf("failed to run mklink %v %v: %v %q", link, target, err, output) - } -} - -func 
TestWindowsReadlink(t *testing.T) { - tmpdir, err := os.MkdirTemp("", "TestWindowsReadlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // Make sure tmpdir is not a symlink, otherwise tests will fail. - tmpdir, err = filepath.EvalSymlinks(tmpdir) - if err != nil { - t.Fatal(err) - } - chdir(t, tmpdir) - - vol := filepath.VolumeName(tmpdir) - output, err := testenv.Command(t, "cmd", "/c", "mountvol", vol, "/L").CombinedOutput() - if err != nil { - t.Fatalf("failed to run mountvol %v /L: %v %q", vol, err, output) - } - ntvol := strings.Trim(string(output), " \n\r") - - dir := filepath.Join(tmpdir, "dir") - err = os.MkdirAll(dir, 0777) - if err != nil { - t.Fatal(err) - } - - absdirjlink := filepath.Join(tmpdir, "absdirjlink") - mklinkj(t, absdirjlink, dir) - testReadlink(t, absdirjlink, dir) - - ntdirjlink := filepath.Join(tmpdir, "ntdirjlink") - mklinkj(t, ntdirjlink, ntvol+absdirjlink[len(filepath.VolumeName(absdirjlink)):]) - testReadlink(t, ntdirjlink, absdirjlink) - - ntdirjlinktolink := filepath.Join(tmpdir, "ntdirjlinktolink") - mklinkj(t, ntdirjlinktolink, ntvol+absdirjlink[len(filepath.VolumeName(absdirjlink)):]) - testReadlink(t, ntdirjlinktolink, absdirjlink) - - mklinkj(t, "reldirjlink", "dir") - testReadlink(t, "reldirjlink", dir) // relative directory junction resolves to absolute path - - // Make sure we have sufficient privilege to run mklink command. 
- testenv.MustHaveSymlink(t) - - absdirlink := filepath.Join(tmpdir, "absdirlink") - mklinkd(t, absdirlink, dir) - testReadlink(t, absdirlink, dir) - - ntdirlink := filepath.Join(tmpdir, "ntdirlink") - mklinkd(t, ntdirlink, ntvol+absdirlink[len(filepath.VolumeName(absdirlink)):]) - testReadlink(t, ntdirlink, absdirlink) - - mklinkd(t, "reldirlink", "dir") - testReadlink(t, "reldirlink", "dir") - - file := filepath.Join(tmpdir, "file") - err = os.WriteFile(file, []byte(""), 0666) - if err != nil { - t.Fatal(err) - } - - filelink := filepath.Join(tmpdir, "filelink") - mklink(t, filelink, file) - testReadlink(t, filelink, file) - - linktofilelink := filepath.Join(tmpdir, "linktofilelink") - mklink(t, linktofilelink, ntvol+filelink[len(filepath.VolumeName(filelink)):]) - testReadlink(t, linktofilelink, filelink) - - mklink(t, "relfilelink", "file") - testReadlink(t, "relfilelink", "file") -} - func TestOpenDirTOCTOU(t *testing.T) { t.Parallel() @@ -1414,16 +1438,10 @@ func TestAppExecLinkStat(t *testing.T) { if lfi.Name() != pythonExeName { t.Errorf("Stat %s: got %q, but wanted %q", pythonPath, lfi.Name(), pythonExeName) } - if m := lfi.Mode(); m&fs.ModeSymlink != 0 { - t.Errorf("%q should be a file, not a link (mode=0x%x)", pythonPath, uint32(m)) - } - if m := lfi.Mode(); m&fs.ModeDir != 0 { - t.Errorf("%q should be a file, not a directory (mode=0x%x)", pythonPath, uint32(m)) - } - if m := lfi.Mode(); m&fs.ModeIrregular == 0 { + if tp := lfi.Mode().Type(); tp != fs.ModeIrregular { // A reparse point is not a regular file, but we don't have a more appropriate // ModeType bit for it, so it should be marked as irregular. 
- t.Errorf("%q should not be a regular file (mode=0x%x)", pythonPath, uint32(m)) + t.Errorf("%q should be an irregular file (mode=0x%x)", pythonPath, uint32(tp)) } if sfi.Name() != pythonExeName { diff --git a/src/os/path.go b/src/os/path.go index 6ac4cbe20f..a46c20bfd2 100644 --- a/src/os/path.go +++ b/src/os/path.go @@ -68,7 +68,7 @@ func MkdirAll(path string, perm FileMode) error { // It removes everything it can but returns the first error // it encounters. If the path does not exist, RemoveAll // returns nil (no error). -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. func RemoveAll(path string) error { return removeAll(path) } diff --git a/src/os/path_windows.go b/src/os/path_windows.go index 0522025148..98139679d4 100644 --- a/src/os/path_windows.go +++ b/src/os/path_windows.go @@ -4,6 +4,8 @@ package os +import "internal/syscall/windows" + const ( PathSeparator = '\\' // OS-specific path separator PathListSeparator = ';' // OS-specific path list separator @@ -128,10 +130,6 @@ func dirname(path string) string { return vol + dir } -// This is set via go:linkname on runtime.canUseLongPaths, and is true when the OS -// supports opting into proper long path handling without the need for fixups. -var canUseLongPaths bool - // fixLongPath returns the extended-length (\\?\-prefixed) form of // path when needed, in order to avoid the default 260 character file // path limit imposed by Windows. If path is not easily converted to @@ -141,7 +139,7 @@ var canUseLongPaths bool // // See https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation func fixLongPath(path string) string { - if canUseLongPaths { + if windows.CanUseLongPaths { return path } // Do nothing (and don't allocate) if the path is "short". 
diff --git a/src/os/path_windows_test.go b/src/os/path_windows_test.go index 4e5e501d1f..6fa864a98d 100644 --- a/src/os/path_windows_test.go +++ b/src/os/path_windows_test.go @@ -16,7 +16,7 @@ import ( ) func TestFixLongPath(t *testing.T) { - if os.CanUseLongPaths { + if windows.CanUseLongPaths { return } t.Parallel() diff --git a/src/os/proc.go b/src/os/proc.go index 3aae5680ee..ea029158ee 100644 --- a/src/os/proc.go +++ b/src/os/proc.go @@ -47,7 +47,7 @@ func Getegid() int { return syscall.Getegid() } // Getgroups returns a list of the numeric ids of groups that the caller belongs to. // -// On Windows, it returns syscall.EWINDOWS. See the os/user package +// On Windows, it returns [syscall.EWINDOWS]. See the [os/user] package // for a possible alternative. func Getgroups() ([]int, error) { gids, e := syscall.Getgroups() diff --git a/src/os/readfrom_linux_test.go b/src/os/readfrom_linux_test.go index 4f98be4b9b..93f78032e7 100644 --- a/src/os/readfrom_linux_test.go +++ b/src/os/readfrom_linux_test.go @@ -749,12 +749,12 @@ func TestProcCopy(t *testing.T) { } } -func TestGetPollFDFromReader(t *testing.T) { - t.Run("tcp", func(t *testing.T) { testGetPollFromReader(t, "tcp") }) - t.Run("unix", func(t *testing.T) { testGetPollFromReader(t, "unix") }) +func TestGetPollFDAndNetwork(t *testing.T) { + t.Run("tcp4", func(t *testing.T) { testGetPollFDAndNetwork(t, "tcp4") }) + t.Run("unix", func(t *testing.T) { testGetPollFDAndNetwork(t, "unix") }) } -func testGetPollFromReader(t *testing.T, proto string) { +func testGetPollFDAndNetwork(t *testing.T, proto string) { _, server := createSocketPair(t, proto) sc, ok := server.(syscall.Conn) if !ok { @@ -765,12 +765,15 @@ func testGetPollFromReader(t *testing.T, proto string) { t.Fatalf("server SyscallConn error: %v", err) } if err = rc.Control(func(fd uintptr) { - pfd := GetPollFDForTest(server) + pfd, network := GetPollFDAndNetwork(server) if pfd == nil { - t.Fatalf("GetPollFDForTest didn't return poll.FD") + 
t.Fatalf("GetPollFDAndNetwork didn't return poll.FD") + } + if string(network) != proto { + t.Fatalf("GetPollFDAndNetwork returned wrong network, got: %s, want: %s", network, proto) } if pfd.Sysfd != int(fd) { - t.Fatalf("GetPollFDForTest returned wrong poll.FD, got: %d, want: %d", pfd.Sysfd, int(fd)) + t.Fatalf("GetPollFDAndNetwork returned wrong poll.FD, got: %d, want: %d", pfd.Sysfd, int(fd)) } if !pfd.IsStream { t.Fatalf("expected IsStream to be true") diff --git a/src/os/signal/doc.go b/src/os/signal/doc.go index a2a7525ef0..07ed9ce524 100644 --- a/src/os/signal/doc.go +++ b/src/os/signal/doc.go @@ -73,10 +73,10 @@ SIGTHAW, SIGLOST, SIGXRES, SIGJVM1, SIGJVM2, and any real time signals used on the system. Note that not all of these signals are available on all systems. -If the program was started with SIGHUP or SIGINT ignored, and Notify +If the program was started with SIGHUP or SIGINT ignored, and [Notify] is called for either signal, a signal handler will be installed for -that signal and it will no longer be ignored. If, later, Reset or -Ignore is called for that signal, or Stop is called on all channels +that signal and it will no longer be ignored. If, later, [Reset] or +[Ignore] is called for that signal, or [Stop] is called on all channels passed to Notify for that signal, the signal will once again be ignored. Reset will restore the system default behavior for the signal, while Ignore will cause the system to ignore the signal diff --git a/src/os/signal/signal.go b/src/os/signal/signal.go index 4250a7e0de..9a4cd64fb7 100644 --- a/src/os/signal/signal.go +++ b/src/os/signal/signal.go @@ -81,7 +81,7 @@ func cancel(sigs []os.Signal, action func(int)) { // Ignore causes the provided signals to be ignored. If they are received by // the program, nothing will happen. Ignore undoes the effect of any prior -// calls to Notify for the provided signals. +// calls to [Notify] for the provided signals. 
// If no signals are provided, all incoming signals will be ignored. func Ignore(sig ...os.Signal) { cancel(sig, ignoreSignal) @@ -113,7 +113,7 @@ var ( // // It is allowed to call Notify multiple times with the same channel: // each call expands the set of signals sent to that channel. -// The only way to remove signals from the set is to call Stop. +// The only way to remove signals from the set is to call [Stop]. // // It is allowed to call Notify multiple times with different channels // and the same signals: each channel receives copies of incoming @@ -167,7 +167,7 @@ func Notify(c chan<- os.Signal, sig ...os.Signal) { } } -// Reset undoes the effect of any prior calls to Notify for the provided +// Reset undoes the effect of any prior calls to [Notify] for the provided // signals. // If no signals are provided, all signal handlers will be reset. func Reset(sig ...os.Signal) { @@ -175,7 +175,7 @@ func Reset(sig ...os.Signal) { } // Stop causes package signal to stop relaying incoming signals to c. -// It undoes the effect of all prior calls to Notify using c. +// It undoes the effect of all prior calls to [Notify] using c. // When Stop returns, it is guaranteed that c will receive no more signals. func Stop(c chan<- os.Signal) { handlers.Lock() @@ -264,9 +264,9 @@ func process(sig os.Signal) { // when the returned stop function is called, or when the parent context's // Done channel is closed, whichever happens first. // -// The stop function unregisters the signal behavior, which, like signal.Reset, +// The stop function unregisters the signal behavior, which, like [signal.Reset], // may restore the default behavior for a given signal. For example, the default -// behavior of a Go program receiving os.Interrupt is to exit. Calling +// behavior of a Go program receiving [os.Interrupt] is to exit. Calling // NotifyContext(parent, os.Interrupt) will change the behavior to cancel // the returned context. 
Future interrupts received will not trigger the default // (exit) behavior until the returned stop function is called. diff --git a/src/os/stat.go b/src/os/stat.go index 11d9efa457..50acb6dbdd 100644 --- a/src/os/stat.go +++ b/src/os/stat.go @@ -6,17 +6,17 @@ package os import "internal/testlog" -// Stat returns a FileInfo describing the named file. -// If there is an error, it will be of type *PathError. +// Stat returns a [FileInfo] describing the named file. +// If there is an error, it will be of type [*PathError]. func Stat(name string) (FileInfo, error) { testlog.Stat(name) return statNolog(name) } -// Lstat returns a FileInfo describing the named file. +// Lstat returns a [FileInfo] describing the named file. // If the file is a symbolic link, the returned FileInfo // describes the symbolic link. Lstat makes no attempt to follow the link. -// If there is an error, it will be of type *PathError. +// If there is an error, it will be of type [*PathError]. // // On Windows, if the file is a reparse point that is a surrogate for another // named entity (such as a symbolic link or mounted folder), the returned diff --git a/src/os/stat_test.go b/src/os/stat_test.go index 96019699aa..e79f6a90c6 100644 --- a/src/os/stat_test.go +++ b/src/os/stat_test.go @@ -9,18 +9,25 @@ import ( "io/fs" "os" "path/filepath" + "runtime" "testing" ) +type testStatAndLstatParams struct { + isLink bool + statCheck func(*testing.T, string, fs.FileInfo) + lstatCheck func(*testing.T, string, fs.FileInfo) +} + // testStatAndLstat verifies that all os.Stat, os.Lstat os.File.Stat and os.Readdir work. 
-func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCheck func(*testing.T, string, fs.FileInfo)) { +func testStatAndLstat(t *testing.T, path string, params testStatAndLstatParams) { // test os.Stat sfi, err := os.Stat(path) if err != nil { t.Error(err) return } - statCheck(t, path, sfi) + params.statCheck(t, path, sfi) // test os.Lstat lsfi, err := os.Lstat(path) @@ -28,9 +35,9 @@ func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCh t.Error(err) return } - lstatCheck(t, path, lsfi) + params.lstatCheck(t, path, lsfi) - if isLink { + if params.isLink { if os.SameFile(sfi, lsfi) { t.Errorf("stat and lstat of %q should not be the same", path) } @@ -53,13 +60,13 @@ func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCh t.Error(err) return } - statCheck(t, path, sfi2) + params.statCheck(t, path, sfi2) if !os.SameFile(sfi, sfi2) { t.Errorf("stat of open %q file and stat of %q should be the same", path, path) } - if isLink { + if params.isLink { if os.SameFile(sfi2, lsfi) { t.Errorf("stat of opened %q file and lstat of %q should not be the same", path, path) } @@ -69,12 +76,13 @@ func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCh } } - // test fs.FileInfo returned by os.Readdir - if len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) { - // skip os.Readdir test of directories with slash at the end + parentdir, base := filepath.Split(path) + if parentdir == "" || base == "" { + // skip os.Readdir test of files without directory or file name component, + // such as directories with slash at the end or Windows device names. 
return } - parentdir := filepath.Dir(path) + parent, err := os.Open(parentdir) if err != nil { t.Error(err) @@ -88,7 +96,6 @@ func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCh return } var lsfi2 fs.FileInfo - base := filepath.Base(path) for _, fi2 := range fis { if fi2.Name() == base { lsfi2 = fi2 @@ -99,7 +106,7 @@ func testStatAndLstat(t *testing.T, path string, isLink bool, statCheck, lstatCh t.Errorf("failed to find %q in its parent", path) return } - lstatCheck(t, path, lsfi2) + params.lstatCheck(t, path, lsfi2) if !os.SameFile(lsfi, lsfi2) { t.Errorf("lstat of %q file in %q directory and %q should be the same", lsfi2.Name(), parentdir, path) @@ -140,19 +147,34 @@ func testIsFile(t *testing.T, path string, fi fs.FileInfo) { } func testDirStats(t *testing.T, path string) { - testStatAndLstat(t, path, false, testIsDir, testIsDir) + params := testStatAndLstatParams{ + isLink: false, + statCheck: testIsDir, + lstatCheck: testIsDir, + } + testStatAndLstat(t, path, params) } func testFileStats(t *testing.T, path string) { - testStatAndLstat(t, path, false, testIsFile, testIsFile) + params := testStatAndLstatParams{ + isLink: false, + statCheck: testIsFile, + lstatCheck: testIsFile, + } + testStatAndLstat(t, path, params) } func testSymlinkStats(t *testing.T, path string, isdir bool) { - if isdir { - testStatAndLstat(t, path, true, testIsDir, testIsSymlink) - } else { - testStatAndLstat(t, path, true, testIsFile, testIsSymlink) + params := testStatAndLstatParams{ + isLink: true, + lstatCheck: testIsSymlink, } + if isdir { + params.statCheck = testIsDir + } else { + params.statCheck = testIsFile + } + testStatAndLstat(t, path, params) } func testSymlinkSameFile(t *testing.T, path, link string) { @@ -294,3 +316,24 @@ func TestSymlinkWithTrailingSlash(t *testing.T) { t.Errorf("os.Stat(%q) and os.Stat(%q) are not the same file", dir, dirlinkWithSlash) } } + +func TestStatConsole(t *testing.T) { + if runtime.GOOS != "windows" { + 
t.Skip("skipping on non-Windows") + } + t.Parallel() + consoleNames := []string{ + "CONIN$", + "CONOUT$", + "CON", + } + for _, name := range consoleNames { + params := testStatAndLstatParams{ + isLink: false, + statCheck: testIsFile, + lstatCheck: testIsFile, + } + testStatAndLstat(t, name, params) + testStatAndLstat(t, `\\.\`+name, params) + } +} diff --git a/src/os/stat_unix.go b/src/os/stat_unix.go index 431df33fae..486a16413e 100644 --- a/src/os/stat_unix.go +++ b/src/os/stat_unix.go @@ -10,8 +10,8 @@ import ( "syscall" ) -// Stat returns the FileInfo structure describing file. -// If there is an error, it will be of type *PathError. +// Stat returns the [FileInfo] structure describing file. +// If there is an error, it will be of type [*PathError]. func (f *File) Stat() (FileInfo, error) { if f == nil { return nil, ErrInvalid diff --git a/src/os/stat_windows.go b/src/os/stat_windows.go index 668255f74a..fd948ab0e3 100644 --- a/src/os/stat_windows.go +++ b/src/os/stat_windows.go @@ -10,8 +10,8 @@ import ( "unsafe" ) -// Stat returns the FileInfo structure describing file. -// If there is an error, it will be of type *PathError. +// Stat returns the [FileInfo] structure describing file. +// If there is an error, it will be of type [*PathError]. func (file *File) Stat() (FileInfo, error) { if file == nil { return nil, ErrInvalid @@ -33,6 +33,15 @@ func stat(funcname, name string, followSurrogates bool) (FileInfo, error) { // See https://golang.org/issues/19922#issuecomment-300031421 for details. var fa syscall.Win32FileAttributeData err = syscall.GetFileAttributesEx(namep, syscall.GetFileExInfoStandard, (*byte)(unsafe.Pointer(&fa))) + if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { + // Not a surrogate for another named entity, because it isn't any kind of reparse point. + // The information we got from GetFileAttributesEx is good enough for now. 
+ fs := newFileStatFromWin32FileAttributeData(&fa) + if err := fs.saveInfoFromPath(name); err != nil { + return nil, err + } + return fs, nil + } // GetFileAttributesEx fails with ERROR_SHARING_VIOLATION error for // files like c:\pagefile.sys. Use FindFirstFile for such files. @@ -53,28 +62,20 @@ func stat(funcname, name string, followSurrogates bool) (FileInfo, error) { } } - if err == nil && fa.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT == 0 { - // Not a surrogate for another named entity, because it isn't any kind of reparse point. - // The information we got from GetFileAttributesEx is good enough for now. - fs := &fileStat{ - FileAttributes: fa.FileAttributes, - CreationTime: fa.CreationTime, - LastAccessTime: fa.LastAccessTime, - LastWriteTime: fa.LastWriteTime, - FileSizeHigh: fa.FileSizeHigh, - FileSizeLow: fa.FileSizeLow, - } - if err := fs.saveInfoFromPath(name); err != nil { - return nil, err - } - return fs, nil - } - // Use CreateFile to determine whether the file is a name surrogate and, if so, // save information about the link target. // Set FILE_FLAG_BACKUP_SEMANTICS so that CreateFile will create the handle // even if name refers to a directory. - h, err := syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS|syscall.FILE_FLAG_OPEN_REPARSE_POINT, 0) + var flags uint32 = syscall.FILE_FLAG_BACKUP_SEMANTICS | syscall.FILE_FLAG_OPEN_REPARSE_POINT + h, err := syscall.CreateFile(namep, 0, 0, nil, syscall.OPEN_EXISTING, flags, 0) + + if err == windows.ERROR_INVALID_PARAMETER { + // Console handles, like "\\.\con", require generic read access. See + // https://learn.microsoft.com/en-us/windows/win32/api/fileapi/nf-fileapi-createfilew#consoles. + // We haven't set it previously because it is normally not required + // to read attributes and some files may not allow it. 
+ h, err = syscall.CreateFile(namep, syscall.GENERIC_READ, 0, nil, syscall.OPEN_EXISTING, flags, 0) + } if err != nil { // Since CreateFile failed, we can't determine whether name refers to a // name surrogate, or some other kind of reparse point. Since we can't return a diff --git a/src/os/tempfile.go b/src/os/tempfile.go index 315f65ad9c..5ccc006296 100644 --- a/src/os/tempfile.go +++ b/src/os/tempfile.go @@ -8,23 +8,26 @@ import ( "errors" "internal/bytealg" "internal/itoa" + _ "unsafe" // for go:linkname ) -// fastrand provided by runtime. +// random number source provided by runtime. // We generate random temporary file names so that there's a good // chance the file doesn't exist yet - keeps the number of tries in // TempFile to a minimum. -func fastrand() uint32 +// +//go:linkname runtime_rand runtime.rand +func runtime_rand() uint64 func nextRandom() string { - return itoa.Uitoa(uint(fastrand())) + return itoa.Uitoa(uint(uint32(runtime_rand()))) } // CreateTemp creates a new temporary file in the directory dir, // opens the file for reading and writing, and returns the resulting file. // The filename is generated by taking pattern and adding a random string to the end. // If pattern includes a "*", the random string replaces the last "*". -// If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by TempDir. +// If dir is the empty string, CreateTemp uses the default directory for temporary files, as returned by [TempDir]. // Multiple programs or goroutines calling CreateTemp simultaneously will not choose the same file. // The caller can use the file's Name method to find the pathname of the file. // It is the caller's responsibility to remove the file when it is no longer needed. 
diff --git a/src/os/types.go b/src/os/types.go index d8edd98b68..66eb8bc8cb 100644 --- a/src/os/types.go +++ b/src/os/types.go @@ -17,17 +17,17 @@ type File struct { *file // os specific } -// A FileInfo describes a file and is returned by Stat and Lstat. +// A FileInfo describes a file and is returned by [Stat] and [Lstat]. type FileInfo = fs.FileInfo // A FileMode represents a file's mode and permission bits. // The bits have the same definition on all systems, so that // information about files can be moved from one system // to another portably. Not all bits apply to all systems. -// The only required bit is ModeDir for directories. +// The only required bit is [ModeDir] for directories. type FileMode = fs.FileMode -// The defined file mode bits are the most significant bits of the FileMode. +// The defined file mode bits are the most significant bits of the [FileMode]. // The nine least-significant bits are the standard Unix rwxrwxrwx permissions. // The values of these bits should be considered part of the public API and // may be used in wire protocols or disk representations: they must not be @@ -62,7 +62,7 @@ func (fs *fileStat) IsDir() bool { return fs.Mode().IsDir() } // For example, on Unix this means that the device and inode fields // of the two underlying structures are identical; on other systems // the decision may be based on the path names. -// SameFile only applies to results returned by this package's Stat. +// SameFile only applies to results returned by this package's [Stat]. // It returns false in other cases. 
func SameFile(fi1, fi2 FileInfo) bool { fs1, ok1 := fi1.(*fileStat) diff --git a/src/os/types_windows.go b/src/os/types_windows.go index 6b9fef6c12..c4a8721924 100644 --- a/src/os/types_windows.go +++ b/src/os/types_windows.go @@ -5,6 +5,7 @@ package os import ( + "internal/godebug" "internal/syscall/windows" "sync" "syscall" @@ -48,18 +49,14 @@ func newFileStatFromGetFileInformationByHandle(path string, h syscall.Handle) (f return nil, &PathError{Op: "GetFileInformationByHandle", Path: path, Err: err} } - var ti windows.FILE_ATTRIBUTE_TAG_INFO - err = windows.GetFileInformationByHandleEx(h, windows.FileAttributeTagInfo, (*byte)(unsafe.Pointer(&ti)), uint32(unsafe.Sizeof(ti))) - if err != nil { - if errno, ok := err.(syscall.Errno); ok && errno == windows.ERROR_INVALID_PARAMETER { - // It appears calling GetFileInformationByHandleEx with - // FILE_ATTRIBUTE_TAG_INFO fails on FAT file system with - // ERROR_INVALID_PARAMETER. Clear ti.ReparseTag in that - // instance to indicate no symlinks are possible. - ti.ReparseTag = 0 - } else { + var reparseTag uint32 + if d.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + var ti windows.FILE_ATTRIBUTE_TAG_INFO + err = windows.GetFileInformationByHandleEx(h, windows.FileAttributeTagInfo, (*byte)(unsafe.Pointer(&ti)), uint32(unsafe.Sizeof(ti))) + if err != nil { return nil, &PathError{Op: "GetFileInformationByHandleEx", Path: path, Err: err} } + reparseTag = ti.ReparseTag } return &fileStat{ @@ -73,13 +70,26 @@ func newFileStatFromGetFileInformationByHandle(path string, h syscall.Handle) (f vol: d.VolumeSerialNumber, idxhi: d.FileIndexHigh, idxlo: d.FileIndexLow, - ReparseTag: ti.ReparseTag, + ReparseTag: reparseTag, // fileStat.path is used by os.SameFile to decide if it needs // to fetch vol, idxhi and idxlo. But these are already set, // so set fileStat.path to "" to prevent os.SameFile doing it again. 
}, nil } +// newFileStatFromWin32FileAttributeData copies all required information +// from syscall.Win32FileAttributeData d into the newly created fileStat. +func newFileStatFromWin32FileAttributeData(d *syscall.Win32FileAttributeData) *fileStat { + return &fileStat{ + FileAttributes: d.FileAttributes, + CreationTime: d.CreationTime, + LastAccessTime: d.LastAccessTime, + LastWriteTime: d.LastWriteTime, + FileSizeHigh: d.FileSizeHigh, + FileSizeLow: d.FileSizeLow, + } +} + // newFileStatFromFileIDBothDirInfo copies all required information // from windows.FILE_ID_BOTH_DIR_INFO d into the newly created fileStat. func newFileStatFromFileIDBothDirInfo(d *windows.FILE_ID_BOTH_DIR_INFO) *fileStat { @@ -142,50 +152,54 @@ func newFileStatFromWin32finddata(d *syscall.Win32finddata) *fileStat { // and https://learn.microsoft.com/en-us/windows/win32/fileio/reparse-point-tags. func (fs *fileStat) isReparseTagNameSurrogate() bool { // True for IO_REPARSE_TAG_SYMLINK and IO_REPARSE_TAG_MOUNT_POINT. - return fs.ReparseTag&0x20000000 != 0 -} - -func (fs *fileStat) isSymlink() bool { - // As of https://go.dev/cl/86556, we treat MOUNT_POINT reparse points as - // symlinks because otherwise certain directory junction tests in the - // path/filepath package would fail. - // - // However, - // https://learn.microsoft.com/en-us/windows/win32/fileio/hard-links-and-junctions - // seems to suggest that directory junctions should be treated like hard - // links, not symlinks. - // - // TODO(bcmills): Get more input from Microsoft on what the behavior ought to - // be for MOUNT_POINT reparse points. 
- - return fs.ReparseTag == syscall.IO_REPARSE_TAG_SYMLINK || - fs.ReparseTag == windows.IO_REPARSE_TAG_MOUNT_POINT + return fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 && fs.ReparseTag&0x20000000 != 0 } func (fs *fileStat) Size() int64 { return int64(fs.FileSizeHigh)<<32 + int64(fs.FileSizeLow) } +var winsymlink = godebug.New("winsymlink") + func (fs *fileStat) Mode() (m FileMode) { + if winsymlink.Value() == "0" { + return fs.modePreGo1_23() + } if fs.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 { m |= 0444 } else { m |= 0666 } - if fs.isSymlink() { - return m | ModeSymlink + + // Windows reports the FILE_ATTRIBUTE_DIRECTORY bit for reparse points + // that refer to directories, such as symlinks and mount points. + // However, we follow symlink POSIX semantics and do not set the mode bits. + // This allows users to walk directories without following links + // by just calling "fi, err := os.Lstat(name); err == nil && fi.IsDir()". + // Note that POSIX only defines the semantics for symlinks, not for + // mount points or other surrogate reparse points, but we treat them + // the same way for consistency. Also, mount points can contain infinite + // loops, so it is not safe to walk them without special handling. 
+ if !fs.isReparseTagNameSurrogate() { + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + m |= ModeDir | 0111 + } + + switch fs.filetype { + case syscall.FILE_TYPE_PIPE: + m |= ModeNamedPipe + case syscall.FILE_TYPE_CHAR: + m |= ModeDevice | ModeCharDevice + } } - if fs.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - m |= ModeDir | 0111 - } - switch fs.filetype { - case syscall.FILE_TYPE_PIPE: - m |= ModeNamedPipe - case syscall.FILE_TYPE_CHAR: - m |= ModeDevice | ModeCharDevice - } - if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 && m&ModeType == 0 { - if fs.ReparseTag == windows.IO_REPARSE_TAG_DEDUP { + + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + switch fs.ReparseTag { + case syscall.IO_REPARSE_TAG_SYMLINK: + m |= ModeSymlink + case windows.IO_REPARSE_TAG_AF_UNIX: + m |= ModeSocket + case windows.IO_REPARSE_TAG_DEDUP: // If the Data Deduplication service is enabled on Windows Server, its // Optimization job may convert regular files to IO_REPARSE_TAG_DEDUP // whenever that job runs. @@ -199,10 +213,48 @@ func (fs *fileStat) Mode() (m FileMode) { // raw device files on Linux, POSIX FIFO special files, and so on), so // to avoid files changing unpredictably from regular to irregular we will // consider DEDUP files to be close enough to regular to treat as such. - } else { + default: m |= ModeIrregular } } + return +} + +// modePreGo1_23 returns the FileMode for the fileStat, using the pre-Go 1.23 +// logic for determining the file mode. +// The logic is subtle and not well-documented, so it is better to keep it +// separate from the new logic. 
+func (fs *fileStat) modePreGo1_23() (m FileMode) { + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_READONLY != 0 { + m |= 0444 + } else { + m |= 0666 + } + if fs.ReparseTag == syscall.IO_REPARSE_TAG_SYMLINK || + fs.ReparseTag == windows.IO_REPARSE_TAG_MOUNT_POINT { + return m | ModeSymlink + } + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { + m |= ModeDir | 0111 + } + switch fs.filetype { + case syscall.FILE_TYPE_PIPE: + m |= ModeNamedPipe + case syscall.FILE_TYPE_CHAR: + m |= ModeDevice | ModeCharDevice + } + if fs.FileAttributes&syscall.FILE_ATTRIBUTE_REPARSE_POINT != 0 { + if fs.ReparseTag == windows.IO_REPARSE_TAG_AF_UNIX { + m |= ModeSocket + } + if m&ModeType == 0 { + if fs.ReparseTag == windows.IO_REPARSE_TAG_DEDUP { + // See comment in fs.Mode. + } else { + m |= ModeIrregular + } + } + } return m } diff --git a/src/os/user/lookup.go b/src/os/user/lookup.go index ed33d0c7cd..fb10b53938 100644 --- a/src/os/user/lookup.go +++ b/src/os/user/lookup.go @@ -35,7 +35,7 @@ var cache struct { } // Lookup looks up a user by username. If the user cannot be found, the -// returned error is of type UnknownUserError. +// returned error is of type [UnknownUserError]. func Lookup(username string) (*User, error) { if u, err := Current(); err == nil && u.Username == username { return u, err @@ -44,7 +44,7 @@ func Lookup(username string) (*User, error) { } // LookupId looks up a user by userid. If the user cannot be found, the -// returned error is of type UnknownUserIdError. +// returned error is of type [UnknownUserIdError]. func LookupId(uid string) (*User, error) { if u, err := Current(); err == nil && u.Uid == uid { return u, err @@ -53,13 +53,13 @@ func LookupId(uid string) (*User, error) { } // LookupGroup looks up a group by name. If the group cannot be found, the -// returned error is of type UnknownGroupError. +// returned error is of type [UnknownGroupError]. 
func LookupGroup(name string) (*Group, error) { return lookupGroup(name) } // LookupGroupId looks up a group by groupid. If the group cannot be found, the -// returned error is of type UnknownGroupIdError. +// returned error is of type [UnknownGroupIdError]. func LookupGroupId(gid string) (*Group, error) { return lookupGroupId(gid) } diff --git a/src/os/user/user.go b/src/os/user/user.go index 0307d2ad6a..952da3d8bd 100644 --- a/src/os/user/user.go +++ b/src/os/user/user.go @@ -63,14 +63,14 @@ type Group struct { Name string // group name } -// UnknownUserIdError is returned by LookupId when a user cannot be found. +// UnknownUserIdError is returned by [LookupId] when a user cannot be found. type UnknownUserIdError int func (e UnknownUserIdError) Error() string { return "user: unknown userid " + strconv.Itoa(int(e)) } -// UnknownUserError is returned by Lookup when +// UnknownUserError is returned by [Lookup] when // a user cannot be found. type UnknownUserError string @@ -78,7 +78,7 @@ func (e UnknownUserError) Error() string { return "user: unknown user " + string(e) } -// UnknownGroupIdError is returned by LookupGroupId when +// UnknownGroupIdError is returned by [LookupGroupId] when // a group cannot be found. type UnknownGroupIdError string @@ -86,7 +86,7 @@ func (e UnknownGroupIdError) Error() string { return "group: unknown groupid " + string(e) } -// UnknownGroupError is returned by LookupGroup when +// UnknownGroupError is returned by [LookupGroup] when // a group cannot be found. type UnknownGroupError string diff --git a/src/os/writeto_linux_test.go b/src/os/writeto_linux_test.go new file mode 100644 index 0000000000..5ffab88a2a --- /dev/null +++ b/src/os/writeto_linux_test.go @@ -0,0 +1,171 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package os_test + +import ( + "bytes" + "internal/poll" + "io" + "math/rand" + "net" + . 
"os" + "strconv" + "syscall" + "testing" + "time" +) + +func TestSendFile(t *testing.T) { + sizes := []int{ + 1, + 42, + 1025, + syscall.Getpagesize() + 1, + 32769, + } + t.Run("sendfile-to-unix", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSendFile(t, "unix", int64(size)) + }) + } + }) + t.Run("sendfile-to-tcp", func(t *testing.T) { + for _, size := range sizes { + t.Run(strconv.Itoa(size), func(t *testing.T) { + testSendFile(t, "tcp", int64(size)) + }) + } + }) +} + +func testSendFile(t *testing.T, proto string, size int64) { + dst, src, recv, data, hook := newSendFileTest(t, proto, size) + + // Now call WriteTo (through io.Copy), which will hopefully call poll.SendFile + n, err := io.Copy(dst, src) + if err != nil { + t.Fatalf("io.Copy error: %v", err) + } + + // We should have called poll.SendFile with the right file descriptor arguments. + if n > 0 && !hook.called { + t.Fatal("expected to have called poll.SendFile") + } + if hook.called && hook.srcfd != int(src.Fd()) { + t.Fatalf("wrong source file descriptor: got %d, want %d", hook.srcfd, src.Fd()) + } + sc, ok := dst.(syscall.Conn) + if !ok { + t.Fatalf("destination is not a syscall.Conn") + } + rc, err := sc.SyscallConn() + if err != nil { + t.Fatalf("destination SyscallConn error: %v", err) + } + if err = rc.Control(func(fd uintptr) { + if hook.called && hook.dstfd != int(fd) { + t.Fatalf("wrong destination file descriptor: got %d, want %d", hook.dstfd, int(fd)) + } + }); err != nil { + t.Fatalf("destination Conn Control error: %v", err) + } + + // Verify the data size and content. 
+ dataSize := len(data) + dstData := make([]byte, dataSize) + m, err := io.ReadFull(recv, dstData) + if err != nil { + t.Fatalf("server Conn Read error: %v", err) + } + if n != int64(dataSize) { + t.Fatalf("data length mismatch for io.Copy, got %d, want %d", n, dataSize) + } + if m != dataSize { + t.Fatalf("data length mismatch for net.Conn.Read, got %d, want %d", m, dataSize) + } + if !bytes.Equal(dstData, data) { + t.Errorf("data mismatch, got %s, want %s", dstData, data) + } +} + +// newSendFileTest initializes a new test for sendfile. +// +// It creates source file and destination sockets, and populates the source file +// with random data of the specified size. It also hooks package os' call +// to poll.Sendfile and returns the hook so it can be inspected. +func newSendFileTest(t *testing.T, proto string, size int64) (net.Conn, *File, net.Conn, []byte, *sendFileHook) { + t.Helper() + + hook := hookSendFile(t) + + client, server := createSocketPair(t, proto) + tempFile, data := createTempFile(t, size) + + return client, tempFile, server, data, hook +} + +func hookSendFile(t *testing.T) *sendFileHook { + h := new(sendFileHook) + h.install() + t.Cleanup(h.uninstall) + return h +} + +type sendFileHook struct { + called bool + dstfd int + srcfd int + remain int64 + + written int64 + handled bool + err error + + original func(dst *poll.FD, src int, remain int64) (int64, error, bool) +} + +func (h *sendFileHook) install() { + h.original = *PollSendFile + *PollSendFile = func(dst *poll.FD, src int, remain int64) (int64, error, bool) { + h.called = true + h.dstfd = dst.Sysfd + h.srcfd = src + h.remain = remain + h.written, h.err, h.handled = h.original(dst, src, remain) + return h.written, h.err, h.handled + } +} + +func (h *sendFileHook) uninstall() { + *PollSendFile = h.original +} + +func createTempFile(t *testing.T, size int64) (*File, []byte) { + f, err := CreateTemp(t.TempDir(), "writeto-sendfile-to-socket") + if err != nil { + t.Fatalf("failed to create 
temporary file: %v", err) + } + t.Cleanup(func() { + f.Close() + }) + + randSeed := time.Now().Unix() + t.Logf("random data seed: %d\n", randSeed) + prng := rand.New(rand.NewSource(randSeed)) + data := make([]byte, size) + prng.Read(data) + if _, err := f.Write(data); err != nil { + t.Fatalf("failed to create and feed the file: %v", err) + } + if err := f.Sync(); err != nil { + t.Fatalf("failed to save the file: %v", err) + } + if _, err := f.Seek(0, io.SeekStart); err != nil { + t.Fatalf("failed to rewind the file: %v", err) + } + + return f, data +} diff --git a/src/os/readfrom_linux.go b/src/os/zero_copy_linux.go similarity index 70% rename from src/os/readfrom_linux.go rename to src/os/zero_copy_linux.go index 7e8024028e..7c45aefeee 100644 --- a/src/os/readfrom_linux.go +++ b/src/os/zero_copy_linux.go @@ -13,8 +13,33 @@ import ( var ( pollCopyFileRange = poll.CopyFileRange pollSplice = poll.Splice + pollSendFile = poll.SendFile ) +func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) { + pfd, network := getPollFDAndNetwork(w) + // TODO(panjf2000): same as File.spliceToFile. + if pfd == nil || !pfd.IsStream || !isUnixOrTCP(string(network)) { + return + } + + sc, err := f.SyscallConn() + if err != nil { + return + } + + rerr := sc.Read(func(fd uintptr) (done bool) { + written, err, handled = pollSendFile(pfd, int(fd), 1<<63-1) + return true + }) + + if err == nil { + err = rerr + } + + return written, handled, wrapSyscallError("sendfile", err) +} + func (f *File) readFrom(r io.Reader) (written int64, handled bool, err error) { // Neither copy_file_range(2) nor splice(2) supports destinations opened with // O_APPEND, so don't bother to try zero-copy with these system calls. @@ -41,7 +66,7 @@ func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error return 0, true, nil } - pfd := getPollFD(r) + pfd, _ := getPollFDAndNetwork(r) // TODO(panjf2000): run some tests to see if we should unlock the non-streams for splice. 
// Streams benefit the most from the splice(2), non-streams are not even supported in old kernels // where splice(2) will just return EINVAL; newer kernels support non-streams like UDP, but I really @@ -63,25 +88,6 @@ func (f *File) spliceToFile(r io.Reader) (written int64, handled bool, err error return written, handled, wrapSyscallError(syscallName, err) } -// getPollFD tries to get the poll.FD from the given io.Reader by expecting -// the underlying type of r to be the implementation of syscall.Conn that contains -// a *net.rawConn. -func getPollFD(r io.Reader) *poll.FD { - sc, ok := r.(syscall.Conn) - if !ok { - return nil - } - rc, err := sc.SyscallConn() - if err != nil { - return nil - } - ipfd, ok := rc.(interface{ PollFD() *poll.FD }) - if !ok { - return nil - } - return ipfd.PollFD() -} - func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err error) { var ( remain int64 @@ -91,10 +97,16 @@ func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err erro return 0, true, nil } - src, ok := r.(*File) - if !ok { + var src *File + switch v := r.(type) { + case *File: + src = v + case fileWithoutWriteTo: + src = v.File + default: return 0, false, nil } + if src.checkValid("ReadFrom") != nil { // Avoid returning the error as we report handled as false, // leave further error handling as the responsibility of the caller. @@ -108,6 +120,28 @@ func (f *File) copyFileRange(r io.Reader) (written int64, handled bool, err erro return written, handled, wrapSyscallError("copy_file_range", err) } +// getPollFDAndNetwork tries to get the poll.FD and network type from the given interface +// by expecting the underlying type of i to be the implementation of syscall.Conn +// that contains a *net.rawConn. 
+func getPollFDAndNetwork(i any) (*poll.FD, poll.String) { + sc, ok := i.(syscall.Conn) + if !ok { + return nil, "" + } + rc, err := sc.SyscallConn() + if err != nil { + return nil, "" + } + irc, ok := rc.(interface { + PollFD() *poll.FD + Network() poll.String + }) + if !ok { + return nil, "" + } + return irc.PollFD(), irc.Network() +} + // tryLimitedReader tries to assert the io.Reader to io.LimitedReader, it returns the io.LimitedReader, // the underlying io.Reader and the remaining amount of bytes if the assertion succeeds, // otherwise it just returns the original io.Reader and the theoretical unlimited remaining amount of bytes. @@ -122,3 +156,12 @@ func tryLimitedReader(r io.Reader) (*io.LimitedReader, io.Reader, int64) { remain = lr.N return lr, lr.R, remain } + +func isUnixOrTCP(network string) bool { + switch network { + case "tcp", "tcp4", "tcp6", "unix": + return true + default: + return false + } +} diff --git a/src/os/readfrom_stub.go b/src/os/zero_copy_stub.go similarity index 74% rename from src/os/readfrom_stub.go rename to src/os/zero_copy_stub.go index 8b7d5fb8f9..9ec5808101 100644 --- a/src/os/readfrom_stub.go +++ b/src/os/zero_copy_stub.go @@ -8,6 +8,10 @@ package os import "io" +func (f *File) writeTo(w io.Writer) (written int64, handled bool, err error) { + return 0, false, nil +} + func (f *File) readFrom(r io.Reader) (n int64, handled bool, err error) { return 0, false, nil } diff --git a/src/path/filepath/match.go b/src/path/filepath/match.go index b5cc4b8cf3..12f0bfa7d3 100644 --- a/src/path/filepath/match.go +++ b/src/path/filepath/match.go @@ -35,7 +35,7 @@ var ErrBadPattern = errors.New("syntax error in pattern") // lo '-' hi matches character c for lo <= c <= hi // // Match requires pattern to match all of name, not just a substring. -// The only possible returned error is ErrBadPattern, when pattern +// The only possible returned error is [ErrBadPattern], when pattern // is malformed. // // On Windows, escaping is disabled. 
Instead, '\\' is treated as @@ -233,11 +233,11 @@ func getEsc(chunk string) (r rune, nchunk string, err error) { // Glob returns the names of all files matching pattern or nil // if there is no matching file. The syntax of patterns is the same -// as in Match. The pattern may describe hierarchical names such as -// /usr/*/bin/ed (assuming the Separator is '/'). +// as in [Match]. The pattern may describe hierarchical names such as +// /usr/*/bin/ed (assuming the [Separator] is '/'). // // Glob ignores file system errors such as I/O errors reading directories. -// The only possible returned error is ErrBadPattern, when pattern +// The only possible returned error is [ErrBadPattern], when pattern // is malformed. func Glob(pattern string) (matches []string, err error) { return globWithLimit(pattern, 0) diff --git a/src/path/filepath/path.go b/src/path/filepath/path.go index 3d693f840a..6c8a0aa8b3 100644 --- a/src/path/filepath/path.go +++ b/src/path/filepath/path.go @@ -13,6 +13,7 @@ package filepath import ( "errors" + "internal/safefilepath" "io/fs" "os" "slices" @@ -73,7 +74,7 @@ const ( // by purely lexical processing. It applies the following rules // iteratively until no further processing can be done: // -// 1. Replace multiple Separator elements with a single one. +// 1. Replace multiple [Separator] elements with a single one. // 2. Eliminate each . path name element (the current directory). // 3. Eliminate each inner .. path name element (the parent directory) // along with the non-.. element that precedes it. @@ -211,6 +212,18 @@ func unixIsLocal(path string) bool { return true } +// Localize converts a slash-separated path into an operating system path. +// The input path must be a valid path as reported by [io/fs.ValidPath]. +// +// Localize returns an error if the path cannot be represented by the operating system. +// For example, the path a\b is rejected on Windows, on which \ is a separator +// character and cannot be part of a filename. 
+// +// The path returned by Localize will always be local, as reported by IsLocal. +func Localize(path string) (string, error) { + return safefilepath.Localize(path) +} + // ToSlash returns the result of replacing each separator character // in path with a slash ('/') character. Multiple separators are // replaced by multiple slashes. @@ -224,6 +237,9 @@ func ToSlash(path string) string { // FromSlash returns the result of replacing each slash ('/') character // in path with a separator character. Multiple slashes are replaced // by multiple separators. +// +// See also the Localize function, which converts a slash-separated path +// as used by the io/fs package to an operating system path. func FromSlash(path string) string { if Separator == '/' { return path @@ -231,7 +247,7 @@ func FromSlash(path string) string { return strings.ReplaceAll(path, "/", string(Separator)) } -// SplitList splits a list of paths joined by the OS-specific ListSeparator, +// SplitList splits a list of paths joined by the OS-specific [ListSeparator], // usually found in PATH or GOPATH environment variables. // Unlike strings.Split, SplitList returns an empty slice when passed an empty // string. @@ -239,7 +255,7 @@ func SplitList(path string) []string { return splitList(path) } -// Split splits path immediately following the final Separator, +// Split splits path immediately following the final [Separator], // separating it into a directory and file name component. // If there is no Separator in path, Split returns an empty dir // and file set to path. @@ -254,7 +270,7 @@ func Split(path string) (dir, file string) { } // Join joins any number of path elements into a single path, -// separating them with an OS specific Separator. Empty elements +// separating them with an OS specific [Separator]. Empty elements // are ignored. The result is Cleaned. However, if the argument // list is empty or all its elements are empty, Join returns // an empty string. 
@@ -281,7 +297,7 @@ func Ext(path string) string { // links. // If path is relative the result will be relative to the current directory, // unless one of the components is an absolute symbolic link. -// EvalSymlinks calls Clean on the result. +// EvalSymlinks calls [Clean] on the result. func EvalSymlinks(path string) (string, error) { return evalSymlinks(path) } @@ -290,7 +306,7 @@ func EvalSymlinks(path string) (string, error) { // If the path is not absolute it will be joined with the current // working directory to turn it into an absolute path. The absolute // path name for a given file is not guaranteed to be unique. -// Abs calls Clean on the result. +// Abs calls [Clean] on the result. func Abs(path string) (string, error) { return abs(path) } @@ -308,12 +324,12 @@ func unixAbs(path string) (string, error) { // Rel returns a relative path that is lexically equivalent to targpath when // joined to basepath with an intervening separator. That is, -// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself. +// [Join](basepath, Rel(basepath, targpath)) is equivalent to targpath itself. // On success, the returned path will always be relative to basepath, // even if basepath and targpath share no elements. // An error is returned if targpath can't be made relative to basepath or if // knowing the current working directory would be necessary to compute it. -// Rel calls Clean on the result. +// Rel calls [Clean] on the result. func Rel(basepath, targpath string) (string, error) { baseVol := VolumeName(basepath) targVol := VolumeName(targpath) @@ -386,17 +402,17 @@ func Rel(basepath, targpath string) (string, error) { return targ[t0:], nil } -// SkipDir is used as a return value from WalkFuncs to indicate that +// SkipDir is used as a return value from [WalkFunc] to indicate that // the directory named in the call is to be skipped. It is not returned // as an error by any function. 
var SkipDir error = fs.SkipDir -// SkipAll is used as a return value from WalkFuncs to indicate that +// SkipAll is used as a return value from [WalkFunc] to indicate that // all remaining files and directories are to be skipped. It is not returned // as an error by any function. var SkipAll error = fs.SkipAll -// WalkFunc is the type of the function called by Walk to visit each +// WalkFunc is the type of the function called by [Walk] to visit each // file or directory. // // The path argument contains the argument to Walk as a prefix. @@ -412,9 +428,9 @@ var SkipAll error = fs.SkipAll // The info argument is the fs.FileInfo for the named path. // // The error result returned by the function controls how Walk continues. -// If the function returns the special value SkipDir, Walk skips the +// If the function returns the special value [SkipDir], Walk skips the // current directory (path if info.IsDir() is true, otherwise path's -// parent directory). If the function returns the special value SkipAll, +// parent directory). If the function returns the special value [SkipAll], // Walk skips all remaining files and directories. Otherwise, if the function // returns a non-nil error, Walk stops entirely and returns that error. // @@ -425,14 +441,14 @@ var SkipAll error = fs.SkipAll // // Walk calls the function with a non-nil err argument in two cases. // -// First, if an os.Lstat on the root directory or any directory or file +// First, if an [os.Lstat] on the root directory or any directory or file // in the tree fails, Walk calls the function with path set to that // directory or file's path, info set to nil, and err set to the error // from os.Lstat. // // Second, if a directory's Readdirnames method fails, Walk calls the // function with path set to the directory's path, info, set to an -// fs.FileInfo describing the directory, and err set to the error from +// [fs.FileInfo] describing the directory, and err set to the error from // Readdirnames. 
type WalkFunc func(path string, info fs.FileInfo, err error) error @@ -514,7 +530,7 @@ func walk(path string, info fs.FileInfo, walkFn WalkFunc) error { // directory in the tree, including root. // // All errors that arise visiting files and directories are filtered by fn: -// see the fs.WalkDirFunc documentation for details. +// see the [fs.WalkDirFunc] documentation for details. // // The files are walked in lexical order, which makes the output deterministic // but requires WalkDir to read an entire directory into memory before proceeding @@ -542,7 +558,7 @@ func WalkDir(root string, fn fs.WalkDirFunc) error { // directory in the tree, including root. // // All errors that arise visiting files and directories are filtered by fn: -// see the WalkFunc documentation for details. +// see the [WalkFunc] documentation for details. // // The files are walked in lexical order, which makes the output deterministic // but requires Walk to read an entire directory into memory before proceeding @@ -550,7 +566,7 @@ func WalkDir(root string, fn fs.WalkDirFunc) error { // // Walk does not follow symbolic links. // -// Walk is less efficient than WalkDir, introduced in Go 1.16, +// Walk is less efficient than [WalkDir], introduced in Go 1.16, // which avoids calling os.Lstat on every visited file or directory. func Walk(root string, fn WalkFunc) error { info, err := os.Lstat(root) @@ -611,7 +627,7 @@ func Base(path string) string { } // Dir returns all but the last element of path, typically the path's directory. -// After dropping the final element, Dir calls Clean on the path and trailing +// After dropping the final element, Dir calls [Clean] on the path and trailing // slashes are removed. // If the path is empty, Dir returns ".". // If the path consists entirely of separators, Dir returns a single separator. 
diff --git a/src/path/filepath/path_test.go b/src/path/filepath/path_test.go index ed3990859b..8a66538f6a 100644 --- a/src/path/filepath/path_test.go +++ b/src/path/filepath/path_test.go @@ -237,6 +237,73 @@ func TestIsLocal(t *testing.T) { } } +type LocalizeTest struct { + path string + want string +} + +var localizetests = []LocalizeTest{ + {"", ""}, + {".", "."}, + {"..", ""}, + {"a/..", ""}, + {"/", ""}, + {"/a", ""}, + {"a\xffb", ""}, + {"a/", ""}, + {"a/./b", ""}, + {"\x00", ""}, + {"a", "a"}, + {"a/b/c", "a/b/c"}, +} + +var plan9localizetests = []LocalizeTest{ + {"#a", ""}, + {`a\b:c`, `a\b:c`}, +} + +var unixlocalizetests = []LocalizeTest{ + {"#a", "#a"}, + {`a\b:c`, `a\b:c`}, +} + +var winlocalizetests = []LocalizeTest{ + {"#a", "#a"}, + {"c:", ""}, + {`a\b`, ""}, + {`a:b`, ""}, + {`a/b:c`, ""}, + {`NUL`, ""}, + {`a/NUL`, ""}, + {`./com1`, ""}, + {`a/nul/b`, ""}, +} + +func TestLocalize(t *testing.T) { + tests := localizetests + switch runtime.GOOS { + case "plan9": + tests = append(tests, plan9localizetests...) + case "windows": + tests = append(tests, winlocalizetests...) + for i := range tests { + tests[i].want = filepath.FromSlash(tests[i].want) + } + default: + tests = append(tests, unixlocalizetests...) + } + for _, test := range tests { + got, err := filepath.Localize(test.path) + wantErr := "" + if test.want == "" { + wantErr = "error" + } + if got != test.want || ((err == nil) != (test.want != "")) { + t.Errorf("IsLocal(%q) = %q, %v want %q, %v", test.path, got, err, test.want, wantErr) + } + } +} + const sep = filepath.Separator var slashtests = []PathTest{ @@ -1403,6 +1470,9 @@ func TestAbs(t *testing.T) { } } + // Make sure the global absTests slice is not + // modified by multiple invocations of TestAbs. + tests := absTests if runtime.GOOS == "windows" { vol := filepath.VolumeName(root) var extra []string @@ -1413,7 +1483,7 @@ func TestAbs(t *testing.T) { path = vol + path extra = append(extra, path) } - absTests = append(absTests, extra...) 
+ tests = append(slices.Clip(tests), extra...) } err = os.Chdir(absTestDirs[0]) @@ -1421,7 +1491,7 @@ func TestAbs(t *testing.T) { t.Fatal("chdir failed: ", err) } - for _, path := range absTests { + for _, path := range tests { path = strings.ReplaceAll(path, "$", root) info, err := os.Stat(path) if err != nil { @@ -1910,3 +1980,16 @@ func TestEscaping(t *testing.T) { } } } + +func TestEvalSymlinksTooManyLinks(t *testing.T) { + testenv.MustHaveSymlink(t) + dir := filepath.Join(t.TempDir(), "dir") + err := os.Symlink(dir, dir) + if err != nil { + t.Fatal(err) + } + _, err = filepath.EvalSymlinks(dir) + if err == nil { + t.Fatal("expected error, got nil") + } +} diff --git a/src/path/filepath/path_windows.go b/src/path/filepath/path_windows.go index eacab0e5ce..6adb7d4bc4 100644 --- a/src/path/filepath/path_windows.go +++ b/src/path/filepath/path_windows.go @@ -277,46 +277,6 @@ func join(elem []string) string { return Clean(b.String()) } -// joinNonEmpty is like join, but it assumes that the first element is non-empty. -func joinNonEmpty(elem []string) string { - if len(elem[0]) == 2 && elem[0][1] == ':' { - // First element is drive letter without terminating slash. - // Keep path relative to current directory on that drive. - // Skip empty elements. - i := 1 - for ; i < len(elem); i++ { - if elem[i] != "" { - break - } - } - return Clean(elem[0] + strings.Join(elem[i:], string(Separator))) - } - // The following logic prevents Join from inadvertently creating a - // UNC path on Windows. Unless the first element is a UNC path, Join - // shouldn't create a UNC path. See golang.org/issue/9167. - p := Clean(strings.Join(elem, string(Separator))) - if !isUNC(p) { - return p - } - // p == UNC only allowed when the first element is a UNC path. - head := Clean(elem[0]) - if isUNC(head) { - return p - } - // head + tail == UNC, but joining two non-UNC paths should not result - // in a UNC path. Undo creation of UNC path. 
- tail := Clean(strings.Join(elem[1:], string(Separator))) - if head[len(head)-1] == Separator { - return head + tail - } - return head + string(Separator) + tail -} - -// isUNC reports whether path is a UNC path. -func isUNC(path string) bool { - return len(path) > 1 && isSlash(path[0]) && isSlash(path[1]) -} - func sameWord(a, b string) bool { return strings.EqualFold(a, b) } diff --git a/src/path/filepath/path_windows_test.go b/src/path/filepath/path_windows_test.go index 42aeb4f619..2862f390d0 100644 --- a/src/path/filepath/path_windows_test.go +++ b/src/path/filepath/path_windows_test.go @@ -7,6 +7,7 @@ package filepath_test import ( "flag" "fmt" + "internal/godebug" "internal/testenv" "io/fs" "os" @@ -486,6 +487,110 @@ func TestWalkDirectorySymlink(t *testing.T) { testWalkMklink(t, "D") } +func createMountPartition(t *testing.T, vhd string, args string) []byte { + testenv.MustHaveExecPath(t, "powershell") + t.Cleanup(func() { + cmd := testenv.Command(t, "powershell", "-Command", fmt.Sprintf("Dismount-VHD %q", vhd)) + out, err := cmd.CombinedOutput() + if err != nil { + if t.Skipped() { + // Probably failed to dismount because we never mounted it in + // the first place. Log the error, but ignore it. + t.Logf("%v: %v (skipped)\n%s", cmd, err, out) + } else { + // Something went wrong, and we don't want to leave dangling VHDs. + // Better to fail the test than to just log the error and continue. 
+ t.Errorf("%v: %v\n%s", cmd, err, out) + } + } + }) + + script := filepath.Join(t.TempDir(), "test.ps1") + cmd := strings.Join([]string{ + "$ErrorActionPreference = \"Stop\"", + fmt.Sprintf("$vhd = New-VHD -Path %q -SizeBytes 3MB -Fixed", vhd), + "$vhd | Mount-VHD", + fmt.Sprintf("$vhd = Get-VHD %q", vhd), + "$vhd | Get-Disk | Initialize-Disk -PartitionStyle GPT", + "$part = $vhd | Get-Disk | New-Partition -UseMaximumSize -AssignDriveLetter:$false", + "$vol = $part | Format-Volume -FileSystem NTFS", + args, + }, "\n") + + err := os.WriteFile(script, []byte(cmd), 0666) + if err != nil { + t.Fatal(err) + } + output, err := testenv.Command(t, "powershell", "-File", script).CombinedOutput() + if err != nil { + // This can happen if Hyper-V is not installed or enabled. + t.Skip("skipping test because failed to create VHD: ", err, string(output)) + } + return output +} + +var winsymlink = godebug.New("winsymlink") +var winreadlinkvolume = godebug.New("winreadlinkvolume") + +func TestEvalSymlinksJunctionToVolumeID(t *testing.T) { + // Test that EvalSymlinks resolves a directory junction which + // is mapped to volumeID (instead of drive letter). See go.dev/issue/39786. 
+ if winsymlink.Value() == "0" { + t.Skip("skipping test because winsymlink is not enabled") + } + t.Parallel() + + output, _ := exec.Command("cmd", "/c", "mklink", "/?").Output() + if !strings.Contains(string(output), " /J ") { + t.Skip("skipping test because mklink command does not support junctions") + } + + tmpdir := tempDirCanonical(t) + vhd := filepath.Join(tmpdir, "Test.vhdx") + output = createMountPartition(t, vhd, "Write-Host $vol.Path -NoNewline") + vol := string(output) + + dirlink := filepath.Join(tmpdir, "dirlink") + output, err := testenv.Command(t, "cmd", "/c", "mklink", "/J", dirlink, vol).CombinedOutput() + if err != nil { + t.Fatalf("failed to run mklink %v %v: %v %q", dirlink, vol, err, output) + } + got, err := filepath.EvalSymlinks(dirlink) + if err != nil { + t.Fatal(err) + } + if got != dirlink { + t.Errorf(`EvalSymlinks(%q): got %q, want %q`, dirlink, got, dirlink) + } +} + +func TestEvalSymlinksMountPointRecursion(t *testing.T) { + // Test that EvalSymlinks doesn't follow recursive mount points. + // See go.dev/issue/40176. 
+ if winsymlink.Value() == "0" { + t.Skip("skipping test because winsymlink is not enabled") + } + t.Parallel() + + tmpdir := tempDirCanonical(t) + dirlink := filepath.Join(tmpdir, "dirlink") + err := os.Mkdir(dirlink, 0755) + if err != nil { + t.Fatal(err) + } + + vhd := filepath.Join(tmpdir, "Test.vhdx") + createMountPartition(t, vhd, fmt.Sprintf("$part | Add-PartitionAccessPath -AccessPath %q\n", dirlink)) + + got, err := filepath.EvalSymlinks(dirlink) + if err != nil { + t.Fatal(err) + } + if got != dirlink { + t.Errorf(`EvalSymlinks(%q): got %q, want %q`, dirlink, got, dirlink) + } +} + func TestNTNamespaceSymlink(t *testing.T) { output, _ := exec.Command("cmd", "/c", "mklink", "/?").Output() if !strings.Contains(string(output), " /J ") { @@ -511,7 +616,17 @@ func TestNTNamespaceSymlink(t *testing.T) { if err != nil { t.Fatal(err) } - if want := vol + `\`; got != want { + var want string + if winsymlink.Value() == "0" { + if winreadlinkvolume.Value() == "0" { + want = vol + `\` + } else { + want = target + } + } else { + want = dirlink + } + if got != want { t.Errorf(`EvalSymlinks(%q): got %q, want %q`, dirlink, got, want) } @@ -524,7 +639,7 @@ func TestNTNamespaceSymlink(t *testing.T) { t.Fatal(err) } - target += file[len(filepath.VolumeName(file)):] + target = filepath.Join(target, file[len(filepath.VolumeName(file)):]) filelink := filepath.Join(tmpdir, "filelink") output, err = exec.Command("cmd", "/c", "mklink", filelink, target).CombinedOutput() @@ -536,7 +651,13 @@ func TestNTNamespaceSymlink(t *testing.T) { if err != nil { t.Fatal(err) } - if want := file; got != want { + + if winreadlinkvolume.Value() == "0" { + want = file + } else { + want = target + } + if got != want { t.Errorf(`EvalSymlinks(%q): got %q, want %q`, filelink, got, want) } } diff --git a/src/path/match.go b/src/path/match.go index 673bbc7ff6..d8b6809568 100644 --- a/src/path/match.go +++ b/src/path/match.go @@ -32,7 +32,7 @@ var ErrBadPattern = errors.New("syntax error in pattern") // 
lo '-' hi matches character c for lo <= c <= hi // // Match requires pattern to match all of name, not just a substring. -// The only possible returned error is ErrBadPattern, when pattern +// The only possible returned error is [ErrBadPattern], when pattern // is malformed. func Match(pattern, name string) (matched bool, err error) { Pattern: diff --git a/src/path/path.go b/src/path/path.go index 50065ac731..5149a92c4f 100644 --- a/src/path/path.go +++ b/src/path/path.go @@ -214,7 +214,7 @@ func IsAbs(path string) bool { } // Dir returns all but the last element of path, typically the path's directory. -// After dropping the final element using Split, the path is Cleaned and trailing +// After dropping the final element using [Split], the path is Cleaned and trailing // slashes are removed. // If the path is empty, Dir returns ".". // If the path consists entirely of slashes followed by non-slash bytes, Dir diff --git a/src/reflect/all_test.go b/src/reflect/all_test.go index 71969106e4..6e5c7d12e2 100644 --- a/src/reflect/all_test.go +++ b/src/reflect/all_test.go @@ -1500,6 +1500,12 @@ func TestIsZero(t *testing.T) { {setField(struct{ _, a, _ func() }{}, 0*unsafe.Sizeof((func())(nil)), func() {}), true}, {setField(struct{ _, a, _ func() }{}, 1*unsafe.Sizeof((func())(nil)), func() {}), false}, {setField(struct{ _, a, _ func() }{}, 2*unsafe.Sizeof((func())(nil)), func() {}), true}, + {struct{ a [256]S }{}, true}, + {struct{ a [256]S }{a: [256]S{2: {i1: 1}}}, false}, + {struct{ a [256]float32 }{}, true}, + {struct{ a [256]float32 }{a: [256]float32{2: 1.0}}, false}, + {struct{ _, a [256]S }{}, true}, + {setField(struct{ _, a [256]S }{}, 0*unsafe.Sizeof(int64(0)), int64(1)), true}, // UnsafePointer {(unsafe.Pointer)(nil), true}, {(unsafe.Pointer)(new(int)), false}, @@ -1541,7 +1547,7 @@ func TestIsZero(t *testing.T) { func TestInternalIsZero(t *testing.T) { b := make([]byte, 512) for a := 0; a < 8; a++ { - for i := 256 + 7; i <= 512-a; i++ { + for i := 1; i <= 512-a; 
i++ { InternalIsZero(b[a : a+i]) } } @@ -4749,7 +4755,7 @@ func TestConvertSlice2Array(t *testing.T) { // Converting a slice to non-empty array needs to return // a non-addressable copy of the original memory. if v.CanAddr() { - t.Fatalf("convert slice to non-empty array returns a addressable copy array") + t.Fatalf("convert slice to non-empty array returns an addressable copy array") } for i := range s { ov.Index(i).Set(ValueOf(i + 1)) @@ -4821,7 +4827,7 @@ func TestComparable(t *testing.T) { } } -func TestOverflow(t *testing.T) { +func TestValueOverflow(t *testing.T) { if ovf := V(float64(0)).OverflowFloat(1e300); ovf { t.Errorf("%v wrongly overflows float64", 1e300) } @@ -4860,6 +4866,45 @@ func TestOverflow(t *testing.T) { } } +func TestTypeOverflow(t *testing.T) { + if ovf := TypeFor[float64]().OverflowFloat(1e300); ovf { + t.Errorf("%v wrongly overflows float64", 1e300) + } + + maxFloat32 := float64((1<<24 - 1) << (127 - 23)) + if ovf := TypeFor[float32]().OverflowFloat(maxFloat32); ovf { + t.Errorf("%v wrongly overflows float32", maxFloat32) + } + ovfFloat32 := float64((1<<24-1)<<(127-23) + 1<<(127-52)) + if ovf := TypeFor[float32]().OverflowFloat(ovfFloat32); !ovf { + t.Errorf("%v should overflow float32", ovfFloat32) + } + if ovf := TypeFor[float32]().OverflowFloat(-ovfFloat32); !ovf { + t.Errorf("%v should overflow float32", -ovfFloat32) + } + + maxInt32 := int64(0x7fffffff) + if ovf := TypeFor[int32]().OverflowInt(maxInt32); ovf { + t.Errorf("%v wrongly overflows int32", maxInt32) + } + if ovf := TypeFor[int32]().OverflowInt(-1 << 31); ovf { + t.Errorf("%v wrongly overflows int32", -int64(1)<<31) + } + ovfInt32 := int64(1 << 31) + if ovf := TypeFor[int32]().OverflowInt(ovfInt32); !ovf { + t.Errorf("%v should overflow int32", ovfInt32) + } + + maxUint32 := uint64(0xffffffff) + if ovf := TypeFor[uint32]().OverflowUint(maxUint32); ovf { + t.Errorf("%v wrongly overflows uint32", maxUint32) + } + ovfUint32 := uint64(1 << 32) + if ovf := 
TypeFor[uint32]().OverflowUint(ovfUint32); !ovf { + t.Errorf("%v should overflow uint32", ovfUint32) + } +} + func checkSameType(t *testing.T, x Type, y any) { if x != TypeOf(y) || TypeOf(Zero(x).Interface()) != TypeOf(y) { t.Errorf("did not find preexisting type for %s (vs %s)", TypeOf(x), TypeOf(y)) @@ -8021,6 +8066,7 @@ func TestValue_Comparable(t *testing.T) { var a int var s []int var i interface{} = a + var iNil interface{} var iSlice interface{} = s var iArrayFalse interface{} = [2]interface{}{1, map[int]int{}} var iArrayTrue interface{} = [2]interface{}{1, struct{ I interface{} }{1}} @@ -8029,6 +8075,11 @@ func TestValue_Comparable(t *testing.T) { comparable bool deref bool }{ + { + ValueOf(&iNil), + true, + true, + }, { ValueOf(32), true, diff --git a/src/reflect/asm_loong64.s b/src/reflect/asm_loong64.s index 341a6d55c1..520f0afdd5 100644 --- a/src/reflect/asm_loong64.s +++ b/src/reflect/asm_loong64.s @@ -7,34 +7,83 @@ #define REGCTXT R29 +// The frames of each of the two functions below contain two locals, at offsets +// that are known to the runtime. +// +// The first local is a bool called retValid with a whole pointer-word reserved +// for it on the stack. The purpose of this word is so that the runtime knows +// whether the stack-allocated return space contains valid values for stack +// scanning. +// +// The second local is an abi.RegArgs value whose offset is also known to the +// runtime, so that a stack map for it can be constructed, since it contains +// pointers visible to the GC. +#define LOCAL_RETVALID 40 +#define LOCAL_REGARGS 48 + +// The frame size of the functions below is +// 32 (args of callReflect) + 8 (bool + padding) + 392 (abi.RegArgs) = 432. + // makeFuncStub is the code half of the function returned by MakeFunc. // See the comment on the declaration of makeFuncStub in makefunc.go // for more details. // No arg size here, runtime pulls arg map out of the func value. 
-TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$40 +TEXT ·makeFuncStub(SB),(NOSPLIT|WRAPPER),$432 NO_LOCAL_POINTERS + ADDV $LOCAL_REGARGS, R3, R25 // spillArgs using R25 + JAL runtime·spillArgs(SB) + MOVV REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS + +#ifdef GOEXPERIMENT_regabiargs + MOVV REGCTXT, R4 + MOVV R25, R5 +#else MOVV REGCTXT, 8(R3) - MOVV $argframe+0(FP), R19 - MOVV R19, 16(R3) - MOVB R0, 40(R3) - ADDV $40, R3, R19 - MOVV R19, 24(R3) - MOVV R0, 32(R3) + MOVV R25, 16(R3) +#endif + JAL ·moveMakeFuncArgPtrs(SB) + MOVV 32(R3), REGCTXT // restore REGCTXT + + MOVV REGCTXT, 8(R3) + MOVV $argframe+0(FP), R20 + MOVV R20, 16(R3) + MOVV R0, LOCAL_RETVALID(R3) + ADDV $LOCAL_RETVALID, R3, R20 + MOVV R20, 24(R3) + ADDV $LOCAL_REGARGS, R3, R20 + MOVV R20, 32(R3) JAL ·callReflect(SB) + ADDV $LOCAL_REGARGS, R3, R25 //unspillArgs using R25 + JAL runtime·unspillArgs(SB) RET // methodValueCall is the code half of the function returned by makeMethodValue. // See the comment on the declaration of methodValueCall in makefunc.go // for more details. // No arg size here; runtime pulls arg map out of the func value. 
-TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$40 +TEXT ·methodValueCall(SB),(NOSPLIT|WRAPPER),$432 NO_LOCAL_POINTERS + ADDV $LOCAL_REGARGS, R3, R25 // spillArgs using R25 + JAL runtime·spillArgs(SB) + MOVV REGCTXT, 32(R3) // save REGCTXT > args of moveMakeFuncArgPtrs < LOCAL_REGARGS +#ifdef GOEXPERIMENT_regabiargs + MOVV REGCTXT, R4 + MOVV R25, R5 +#else MOVV REGCTXT, 8(R3) - MOVV $argframe+0(FP), R19 - MOVV R19, 16(R3) - MOVB R0, 40(R3) - ADDV $40, R3, R19 - MOVV R19, 24(R3) - MOVV R0, 32(R3) + MOVV R25, 16(R3) +#endif + JAL ·moveMakeFuncArgPtrs(SB) + MOVV 32(R3), REGCTXT // restore REGCTXT + MOVV REGCTXT, 8(R3) + MOVV $argframe+0(FP), R20 + MOVV R20, 16(R3) + MOVB R0, LOCAL_RETVALID(R3) + ADDV $LOCAL_RETVALID, R3, R20 + MOVV R20, 24(R3) + ADDV $LOCAL_REGARGS, R3, R20 + MOVV R20, 32(R3) // frame size to 32+SP as callreflect args) JAL ·callMethod(SB) + ADDV $LOCAL_REGARGS, R3, R25 // unspillArgs using R25 + JAL runtime·unspillArgs(SB) RET diff --git a/src/reflect/benchmark_test.go b/src/reflect/benchmark_test.go index 4aa47669a2..2e701b062e 100644 --- a/src/reflect/benchmark_test.go +++ b/src/reflect/benchmark_test.go @@ -126,6 +126,9 @@ func BenchmarkIsZero(b *testing.B) { type Int1024 struct { a [1024]int } + type Int512 struct { + a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16 [16]S + } s := struct { ArrayComparable [4]T ArrayIncomparable [4]_Complex @@ -137,6 +140,7 @@ func BenchmarkIsZero(b *testing.B) { Struct4Int Int4 ArrayStruct4Int_1024 [256]Int4 ArrayChanInt_1024 [1024]chan int + StructInt_512 Int512 }{} s.ArrayInt_1024_NoZero[512] = 1 source := ValueOf(s) diff --git a/src/reflect/deepequal.go b/src/reflect/deepequal.go index 961e170118..502ea9f146 100644 --- a/src/reflect/deepequal.go +++ b/src/reflect/deepequal.go @@ -39,7 +39,7 @@ func deepValueEqual(v1, v2 Value, visited map[visit]bool) bool { hard := func(v1, v2 Value) bool { switch v1.Kind() { case Pointer: - if v1.typ().PtrBytes == 0 { + if !v1.typ().Pointers() { // 
not-in-heap pointers can't be cyclic. // At least, all of our current uses of runtime/internal/sys.NotInHeap // have that property. The runtime ones aren't cyclic (and we don't use diff --git a/src/reflect/export_test.go b/src/reflect/export_test.go index 1648eb3624..902f4bfa10 100644 --- a/src/reflect/export_test.go +++ b/src/reflect/export_test.go @@ -63,7 +63,7 @@ func FuncLayout(t Type, rcvr Type) (frametype Type, argSize, retOffset uintptr, } // Expand frame type's GC bitmap into byte-map. - ptrs = ft.PtrBytes != 0 + ptrs = ft.Pointers() if ptrs { nptrs := ft.PtrBytes / goarch.PtrSize gcdata := ft.GcSlice(0, (nptrs+7)/8) @@ -166,3 +166,5 @@ func SetArgRegs(ints, floats int, floatSize uintptr) (oldInts, oldFloats int, ol var MethodValueCallCodePtr = methodValueCallCodePtr var InternalIsZero = isZero + +var IsRegularMemory = isRegularMemory diff --git a/src/reflect/swapper.go b/src/reflect/swapper.go index 1e8f4ed163..78f6a19e4a 100644 --- a/src/reflect/swapper.go +++ b/src/reflect/swapper.go @@ -34,7 +34,7 @@ func Swapper(slice any) func(i, j int) { typ := v.Type().Elem().common() size := typ.Size() - hasPtr := typ.PtrBytes != 0 + hasPtr := typ.Pointers() // Some common & small cases, without using memmove: if hasPtr { diff --git a/src/reflect/type.go b/src/reflect/type.go index a35898547a..56cecc80c6 100644 --- a/src/reflect/type.go +++ b/src/reflect/type.go @@ -225,6 +225,22 @@ type Type interface { // It panics if i is not in the range [0, NumOut()). Out(i int) Type + // OverflowComplex reports whether the complex128 x cannot be represented by type t. + // It panics if t's Kind is not Complex64 or Complex128. + OverflowComplex(x complex128) bool + + // OverflowFloat reports whether the float64 x cannot be represented by type t. + // It panics if t's Kind is not Float32 or Float64. + OverflowFloat(x float64) bool + + // OverflowInt reports whether the int64 x cannot be represented by type t. + // It panics if t's Kind is not Int, Int8, Int16, Int32, or Int64. 
+ OverflowInt(x int64) bool + + // OverflowUint reports whether the uint64 x cannot be represented by type t. + // It panics if t's Kind is not Uint, Uintptr, Uint8, Uint16, Uint32, or Uint64. + OverflowUint(x uint64) bool + common() *abi.Type uncommon() *uncommonType } @@ -812,6 +828,50 @@ func (t *rtype) IsVariadic() bool { return tt.IsVariadic() } +func (t *rtype) OverflowComplex(x complex128) bool { + k := t.Kind() + switch k { + case Complex64: + return overflowFloat32(real(x)) || overflowFloat32(imag(x)) + case Complex128: + return false + } + panic("reflect: OverflowComplex of non-complex type " + t.String()) +} + +func (t *rtype) OverflowFloat(x float64) bool { + k := t.Kind() + switch k { + case Float32: + return overflowFloat32(x) + case Float64: + return false + } + panic("reflect: OverflowFloat of non-float type " + t.String()) +} + +func (t *rtype) OverflowInt(x int64) bool { + k := t.Kind() + switch k { + case Int, Int8, Int16, Int32, Int64: + bitSize := t.Size() * 8 + trunc := (x << (64 - bitSize)) >> (64 - bitSize) + return x != trunc + } + panic("reflect: OverflowInt of non-int type " + t.String()) +} + +func (t *rtype) OverflowUint(x uint64) bool { + k := t.Kind() + switch k { + case Uint, Uintptr, Uint8, Uint16, Uint32, Uint64: + bitSize := t.Size() * 8 + trunc := (x << (64 - bitSize)) >> (64 - bitSize) + return x != trunc + } + panic("reflect: OverflowUint of non-uint type " + t.String()) +} + // add returns p+x. 
// // The whySafe string is ignored, so that the function still inlines @@ -1694,13 +1754,13 @@ func MapOf(key, elem Type) Type { return typehash(ktyp, p, seed) } mt.Flags = 0 - if ktyp.Size_ > maxKeySize { + if ktyp.Size_ > abi.MapMaxKeyBytes { mt.KeySize = uint8(goarch.PtrSize) mt.Flags |= 1 // indirect key } else { mt.KeySize = uint8(ktyp.Size_) } - if etyp.Size_ > maxValSize { + if etyp.Size_ > abi.MapMaxElemBytes { mt.ValueSize = uint8(goarch.PtrSize) mt.Flags |= 2 // indirect value } else { @@ -1914,7 +1974,7 @@ func needKeyUpdate(t *abi.Type) bool { case Float32, Float64, Complex64, Complex128, Interface, String: // Float keys can be updated from +0 to -0. // String keys can be updated to use a smaller backing store. - // Interfaces might have floats of strings in them. + // Interfaces might have floats or strings in them. return true case Array: tt := (*arrayType)(unsafe.Pointer(t)) @@ -1954,21 +2014,11 @@ func hashMightPanic(t *abi.Type) bool { } } -// Make sure these routines stay in sync with ../runtime/map.go! -// These types exist only for GC, so we only fill out GC relevant info. -// Currently, that's just size and the GC program. We also fill in string -// for possible debugging use. 
-const ( - bucketSize uintptr = abi.MapBucketCount - maxKeySize uintptr = abi.MapMaxKeyBytes - maxValSize uintptr = abi.MapMaxElemBytes -) - func bucketOf(ktyp, etyp *abi.Type) *abi.Type { - if ktyp.Size_ > maxKeySize { + if ktyp.Size_ > abi.MapMaxKeyBytes { ktyp = ptrTo(ktyp) } - if etyp.Size_ > maxValSize { + if etyp.Size_ > abi.MapMaxElemBytes { etyp = ptrTo(etyp) } @@ -1980,29 +2030,29 @@ func bucketOf(ktyp, etyp *abi.Type) *abi.Type { var gcdata *byte var ptrdata uintptr - size := bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize + size := abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize if size&uintptr(ktyp.Align_-1) != 0 || size&uintptr(etyp.Align_-1) != 0 { panic("reflect: bad size computation in MapOf") } - if ktyp.PtrBytes != 0 || etyp.PtrBytes != 0 { - nptr := (bucketSize*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize + if ktyp.Pointers() || etyp.Pointers() { + nptr := (abi.MapBucketCount*(1+ktyp.Size_+etyp.Size_) + goarch.PtrSize) / goarch.PtrSize n := (nptr + 7) / 8 // Runtime needs pointer masks to be a multiple of uintptr in size. 
n = (n + goarch.PtrSize - 1) &^ (goarch.PtrSize - 1) mask := make([]byte, n) - base := bucketSize / goarch.PtrSize + base := uintptr(abi.MapBucketCount / goarch.PtrSize) - if ktyp.PtrBytes != 0 { - emitGCMask(mask, base, ktyp, bucketSize) + if ktyp.Pointers() { + emitGCMask(mask, base, ktyp, abi.MapBucketCount) } - base += bucketSize * ktyp.Size_ / goarch.PtrSize + base += abi.MapBucketCount * ktyp.Size_ / goarch.PtrSize - if etyp.PtrBytes != 0 { - emitGCMask(mask, base, etyp, bucketSize) + if etyp.Pointers() { + emitGCMask(mask, base, etyp, abi.MapBucketCount) } - base += bucketSize * etyp.Size_ / goarch.PtrSize + base += abi.MapBucketCount * etyp.Size_ / goarch.PtrSize word := base mask[word/8] |= 1 << (word % 8) @@ -2152,6 +2202,51 @@ func isValidFieldName(fieldName string) bool { return len(fieldName) > 0 } +// This must match cmd/compile/internal/compare.IsRegularMemory +func isRegularMemory(t Type) bool { + switch t.Kind() { + case Array: + elem := t.Elem() + if isRegularMemory(elem) { + return true + } + return elem.Comparable() && t.Len() == 0 + case Int8, Int16, Int32, Int64, Int, Uint8, Uint16, Uint32, Uint64, Uint, Uintptr, Chan, Pointer, Bool, UnsafePointer: + return true + case Struct: + num := t.NumField() + switch num { + case 0: + return true + case 1: + field := t.Field(0) + if field.Name == "_" { + return false + } + return isRegularMemory(field.Type) + default: + for i := range num { + field := t.Field(i) + if field.Name == "_" || !isRegularMemory(field.Type) || isPaddedField(t, i) { + return false + } + } + return true + } + } + return false +} + +// isPaddedField reports whether the i'th field of struct type t is followed +// by padding. +func isPaddedField(t Type, i int) bool { + field := t.Field(i) + if i+1 < t.NumField() { + return field.Offset+field.Type.Size() != t.Field(i+1).Offset + } + return field.Offset+field.Type.Size() != t.Size() +} + // StructOf returns the struct type containing fields. 
// The Offset and Index fields are ignored and computed as they would be // by the compiler. @@ -2445,7 +2540,11 @@ func StructOf(fields []StructField) Type { } typ.Str = resolveReflectName(newName(str, "", false, false)) - typ.TFlag = 0 // TODO: set tflagRegularMemory + if isRegularMemory(toType(&typ.Type)) { + typ.TFlag = abi.TFlagRegularMemory + } else { + typ.TFlag = 0 + } typ.Hash = hash typ.Size_ = size typ.PtrBytes = typeptrdata(&typ.Type) @@ -2582,9 +2681,6 @@ func typeptrdata(t *abi.Type) uintptr { } } -// See cmd/compile/internal/reflectdata/reflect.go for derivation of constant. -const maxPtrmaskBytes = 2048 - // ArrayOf returns the array type with the given length and element type. // For example, if t represents int, ArrayOf(5, t) represents [5]int. // @@ -2633,7 +2729,7 @@ func ArrayOf(length int, elem Type) Type { } } array.Size_ = typ.Size_ * uintptr(length) - if length > 0 && typ.PtrBytes != 0 { + if length > 0 && typ.Pointers() { array.PtrBytes = typ.Size_*uintptr(length-1) + typ.PtrBytes } array.Align_ = typ.Align_ @@ -2642,7 +2738,7 @@ func ArrayOf(length int, elem Type) Type { array.Slice = &(SliceOf(elem).(*rtype).t) switch { - case typ.PtrBytes == 0 || array.Size_ == 0: + case !typ.Pointers() || array.Size_ == 0: // No pointers. array.GCData = nil array.PtrBytes = 0 @@ -2653,7 +2749,7 @@ func ArrayOf(length int, elem Type) Type { array.GCData = typ.GCData array.PtrBytes = typ.PtrBytes - case typ.Kind_&kindGCProg == 0 && array.Size_ <= maxPtrmaskBytes*8*goarch.PtrSize: + case typ.Kind_&kindGCProg == 0 && array.Size_ <= abi.MaxPtrmaskBytes*8*goarch.PtrSize: // Element is small with pointer mask; array is still small. // Create direct pointer mask by turning each 1 bit in elem // into length 1 bits in larger mask. 
@@ -2842,7 +2938,7 @@ func (bv *bitVector) append(bit uint8) { } func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { - if t.PtrBytes == 0 { + if !t.Pointers() { return } @@ -2881,5 +2977,9 @@ func addTypeBits(bv *bitVector, offset uintptr, t *abi.Type) { // TypeFor returns the [Type] that represents the type argument T. func TypeFor[T any]() Type { - return TypeOf((*T)(nil)).Elem() + var v T + if t := TypeOf(v); t != nil { + return t // optimize for T being a non-interface kind + } + return TypeOf((*T)(nil)).Elem() // only for an interface kind } diff --git a/src/reflect/type_test.go b/src/reflect/type_test.go index 9e124273a2..200ecf6eca 100644 --- a/src/reflect/type_test.go +++ b/src/reflect/type_test.go @@ -57,3 +57,63 @@ func TestStructOfEmbeddedIfaceMethodCall(t *testing.T) { _ = x.Name() }) } + +func TestIsRegularMemory(t *testing.T) { + type args struct { + t reflect.Type + } + type S struct { + int + } + tests := []struct { + name string + args args + want bool + }{ + {"struct{i int}", args{reflect.TypeOf(struct{ i int }{})}, true}, + {"struct{}", args{reflect.TypeOf(struct{}{})}, true}, + {"struct{i int; s S}", args{reflect.TypeOf(struct { + i int + s S + }{})}, true}, + {"map[int][int]", args{reflect.TypeOf(map[int]int{})}, false}, + {"[4]chan int", args{reflect.TypeOf([4]chan int{})}, true}, + {"[0]struct{_ S}", args{reflect.TypeOf([0]struct { + _ S + }{})}, true}, + {"struct{i int; _ S}", args{reflect.TypeOf(struct { + i int + _ S + }{})}, false}, + {"struct{a int16; b int32}", args{reflect.TypeOf(struct { + a int16 + b int32 + }{})}, false}, + {"struct {x int32; y int16}", args{reflect.TypeOf(struct { + x int32 + y int16 + }{})}, false}, + {"struct {_ int32 }", args{reflect.TypeOf(struct{ _ int32 }{})}, false}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := reflect.IsRegularMemory(tt.args.t); got != tt.want { + t.Errorf("isRegularMemory() = %v, want %v", got, tt.want) + } + }) + } +} + +var sinkType 
reflect.Type + +func BenchmarkTypeForString(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkType = reflect.TypeFor[string]() + } +} + +func BenchmarkTypeForError(b *testing.B) { + for i := 0; i < b.N; i++ { + sinkType = reflect.TypeFor[error]() + } +} diff --git a/src/reflect/value.go b/src/reflect/value.go index 5bfdb55fd9..5fa2daae86 100644 --- a/src/reflect/value.go +++ b/src/reflect/value.go @@ -129,8 +129,6 @@ func packEface(v Value) any { // Value is indirect, and so is the interface we're making. ptr := v.ptr if v.flag&flagAddr != 0 { - // TODO: pass safe boolean from valueInterface so - // we don't need to copy if safe==true? c := unsafe_New(t) typedmemmove(t, c, ptr) ptr = c @@ -210,14 +208,7 @@ type emptyInterface struct { // nonEmptyInterface is the header for an interface value with methods. type nonEmptyInterface struct { - // see ../runtime/iface.go:/Itab - itab *struct { - ityp *abi.Type // static interface type - typ *abi.Type // dynamic concrete type - hash uint32 // copy of typ.hash - _ [4]byte - fun [100000]unsafe.Pointer // method table - } + itab *abi.ITab word unsafe.Pointer } @@ -899,8 +890,8 @@ func methodReceiver(op string, v Value, methodIndex int) (rcvrtype *abi.Type, t if iface.itab == nil { panic("reflect: " + op + " of method on nil interface value") } - rcvrtype = iface.itab.typ - fn = unsafe.Pointer(&iface.itab.fun[i]) + rcvrtype = iface.itab.Type + fn = unsafe.Pointer(&unsafe.Slice(&iface.itab.Fun[0], i+1)[i]) t = (*funcType)(unsafe.Pointer(tt.typeOff(m.Typ))) } else { rcvrtype = v.typ() @@ -954,7 +945,7 @@ func align(x, n uintptr) uintptr { // so that the linker can make it work correctly for panic and recover. // The gc compilers know to do that for the name "reflect.callMethod". // -// ctxt is the "closure" generated by makeVethodValue. +// ctxt is the "closure" generated by makeMethodValue. // frame is a pointer to the arguments to that closure on the stack. 
// retValid points to a boolean which should be set when the results // section of frame is set. @@ -1522,7 +1513,6 @@ func valueInterface(v Value, safe bool) any { })(v.ptr) } - // TODO: pass safe to packEface so we don't need to copy if safe==true? return packEface(v) } @@ -1598,24 +1588,23 @@ func (v Value) IsZero() bool { case Complex64, Complex128: return v.Complex() == 0 case Array: - array := (*abi.ArrayType)(unsafe.Pointer(v.typ())) - // Avoid performance degradation of small benchmarks. + if v.flag&flagIndir == 0 { + return v.ptr == nil + } + typ := (*abi.ArrayType)(unsafe.Pointer(v.typ())) // If the type is comparable, then compare directly with zero. - if array.Equal != nil && array.Size() <= maxZero { - if v.flag&flagIndir == 0 { - return v.ptr == nil - } + if typ.Equal != nil && typ.Size() <= abi.ZeroValSize { // v.ptr doesn't escape, as Equal functions are compiler generated // and never escape. The escape analysis doesn't know, as it is a // function pointer call. - return array.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) + return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) } - if array.TFlag&abi.TFlagRegularMemory != 0 { + if typ.TFlag&abi.TFlagRegularMemory != 0 { // For some types where the zero value is a value where all bits of this type are 0 // optimize it. - return isZero(unsafe.Slice(((*byte)(v.ptr)), array.Size())) + return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size())) } - n := int(array.Len) + n := int(typ.Len) for i := 0; i < n; i++ { if !v.Index(i).IsZero() { return false @@ -1627,13 +1616,19 @@ func (v Value) IsZero() bool { case String: return v.Len() == 0 case Struct: + if v.flag&flagIndir == 0 { + return v.ptr == nil + } + typ := (*abi.StructType)(unsafe.Pointer(v.typ())) // If the type is comparable, then compare directly with zero. 
- if v.typ().Equal != nil && v.typ().Size() <= maxZero { - if v.flag&flagIndir == 0 { - return v.ptr == nil - } + if typ.Equal != nil && typ.Size() <= abi.ZeroValSize { // See noescape justification above. - return v.typ().Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) + return typ.Equal(noescape(v.ptr), unsafe.Pointer(&zeroVal[0])) + } + if typ.TFlag&abi.TFlagRegularMemory != 0 { + // For some types where the zero value is a value where all bits of this type are 0 + // optimize it. + return isZero(unsafe.Slice(((*byte)(v.ptr)), typ.Size())) } n := v.NumField() @@ -1650,30 +1645,48 @@ func (v Value) IsZero() bool { } } -// isZero must have len(b)>256+7 to ensure at -// least one 8-byte aligned [256]byte, -// otherwise the access will be out of bounds. -// For all zeros, performance is not as good as +// isZero For all zeros, performance is not as good as // return bytealg.Count(b, byte(0)) == len(b) func isZero(b []byte) bool { + if len(b) == 0 { + return true + } const n = 32 - const bit = n * 8 - // Align memory addresses to 8 bytes + // Align memory addresses to 8 bytes. 
for uintptr(unsafe.Pointer(&b[0]))%8 != 0 { if b[0] != 0 { return false } b = b[1:] + if len(b) == 0 { + return true + } } - for len(b)%bit != 0 { + for len(b)%8 != 0 { if b[len(b)-1] != 0 { return false } b = b[:len(b)-1] } + if len(b) == 0 { + return true + } w := unsafe.Slice((*uint64)(unsafe.Pointer(&b[0])), len(b)/8) + for len(w)%n != 0 { + if w[0] != 0 { + return false + } + w = w[1:] + } for len(w) >= n { - if w[0] != 0 || w[1] != 0 || w[2] != 0 || w[3] != 0 || w[4] != 0 || w[5] != 0 || w[6] != 0 || w[7] != 0 || w[8] != 0 || w[9] != 0 || w[10] != 0 || w[11] != 0 || w[12] != 0 || w[13] != 0 || w[14] != 0 || w[15] != 0 || w[16] != 0 || w[17] != 0 || w[18] != 0 || w[19] != 0 || w[20] != 0 || w[21] != 0 || w[22] != 0 || w[23] != 0 || w[24] != 0 || w[25] != 0 || w[26] != 0 || w[27] != 0 || w[28] != 0 || w[29] != 0 || w[30] != 0 || w[31] != 0 { + if w[0] != 0 || w[1] != 0 || w[2] != 0 || w[3] != 0 || + w[4] != 0 || w[5] != 0 || w[6] != 0 || w[7] != 0 || + w[8] != 0 || w[9] != 0 || w[10] != 0 || w[11] != 0 || + w[12] != 0 || w[13] != 0 || w[14] != 0 || w[15] != 0 || + w[16] != 0 || w[17] != 0 || w[18] != 0 || w[19] != 0 || + w[20] != 0 || w[21] != 0 || w[22] != 0 || w[23] != 0 || + w[24] != 0 || w[25] != 0 || w[26] != 0 || w[27] != 0 || + w[28] != 0 || w[29] != 0 || w[30] != 0 || w[31] != 0 { return false } w = w[n:] @@ -1791,7 +1804,7 @@ func (v Value) MapIndex(key Value) Value { // of unexported fields. 
var e unsafe.Pointer - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize { + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes { k := *(*string)(key.ptr) e = mapaccess_faststr(v.typ(), v.pointer(), k) } else { @@ -2190,7 +2203,7 @@ func (v Value) Pointer() uintptr { k := v.kind() switch k { case Pointer: - if v.typ().PtrBytes == 0 { + if !v.typ().Pointers() { val := *(*uintptr)(v.ptr) // Since it is a not-in-heap pointer, all pointers to the heap are // forbidden! See comment in Value.Elem and issue #48399. @@ -2427,7 +2440,7 @@ func (v Value) SetMapIndex(key, elem Value) { key.mustBeExported() tt := (*mapType)(unsafe.Pointer(v.typ())) - if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= maxValSize { + if (tt.Key == stringType || key.kind() == String) && tt.Key == key.typ() && tt.Elem.Size() <= abi.MapMaxElemBytes { k := *(*string)(key.ptr) if elem.typ() == nil { mapdelete_faststr(v.typ(), v.pointer(), k) @@ -2770,7 +2783,7 @@ func (v Value) UnsafePointer() unsafe.Pointer { k := v.kind() switch k { case Pointer: - if v.typ().PtrBytes == 0 { + if !v.typ().Pointers() { // Since it is a not-in-heap pointer, all pointers to the heap are // forbidden! See comment in Value.Elem and issue #48399. if !verifyNotInHeapPtr(*(*uintptr)(v.ptr)) { @@ -3254,7 +3267,7 @@ func Zero(typ Type) Value { fl := flag(t.Kind()) if t.IfaceIndir() { var p unsafe.Pointer - if t.Size() <= maxZero { + if t.Size() <= abi.ZeroValSize { p = unsafe.Pointer(&zeroVal[0]) } else { p = unsafe_New(t) @@ -3264,11 +3277,8 @@ func Zero(typ Type) Value { return Value{t, nil, fl} } -// must match declarations in runtime/map.go. -const maxZero = 1024 - //go:linkname zeroVal runtime.zeroVal -var zeroVal [maxZero]byte +var zeroVal [abi.ZeroValSize]byte // New returns a Value representing a pointer to a new zero value // for the specified type. 
That is, the returned Value's Type is PointerTo(typ). @@ -3396,7 +3406,7 @@ func (v Value) Comparable() bool { return v.Type().Comparable() case Interface: - return v.Elem().Comparable() + return v.IsNil() || v.Elem().Comparable() case Struct: for i := 0; i < v.NumField(); i++ { diff --git a/src/regexp/regexp.go b/src/regexp/regexp.go index 462f235b1b..d1218ad0e8 100644 --- a/src/regexp/regexp.go +++ b/src/regexp/regexp.go @@ -1074,7 +1074,7 @@ func (re *Regexp) FindReaderSubmatchIndex(r io.RuneReader) []int { const startSize = 10 // The size at which to start a slice in the 'All' routines. -// FindAll is the 'All' version of Find; it returns a slice of all successive +// FindAll is the 'All' version of [Regexp.Find]; it returns a slice of all successive // matches of the expression, as defined by the 'All' description in the // package comment. // A return value of nil indicates no match. diff --git a/src/regexp/syntax/doc.go b/src/regexp/syntax/doc.go index eb8a971c73..abc58bb159 100644 --- a/src/regexp/syntax/doc.go +++ b/src/regexp/syntax/doc.go @@ -7,12 +7,12 @@ /* Package syntax parses regular expressions into parse trees and compiles parse trees into programs. Most clients of regular expressions will use the -facilities of package regexp (such as Compile and Match) instead of this package. +facilities of package [regexp] (such as [regexp.Compile] and [regexp.Match]) instead of this package. # Syntax -The regular expression syntax understood by this package when parsing with the Perl flag is as follows. -Parts of the syntax can be disabled by passing alternate flags to Parse. +The regular expression syntax understood by this package when parsing with the [Perl] flag is as follows. +Parts of the syntax can be disabled by passing alternate flags to [Parse]. 
Single characters: @@ -137,6 +137,6 @@ ASCII character classes: [[:word:]] word characters (== [0-9A-Za-z_]) [[:xdigit:]] hex digit (== [0-9A-Fa-f]) -Unicode character classes are those in unicode.Categories and unicode.Scripts. +Unicode character classes are those in [unicode.Categories] and [unicode.Scripts]. */ package syntax diff --git a/src/regexp/syntax/prog.go b/src/regexp/syntax/prog.go index d69ae1a12d..6a3705ec8f 100644 --- a/src/regexp/syntax/prog.go +++ b/src/regexp/syntax/prog.go @@ -191,7 +191,7 @@ Loop: const noMatch = -1 // MatchRune reports whether the instruction matches (and consumes) r. -// It should only be called when i.Op == InstRune. +// It should only be called when i.Op == [InstRune]. func (i *Inst) MatchRune(r rune) bool { return i.MatchRunePos(r) != noMatch } @@ -200,7 +200,7 @@ func (i *Inst) MatchRune(r rune) bool { // If so, MatchRunePos returns the index of the matching rune pair // (or, when len(i.Rune) == 1, rune singleton). // If not, MatchRunePos returns -1. -// MatchRunePos should only be called when i.Op == InstRune. +// MatchRunePos should only be called when i.Op == [InstRune]. func (i *Inst) MatchRunePos(r rune) int { rune := i.Rune @@ -262,7 +262,7 @@ func (i *Inst) MatchRunePos(r rune) int { // MatchEmptyWidth reports whether the instruction matches // an empty string between the runes before and after. -// It should only be called when i.Op == InstEmptyWidth. +// It should only be called when i.Op == [InstEmptyWidth]. 
func (i *Inst) MatchEmptyWidth(before rune, after rune) bool { switch EmptyOp(i.Arg) { case EmptyBeginLine: diff --git a/src/runtime/abi_test.go b/src/runtime/abi_test.go index d7039e758a..4caee597c5 100644 --- a/src/runtime/abi_test.go +++ b/src/runtime/abi_test.go @@ -15,25 +15,34 @@ import ( "os" "os/exec" "runtime" + "runtime/internal/atomic" "strings" "testing" "time" ) -var regConfirmRun chan int +var regConfirmRun atomic.Int32 //go:registerparams -func regFinalizerPointer(v *Tint) (int, float32, [10]byte) { - regConfirmRun <- *(*int)(v) +func regFinalizerPointer(v *TintPointer) (int, float32, [10]byte) { + regConfirmRun.Store(int32(*(*int)(v.p))) return 5151, 4.0, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} } //go:registerparams func regFinalizerIface(v Tinter) (int, float32, [10]byte) { - regConfirmRun <- *(*int)(v.(*Tint)) + regConfirmRun.Store(int32(*(*int)(v.(*TintPointer).p))) return 5151, 4.0, [10]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10} } +// TintPointer has a pointer member to make sure that it isn't allocated by the +// tiny allocator, so we know when its finalizer will run +type TintPointer struct { + p *Tint +} + +func (*TintPointer) m() {} + func TestFinalizerRegisterABI(t *testing.T) { testenv.MustHaveExec(t) @@ -87,10 +96,8 @@ func TestFinalizerRegisterABI(t *testing.T) { for i := range tests { test := &tests[i] t.Run(test.name, func(t *testing.T) { - regConfirmRun = make(chan int) - - x := new(Tint) - *x = (Tint)(test.confirmValue) + x := &TintPointer{p: new(Tint)} + *x.p = (Tint)(test.confirmValue) runtime.SetFinalizer(x, test.fin) runtime.KeepAlive(x) @@ -99,13 +106,11 @@ func TestFinalizerRegisterABI(t *testing.T) { runtime.GC() runtime.GC() - select { - case <-time.After(time.Second): + if !runtime.BlockUntilEmptyFinalizerQueue(int64(time.Second)) { t.Fatal("finalizer failed to execute") - case gotVal := <-regConfirmRun: - if gotVal != test.confirmValue { - t.Fatalf("wrong finalizer executed? 
got %d, want %d", gotVal, test.confirmValue) - } + } + if got := int(regConfirmRun.Load()); got != test.confirmValue { + t.Fatalf("wrong finalizer executed? got %d, want %d", got, test.confirmValue) } }) } diff --git a/src/runtime/alg.go b/src/runtime/alg.go index 336058d159..93b14accb4 100644 --- a/src/runtime/alg.go +++ b/src/runtime/alg.go @@ -66,7 +66,7 @@ func f32hash(p unsafe.Pointer, h uintptr) uintptr { case f == 0: return c1 * (c0 ^ h) // +0, -0 case f != f: - return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN + return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN default: return memhash(p, h, 4) } @@ -78,7 +78,7 @@ func f64hash(p unsafe.Pointer, h uintptr) uintptr { case f == 0: return c1 * (c0 ^ h) // +0, -0 case f != f: - return c1 * (c0 ^ h ^ uintptr(fastrand())) // any kind of NaN + return c1 * (c0 ^ h ^ uintptr(rand())) // any kind of NaN default: return memhash(p, h, 8) } @@ -100,7 +100,7 @@ func interhash(p unsafe.Pointer, h uintptr) uintptr { if tab == nil { return h } - t := tab._type + t := tab.Type if t.Equal == nil { // Check hashability here. 
We could do this check inside // typehash, but we want to report the topmost type in @@ -223,7 +223,7 @@ func mapKeyError2(t *_type, p unsafe.Pointer) error { if a.tab == nil { return nil } - t = a.tab._type + t = a.tab.Type pdata = &a.data } @@ -329,7 +329,7 @@ func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { if tab == nil { return true } - t := tab._type + t := tab.Type eq := t.Equal if eq == nil { panic(errorString("comparing uncomparable type " + toRType(t).string())) @@ -390,17 +390,18 @@ func alginit() { initAlgAES() return } - getRandomData((*[len(hashkey) * goarch.PtrSize]byte)(unsafe.Pointer(&hashkey))[:]) - hashkey[0] |= 1 // make sure these numbers are odd - hashkey[1] |= 1 - hashkey[2] |= 1 - hashkey[3] |= 1 + for i := range hashkey { + hashkey[i] = uintptr(rand()) | 1 // make sure these numbers are odd + } } func initAlgAES() { useAeshash = true // Initialize with random data so hash collisions will be hard to engineer. - getRandomData(aeskeysched[:]) + key := (*[hashRandomBytes / 8]uint64)(unsafe.Pointer(&aeskeysched)) + for i := range key { + key[i] = bootstrapRand() + } } // Note: These routines perform the read with a native endianness. diff --git a/src/runtime/arena.go b/src/runtime/arena.go index e0e5c393c6..3fdd4cbdd6 100644 --- a/src/runtime/arena.go +++ b/src/runtime/arena.go @@ -482,7 +482,7 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer { mp.mallocing = 1 var ptr unsafe.Pointer - if typ.PtrBytes == 0 { + if !typ.Pointers() { // Allocate pointer-less objects from the tail end of the chunk. v, ok := s.userArenaChunkFree.takeFromBack(size, typ.Align_) if ok { @@ -504,7 +504,7 @@ func (s *mspan) userArenaNextFree(typ *_type, cap int) unsafe.Pointer { throw("arena chunk needs zeroing, but should already be zeroed") } // Set up heap bitmap and do extra accounting. 
- if typ.PtrBytes != 0 { + if typ.Pointers() { if cap >= 0 { userArenaHeapBitsSetSliceType(typ, cap, ptr, s) } else { @@ -589,7 +589,7 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) { // This may be racing with GC so do it atomically if there can be // a race marking the bit. if gcphase != _GCoff { - gcmarknewobject(span, span.base(), span.elemsize) + gcmarknewobject(span, span.base()) } if raceenabled { @@ -765,7 +765,7 @@ func freeUserArenaChunk(s *mspan, x unsafe.Pointer) { throw("invalid user arena span size") } - // Mark the region as free to various santizers immediately instead + // Mark the region as free to various sanitizers immediately instead // of handling them at sweep time. if raceenabled { racefree(unsafe.Pointer(s.base()), s.elemsize) diff --git a/src/runtime/asan.go b/src/runtime/asan.go index 25b83277e6..d79637a334 100644 --- a/src/runtime/asan.go +++ b/src/runtime/asan.go @@ -29,6 +29,7 @@ const asanenabled = true // asan{read,write} are nosplit because they may be called between // fork and exec, when the stack must not grow. See issue #50391. +//go:linkname asanread //go:nosplit func asanread(addr unsafe.Pointer, sz uintptr) { sp := getcallersp() @@ -36,6 +37,7 @@ func asanread(addr unsafe.Pointer, sz uintptr) { doasanread(addr, sz, sp, pc) } +//go:linkname asanwrite //go:nosplit func asanwrite(addr unsafe.Pointer, sz uintptr) { sp := getcallersp() diff --git a/src/runtime/asan0.go b/src/runtime/asan0.go index 0948786200..bcfd96f1ab 100644 --- a/src/runtime/asan0.go +++ b/src/runtime/asan0.go @@ -1,4 +1,4 @@ -// Copyright 2021 The Go Authors. All rights reserved. +// Copyright 2021 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/src/runtime/asan_amd64.s b/src/runtime/asan_amd64.s index ac09ec1105..195faf4e6d 100644 --- a/src/runtime/asan_amd64.s +++ b/src/runtime/asan_amd64.s @@ -28,7 +28,7 @@ // func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanread(SB), NOSPLIT, $0-32 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 MOVQ sp+16(FP), RARG2 MOVQ pc+24(FP), RARG3 // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -38,7 +38,7 @@ TEXT runtime·doasanread(SB), NOSPLIT, $0-32 // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 MOVQ sp+16(FP), RARG2 MOVQ pc+24(FP), RARG3 // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -48,7 +48,7 @@ TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 // func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __asan_unpoison_go(void *addr, uintptr_t sz); MOVQ $__asan_unpoison_go(SB), AX JMP asancall<>(SB) @@ -56,7 +56,7 @@ TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 // func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __asan_poison_go(void *addr, uintptr_t sz); MOVQ $__asan_poison_go(SB), AX JMP asancall<>(SB) @@ -64,7 +64,7 @@ TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 // func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ n+8(FP), RARG1 // void __asan_register_globals_go(void *addr, uintptr_t n); MOVQ $__asan_register_globals_go(SB), AX JMP asancall<>(SB) diff --git a/src/runtime/asan_arm64.s b/src/runtime/asan_arm64.s index 697c98206e..dfa3f81bf2 
100644 --- a/src/runtime/asan_arm64.s +++ b/src/runtime/asan_arm64.s @@ -17,7 +17,7 @@ // func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanread(SB), NOSPLIT, $0-32 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 MOVD sp+16(FP), RARG2 MOVD pc+24(FP), RARG3 // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -27,7 +27,7 @@ TEXT runtime·doasanread(SB), NOSPLIT, $0-32 // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 MOVD sp+16(FP), RARG2 MOVD pc+24(FP), RARG3 // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -37,7 +37,7 @@ TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 // func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __asan_unpoison_go(void *addr, uintptr_t sz); MOVD $__asan_unpoison_go(SB), FARG JMP asancall<>(SB) @@ -45,7 +45,7 @@ TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 // func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __asan_poison_go(void *addr, uintptr_t sz); MOVD $__asan_poison_go(SB), FARG JMP asancall<>(SB) @@ -53,7 +53,7 @@ TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 // func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD n+8(FP), RARG1 // void __asan_register_globals_go(void *addr, uintptr_t n); MOVD $__asan_register_globals_go(SB), FARG JMP asancall<>(SB) diff --git a/src/runtime/asan_loong64.s b/src/runtime/asan_loong64.s index 70386fcb6e..0034a31687 100644 --- a/src/runtime/asan_loong64.s +++ b/src/runtime/asan_loong64.s @@ -17,7 
+17,7 @@ // func runtime·doasanread(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanread(SB), NOSPLIT, $0-32 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 MOVV sp+16(FP), RARG2 MOVV pc+24(FP), RARG3 // void __asan_read_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -27,7 +27,7 @@ TEXT runtime·doasanread(SB), NOSPLIT, $0-32 // func runtime·doasanwrite(addr unsafe.Pointer, sz, sp, pc uintptr) TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 MOVV sp+16(FP), RARG2 MOVV pc+24(FP), RARG3 // void __asan_write_go(void *addr, uintptr_t sz, void *sp, void *pc); @@ -37,7 +37,7 @@ TEXT runtime·doasanwrite(SB), NOSPLIT, $0-32 // func runtime·asanunpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __asan_unpoison_go(void *addr, uintptr_t sz); MOVV $__asan_unpoison_go(SB), FARG JMP asancall<>(SB) @@ -45,7 +45,7 @@ TEXT runtime·asanunpoison(SB), NOSPLIT, $0-16 // func runtime·asanpoison(addr unsafe.Pointer, sz uintptr) TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __asan_poison_go(void *addr, uintptr_t sz); MOVV $__asan_poison_go(SB), FARG JMP asancall<>(SB) @@ -53,7 +53,7 @@ TEXT runtime·asanpoison(SB), NOSPLIT, $0-16 // func runtime·asanregisterglobals(addr unsafe.Pointer, n uintptr) TEXT runtime·asanregisterglobals(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV n+8(FP), RARG1 // void __asan_register_globals_go(void *addr, uintptr_t n); MOVV $__asan_register_globals_go(SB), FARG JMP asancall<>(SB) diff --git a/src/runtime/asm.s b/src/runtime/asm.s index 012a6a095d..64b9e420b6 100644 --- a/src/runtime/asm.s +++ b/src/runtime/asm.s @@ -13,11 +13,17 @@ TEXT ·sigpanic0(SB),NOSPLIT,$0-0 TEXT ·mapinitnoop(SB),NOSPLIT,$0-0 RET +#ifndef GOARCH_arm #ifndef 
GOARCH_amd64 #ifndef GOARCH_arm64 +#ifndef GOARCH_loong64 #ifndef GOARCH_mips64 #ifndef GOARCH_mips64le +#ifndef GOARCH_ppc64 +#ifndef GOARCH_ppc64le #ifndef GOARCH_riscv64 +#ifndef GOARCH_s390x +#ifndef GOARCH_wasm // stub to appease shared build mode. TEXT ·switchToCrashStack0(SB),NOSPLIT,$0-0 UNDEF @@ -26,3 +32,9 @@ TEXT ·switchToCrashStack0(SB),NOSPLIT,$0-0 #endif #endif #endif +#endif +#endif +#endif +#endif +#endif +#endif diff --git a/src/runtime/asm_arm.s b/src/runtime/asm_arm.s index e3206a1d27..4d57ec6062 100644 --- a/src/runtime/asm_arm.s +++ b/src/runtime/asm_arm.s @@ -39,10 +39,10 @@ TEXT _rt0_arm_lib(SB),NOSPLIT,$104 MOVW g, 32(R13) MOVW R11, 36(R13) - // Skip floating point registers on GOARM < 6. - MOVB runtime·goarm(SB), R11 - CMP $6, R11 - BLT skipfpsave + // Skip floating point registers on goarmsoftfp != 0. + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfpsave MOVD F8, (40+8*0)(R13) MOVD F9, (40+8*1)(R13) MOVD F10, (40+8*2)(R13) @@ -77,9 +77,9 @@ nocgo: BL runtime·newosproc0(SB) rr: // Restore callee-save registers and return. 
- MOVB runtime·goarm(SB), R11 - CMP $6, R11 - BLT skipfprest + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfprest MOVD (40+8*0)(R13), F8 MOVD (40+8*1)(R13), F9 MOVD (40+8*2)(R13), F10 @@ -197,10 +197,10 @@ TEXT runtime·breakpoint(SB),NOSPLIT,$0-0 RET TEXT runtime·asminit(SB),NOSPLIT,$0-0 - // disable runfast (flush-to-zero) mode of vfp if runtime.goarm > 5 - MOVB runtime·goarm(SB), R11 - CMP $5, R11 - BLE 4(PC) + // disable runfast (flush-to-zero) mode of vfp if runtime.goarmsoftfp == 0 + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE 4(PC) WORD $0xeef1ba10 // vmrs r11, fpscr BIC $(1<<24), R11 WORD $0xeee1ba10 // vmsr fpscr, r11 @@ -333,6 +333,30 @@ noswitch: MOVW.P 4(R13), R14 // restore LR B (R0) +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-4 + MOVW fn+0(FP), R7 // context register + MOVW g_m(g), R1 // curm + + // set g to gcrash + MOVW $runtime·gcrash(SB), R0 + BL setg<>(SB) // g = &gcrash + MOVW R1, g_m(g) // g.m = curm + MOVW g, m_g0(R1) // curm.g0 = g + + // switch to crashstack + MOVW (g_stack+stack_hi)(g), R1 + SUB $(4*8), R1 + MOVW R1, R13 + + // call target function + MOVW 0(R7), R0 + BL (R0) + + // should never return + CALL runtime·abort(SB) + UNDEF + /* * support for morestack */ @@ -349,6 +373,14 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 // Cannot grow scheduler stack (m->g0). MOVW g_m(g), R8 MOVW m_g0(R8), R4 + + // Called from f. + // Set g->sched to context in f. + MOVW R13, (g_sched+gobuf_sp)(g) + MOVW LR, (g_sched+gobuf_pc)(g) + MOVW R3, (g_sched+gobuf_lr)(g) + MOVW R7, (g_sched+gobuf_ctxt)(g) + CMP g, R4 BNE 3(PC) BL runtime·badmorestackg0(SB) @@ -361,13 +393,6 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 BL runtime·badmorestackgsignal(SB) B runtime·abort(SB) - // Called from f. - // Set g->sched to context in f. - MOVW R13, (g_sched+gobuf_sp)(g) - MOVW LR, (g_sched+gobuf_pc)(g) - MOVW R3, (g_sched+gobuf_lr)(g) - MOVW R7, (g_sched+gobuf_ctxt)(g) - // Called from f. 
// Set m->morebuf to f's caller. MOVW R3, (m_morebuf+gobuf_pc)(R8) // f's caller's PC diff --git a/src/runtime/asm_loong64.s b/src/runtime/asm_loong64.s index 6ffa1392c4..7c5ecb8a01 100644 --- a/src/runtime/asm_loong64.s +++ b/src/runtime/asm_loong64.s @@ -72,7 +72,7 @@ nocgo: MOVV R0, 1(R0) RET -DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) +DATA runtime·mainPC+0(SB)/8,$runtime·main(SB) GLOBL runtime·mainPC(SB),RODATA,$8 TEXT runtime·breakpoint(SB),NOSPLIT|NOFRAME,$0-0 @@ -123,26 +123,31 @@ TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 // Switch to m->g0's stack, call fn(g). // Fn must never return. It should gogo(&g->sched) // to keep running g. -TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 +TEXT runtime·mcall(SB), NOSPLIT|NOFRAME, $0-8 +#ifdef GOEXPERIMENT_regabiargs + MOVV R4, REGCTXT +#else + MOVV fn+0(FP), REGCTXT +#endif + // Save caller state in g->sched MOVV R3, (g_sched+gobuf_sp)(g) MOVV R1, (g_sched+gobuf_pc)(g) MOVV R0, (g_sched+gobuf_lr)(g) // Switch to m->g0 & its stack, call fn. - MOVV g, R19 - MOVV g_m(g), R4 - MOVV m_g0(R4), g + MOVV g, R4 // arg = g + MOVV g_m(g), R20 + MOVV m_g0(R20), g JAL runtime·save_g(SB) - BNE g, R19, 2(PC) + BNE g, R4, 2(PC) JMP runtime·badmcall(SB) - MOVV fn+0(FP), REGCTXT // context - MOVV 0(REGCTXT), R5 // code pointer + MOVV 0(REGCTXT), R20 // code pointer MOVV (g_sched+gobuf_sp)(g), R3 // sp = m->g0->sched.sp ADDV $-16, R3 - MOVV R19, 8(R3) + MOVV R4, 8(R3) MOVV R0, 0(R3) - JAL (R5) + JAL (R20) JMP runtime·badmcall2(SB) // systemstack_switch is a dummy routine that systemstack leaves at the bottom @@ -208,19 +213,49 @@ noswitch: ADDV $8, R3 JMP (R4) +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVV fn+0(FP), REGCTXT // context register + MOVV g_m(g), R4 // curm + + // set g to gcrash + MOVV $runtime·gcrash(SB), g // g = &gcrash + JAL runtime·save_g(SB) + MOVV R4, g_m(g) // g.m = curm + MOVV g, m_g0(R4) // curm.g0 = g + + // switch to crashstack + MOVV (g_stack+stack_hi)(g), R4 
+ ADDV $(-4*8), R4, R3 + + // call target function + MOVV 0(REGCTXT), R6 + JAL (R6) + + // should never return + JAL runtime·abort(SB) + UNDEF + /* * support for morestack */ // Called during function prolog when more stack is needed. // Caller has already loaded: -// loong64: R5: LR +// loong64: R31: LR // // The traceback routines see morestack on a g0 as being // the top of a stack (for example, morestack calling newstack // calling the scheduler calling newm calling gc), so we must // record an argument size. For that purpose, it has no arguments. TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Called from f. + // Set g->sched to context in f. + MOVV R3, (g_sched+gobuf_sp)(g) + MOVV R1, (g_sched+gobuf_pc)(g) + MOVV R31, (g_sched+gobuf_lr)(g) + MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) + // Cannot grow scheduler stack (m->g0). MOVV g_m(g), R7 MOVV m_g0(R7), R8 @@ -234,16 +269,9 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 JAL runtime·badmorestackgsignal(SB) JAL runtime·abort(SB) - // Called from f. - // Set g->sched to context in f. - MOVV R3, (g_sched+gobuf_sp)(g) - MOVV R1, (g_sched+gobuf_pc)(g) - MOVV R5, (g_sched+gobuf_lr)(g) - MOVV REGCTXT, (g_sched+gobuf_ctxt)(g) - // Called from f. // Set m->morebuf to f's caller. - MOVV R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC + MOVV R31, (m_morebuf+gobuf_pc)(R7) // f's caller's PC MOVV R3, (m_morebuf+gobuf_sp)(R7) // f's caller's SP MOVV g, (m_morebuf+gobuf_g)(R7) @@ -272,7 +300,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 JMP runtime·morestack(SB) // reflectcall: call a function with the given argument list -// func call(argtype *_type, f *FuncVal, arg *byte, argsize, retoffset uint32). +// func call(stackArgsType *_type, f *FuncVal, stackArgs *byte, stackArgsSize, stackRetOffset, frameSize uint32, regArgs *abi.RegArgs). // we don't have variable-sized frames, so we use a small number // of constant-sized-frame functions to encode a few bits of size in the pc. 
// Caution: ugly multiline assembly macros in your future! @@ -286,7 +314,7 @@ TEXT runtime·morestack_noctxt(SB),NOSPLIT|NOFRAME,$0-0 // Note: can't just "BR NAME(SB)" - bad inlining results. TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 - MOVWU stackArgsSize+24(FP), R19 + MOVWU frameSize+32(FP), R19 DISPATCH(runtime·call32, 32) DISPATCH(runtime·call64, 64) DISPATCH(runtime·call128, 128) @@ -317,7 +345,7 @@ TEXT ·reflectcall(SB), NOSPLIT|NOFRAME, $0-48 JMP (R4) #define CALLFN(NAME,MAXSIZE) \ -TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ +TEXT NAME(SB), WRAPPER, $MAXSIZE-48; \ NO_LOCAL_POINTERS; \ /* copy arguments to stack */ \ MOVV arg+16(FP), R4; \ @@ -331,12 +359,17 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ MOVBU R6, (R12); \ ADDV $1, R12; \ JMP -5(PC); \ + /* set up argument registers */ \ + MOVV regArgs+40(FP), R25; \ + JAL ·unspillArgs(SB); \ /* call function */ \ MOVV f+8(FP), REGCTXT; \ - MOVV (REGCTXT), R6; \ + MOVV (REGCTXT), R25; \ PCDATA $PCDATA_StackMapIndex, $0; \ - JAL (R6); \ + JAL (R25); \ /* copy return values back */ \ + MOVV regArgs+40(FP), R25; \ + JAL ·spillArgs(SB); \ MOVV argtype+0(FP), R7; \ MOVV arg+16(FP), R4; \ MOVWU n+24(FP), R5; \ @@ -352,11 +385,13 @@ TEXT NAME(SB), WRAPPER, $MAXSIZE-24; \ // separate function so it can allocate stack space for the arguments // to reflectcallmove. It does not follow the Go ABI; it expects its // arguments in registers. -TEXT callRet<>(SB), NOSPLIT, $32-0 +TEXT callRet<>(SB), NOSPLIT, $40-0 + NO_LOCAL_POINTERS MOVV R7, 8(R3) MOVV R4, 16(R3) MOVV R12, 24(R3) MOVV R5, 32(R3) + MOVV R25, 40(R3) JAL runtime·reflectcallmove(SB) RET @@ -567,7 +602,7 @@ havem: // If the m on entry wasn't nil, // 1. the thread might be a Go thread, // 2. or it wasn't the first call from a C thread on pthread platforms, - // since then we skip dropm to reuse the m in the first call. + // since then we skip dropm to reuse the m in the first call. 
MOVV savedm-8(SP), R12 BNE R12, droppedm @@ -604,14 +639,14 @@ TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 UNDEF // AES hashing not implemented for loong64 -TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 - JMP runtime·memhashFallback(SB) -TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 - JMP runtime·strhashFallback(SB) -TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 - JMP runtime·memhash32Fallback(SB) -TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 - JMP runtime·memhash64Fallback(SB) +TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 + JMP runtime·memhashFallback(SB) +TEXT runtime·strhash(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·strhashFallback(SB) +TEXT runtime·memhash32(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash32Fallback(SB) +TEXT runtime·memhash64(SB),NOSPLIT|NOFRAME,$0-24 + JMP runtime·memhash64Fallback(SB) TEXT runtime·return0(SB), NOSPLIT, $0 MOVW $0, R19 @@ -642,11 +677,102 @@ TEXT runtime·goexit(SB),NOSPLIT|NOFRAME|TOPFRAME,$0-0 // traceback from goexit1 must hit code range of goexit NOOP +// This is called from .init_array and follows the platform, not Go, ABI. +TEXT runtime·addmoduledata(SB),NOSPLIT,$0-0 + ADDV $-0x10, R3 + MOVV R30, 8(R3) // The access to global variables below implicitly uses R30, which is callee-save + MOVV runtime·lastmoduledatap(SB), R12 + MOVV R4, moduledata_next(R12) + MOVV R4, runtime·lastmoduledatap(SB) + MOVV 8(R3), R30 + ADDV $0x10, R3 + RET + TEXT ·checkASM(SB),NOSPLIT,$0-1 MOVW $1, R19 MOVB R19, ret+0(FP) RET +#ifdef GOEXPERIMENT_regabiargs +// spillArgs stores return values from registers to a *internal/abi.RegArgs in R25. 
+TEXT ·spillArgs(SB),NOSPLIT,$0-0 + MOVV R4, (0*8)(R25) + MOVV R5, (1*8)(R25) + MOVV R6, (2*8)(R25) + MOVV R7, (3*8)(R25) + MOVV R8, (4*8)(R25) + MOVV R9, (5*8)(R25) + MOVV R10, (6*8)(R25) + MOVV R11, (7*8)(R25) + MOVV R12, (8*8)(R25) + MOVV R13, (9*8)(R25) + MOVV R14, (10*8)(R25) + MOVV R15, (11*8)(R25) + MOVV R16, (12*8)(R25) + MOVV R17, (13*8)(R25) + MOVV R18, (14*8)(R25) + MOVV R19, (15*8)(R25) + MOVD F0, (16*8)(R25) + MOVD F1, (17*8)(R25) + MOVD F2, (18*8)(R25) + MOVD F3, (19*8)(R25) + MOVD F4, (20*8)(R25) + MOVD F5, (21*8)(R25) + MOVD F6, (22*8)(R25) + MOVD F7, (23*8)(R25) + MOVD F8, (24*8)(R25) + MOVD F9, (25*8)(R25) + MOVD F10, (26*8)(R25) + MOVD F11, (27*8)(R25) + MOVD F12, (28*8)(R25) + MOVD F13, (29*8)(R25) + MOVD F14, (30*8)(R25) + MOVD F15, (31*8)(R25) + RET + +// unspillArgs loads args into registers from a *internal/abi.RegArgs in R25. +TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + MOVV (0*8)(R25), R4 + MOVV (1*8)(R25), R5 + MOVV (2*8)(R25), R6 + MOVV (3*8)(R25), R7 + MOVV (4*8)(R25), R8 + MOVV (5*8)(R25), R9 + MOVV (6*8)(R25), R10 + MOVV (7*8)(R25), R11 + MOVV (8*8)(R25), R12 + MOVV (9*8)(R25), R13 + MOVV (10*8)(R25), R14 + MOVV (11*8)(R25), R15 + MOVV (12*8)(R25), R16 + MOVV (13*8)(R25), R17 + MOVV (14*8)(R25), R18 + MOVV (15*8)(R25), R19 + MOVD (16*8)(R25), F0 + MOVD (17*8)(R25), F1 + MOVD (18*8)(R25), F2 + MOVD (19*8)(R25), F3 + MOVD (20*8)(R25), F4 + MOVD (21*8)(R25), F5 + MOVD (22*8)(R25), F6 + MOVD (23*8)(R25), F7 + MOVD (24*8)(R25), F8 + MOVD (25*8)(R25), F9 + MOVD (26*8)(R25), F10 + MOVD (27*8)(R25), F11 + MOVD (28*8)(R25), F12 + MOVD (29*8)(R25), F13 + MOVD (30*8)(R25), F14 + MOVD (31*8)(R25), F15 + RET +#else +TEXT ·spillArgs(SB),NOSPLIT,$0-0 + RET + +TEXT ·unspillArgs(SB),NOSPLIT,$0-0 + RET +#endif + // gcWriteBarrier informs the GC about heap pointer writes. // // gcWriteBarrier does NOT follow the Go ABI. It accepts the @@ -774,71 +900,156 @@ TEXT runtime·gcWriteBarrier8(SB),NOSPLIT,$0 // in the caller's stack frame. 
These stubs write the args into that stack space and // then tail call to the corresponding runtime handler. // The tail call makes these stubs disappear in backtraces. -TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicIndex(SB) -TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicIndexU(SB) -TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSliceAlen(SB) -TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSliceAlenU(SB) -TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSliceAcap(SB) -TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSliceAcapU(SB) -TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicSliceB(SB) -TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicSliceBU(SB) -TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 - MOVV R17, x+0(FP) - MOVV R4, y+8(FP) - JMP runtime·goPanicSlice3Alen(SB) -TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 - MOVV R17, x+0(FP) - MOVV R4, y+8(FP) - JMP runtime·goPanicSlice3AlenU(SB) -TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 - MOVV R17, x+0(FP) - MOVV R4, y+8(FP) - JMP runtime·goPanicSlice3Acap(SB) -TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 - MOVV R17, x+0(FP) - MOVV R4, y+8(FP) - JMP runtime·goPanicSlice3AcapU(SB) -TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSlice3B(SB) -TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 - MOVV R18, x+0(FP) - MOVV R17, y+8(FP) - JMP runtime·goPanicSlice3BU(SB) -TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicSlice3C(SB) -TEXT 
runtime·panicSlice3CU(SB),NOSPLIT,$0-16 - MOVV R19, x+0(FP) - MOVV R18, y+8(FP) - JMP runtime·goPanicSlice3CU(SB) -TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 - MOVV R17, x+0(FP) - MOVV R4, y+8(FP) - JMP runtime·goPanicSliceConvert(SB) +TEXT runtime·panicIndex(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicIndex(SB) +TEXT runtime·panicIndexU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicIndexU(SB) +TEXT runtime·panicSliceAlen(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSliceAlen(SB) +TEXT runtime·panicSliceAlenU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSliceAlenU(SB) +TEXT runtime·panicSliceAcap(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSliceAcap(SB) +TEXT runtime·panicSliceAcapU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSliceAcapU(SB) +TEXT runtime·panicSliceB(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSliceB(SB) +TEXT runtime·panicSliceBU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSliceBU(SB) +TEXT runtime·panicSlice3Alen(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP 
runtime·goPanicSlice3Alen(SB) +TEXT runtime·panicSlice3AlenU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3AlenU(SB) +TEXT runtime·panicSlice3Acap(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3Acap(SB) +TEXT runtime·panicSlice3AcapU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSlice3AcapU(SB) +TEXT runtime·panicSlice3B(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSlice3B(SB) +TEXT runtime·panicSlice3BU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R21, R4 + MOVV R23, R5 +#else + MOVV R21, x+0(FP) + MOVV R23, y+8(FP) +#endif + JMP runtime·goPanicSlice3BU(SB) +TEXT runtime·panicSlice3C(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSlice3C(SB) +TEXT runtime·panicSlice3CU(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R20, R4 + MOVV R21, R5 +#else + MOVV R20, x+0(FP) + MOVV R21, y+8(FP) +#endif + JMP runtime·goPanicSlice3CU(SB) +TEXT runtime·panicSliceConvert(SB),NOSPLIT,$0-16 +#ifdef GOEXPERIMENT_regabiargs + MOVV R23, R4 + MOVV R24, R5 +#else + MOVV R23, x+0(FP) + MOVV R24, y+8(FP) +#endif + JMP runtime·goPanicSliceConvert(SB) diff --git a/src/runtime/asm_ppc64x.s b/src/runtime/asm_ppc64x.s index 922c50dc4f..ff9b736430 100644 --- a/src/runtime/asm_ppc64x.s +++ b/src/runtime/asm_ppc64x.s @@ -284,6 +284,31 @@ noswitch: #endif RET +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD R3, R11 // context register + MOVD g_m(g), R3 // curm + + // set g 
to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + CALL runtime·save_g(SB) // clobbers R31 + MOVD R3, g_m(g) // g.m = curm + MOVD g, m_g0(R3) // curm.g0 = g + + // switch to crashstack + MOVD (g_stack+stack_hi)(g), R3 + SUB $(4*8), R3 + MOVD R3, R1 + + // call target function + MOVD 0(R11), R12 // code pointer + MOVD R12, CTR + BL (CTR) + + // should never return + CALL runtime·abort(SB) + UNDEF + /* * support for morestack */ @@ -297,6 +322,14 @@ noswitch: // calling the scheduler calling newm calling gc), so we must // record an argument size. For that purpose, it has no arguments. TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Called from f. + // Set g->sched to context in f. + MOVD R1, (g_sched+gobuf_sp)(g) + MOVD LR, R8 + MOVD R8, (g_sched+gobuf_pc)(g) + MOVD R5, (g_sched+gobuf_lr)(g) + MOVD R11, (g_sched+gobuf_ctxt)(g) + // Cannot grow scheduler stack (m->g0). MOVD g_m(g), R7 MOVD m_g0(R7), R8 @@ -312,14 +345,6 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 BL runtime·badmorestackgsignal(SB) BL runtime·abort(SB) - // Called from f. - // Set g->sched to context in f. - MOVD R1, (g_sched+gobuf_sp)(g) - MOVD LR, R8 - MOVD R8, (g_sched+gobuf_pc)(g) - MOVD R5, (g_sched+gobuf_lr)(g) - MOVD R11, (g_sched+gobuf_ctxt)(g) - // Called from f. // Set m->morebuf to f's caller. 
MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC diff --git a/src/runtime/asm_s390x.s b/src/runtime/asm_s390x.s index a8e1424bf1..f2354a6d53 100644 --- a/src/runtime/asm_s390x.s +++ b/src/runtime/asm_s390x.s @@ -292,6 +292,29 @@ noswitch: ADD $8, R15 BR (R3) +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), R12 // context + MOVD g_m(g), R4 // curm + + // set g to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + BL runtime·save_g(SB) + MOVD R4, g_m(g) // g.m = curm + MOVD g, m_g0(R4) // curm.g0 = g + + // switch to crashstack + MOVD (g_stack+stack_hi)(g), R4 + ADD $(-4*8), R4, R15 + + // call target function + MOVD 0(R12), R3 // code pointer + BL (R3) + + // should never return + BL runtime·abort(SB) + UNDEF + /* * support for morestack */ @@ -305,6 +328,14 @@ noswitch: // calling the scheduler calling newm calling gc), so we must // record an argument size. For that purpose, it has no arguments. TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 + // Called from f. + // Set g->sched to context in f. + MOVD R15, (g_sched+gobuf_sp)(g) + MOVD LR, R8 + MOVD R8, (g_sched+gobuf_pc)(g) + MOVD R5, (g_sched+gobuf_lr)(g) + MOVD R12, (g_sched+gobuf_ctxt)(g) + // Cannot grow scheduler stack (m->g0). MOVD g_m(g), R7 MOVD m_g0(R7), R8 @@ -319,14 +350,6 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0 BL runtime·badmorestackgsignal(SB) BL runtime·abort(SB) - // Called from f. - // Set g->sched to context in f. - MOVD R15, (g_sched+gobuf_sp)(g) - MOVD LR, R8 - MOVD R8, (g_sched+gobuf_pc)(g) - MOVD R5, (g_sched+gobuf_lr)(g) - MOVD R12, (g_sched+gobuf_ctxt)(g) - // Called from f. // Set m->morebuf to f's caller. 
MOVD R5, (m_morebuf+gobuf_pc)(R7) // f's caller's PC diff --git a/src/runtime/asm_wasm.s b/src/runtime/asm_wasm.s index a96115b02c..b44a4f7dd4 100644 --- a/src/runtime/asm_wasm.s +++ b/src/runtime/asm_wasm.s @@ -140,6 +140,7 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 I64Ne If CALLNORESUME runtime·badsystemstack(SB) + CALLNORESUME runtime·abort(SB) End // switch: @@ -181,6 +182,9 @@ TEXT runtime·systemstack(SB), NOSPLIT, $0-8 TEXT runtime·systemstack_switch(SB), NOSPLIT, $0-0 RET +TEXT runtime·abort(SB),NOSPLIT|NOFRAME,$0-0 + UNDEF + // AES hashing not implemented for wasm TEXT runtime·memhash(SB),NOSPLIT|NOFRAME,$0-32 JMP runtime·memhashFallback(SB) @@ -208,6 +212,33 @@ TEXT runtime·procyield(SB), NOSPLIT, $0-0 // FIXME TEXT runtime·breakpoint(SB), NOSPLIT, $0-0 UNDEF +// func switchToCrashStack0(fn func()) +TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-8 + MOVD fn+0(FP), CTXT // context register + MOVD g_m(g), R2 // curm + + // set g to gcrash + MOVD $runtime·gcrash(SB), g // g = &gcrash + MOVD R2, g_m(g) // g.m = curm + MOVD g, m_g0(R2) // curm.g0 = g + + // switch to crashstack + I64Load (g_stack+stack_hi)(g) + I64Const $(-4*8) + I64Add + I32WrapI64 + Set SP + + // call target function + Get CTXT + I32WrapI64 + I64Load $0 + CALL + + // should never return + CALL runtime·abort(SB) + UNDEF + // Called during function prolog when more stack is needed. // // The traceback routines see morestack on a g0 as being @@ -221,12 +252,19 @@ TEXT runtime·morestack(SB), NOSPLIT, $0-0 // R2 = g0 MOVD m_g0(R1), R2 + // Set g->sched to context in f. + NOP SP // tell vet SP changed - stop checking offsets + MOVD 0(SP), g_sched+gobuf_pc(g) + MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP + MOVD CTXT, g_sched+gobuf_ctxt(g) + // Cannot grow scheduler stack (m->g0). Get g Get R2 I64Eq If CALLNORESUME runtime·badmorestackg0(SB) + CALLNORESUME runtime·abort(SB) End // Cannot grow signal stack (m->gsignal). 
@@ -235,20 +273,15 @@ TEXT runtime·morestack(SB), NOSPLIT, $0-0 I64Eq If CALLNORESUME runtime·badmorestackgsignal(SB) + CALLNORESUME runtime·abort(SB) End // Called from f. // Set m->morebuf to f's caller. - NOP SP // tell vet SP changed - stop checking offsets MOVD 8(SP), m_morebuf+gobuf_pc(R1) MOVD $16(SP), m_morebuf+gobuf_sp(R1) // f's caller's SP MOVD g, m_morebuf+gobuf_g(R1) - // Set g->sched to context in f. - MOVD 0(SP), g_sched+gobuf_pc(g) - MOVD $8(SP), g_sched+gobuf_sp(g) // f's SP - MOVD CTXT, g_sched+gobuf_ctxt(g) - // Call newstack on m->g0's stack. MOVD R2, g MOVD g_sched+gobuf_sp(R2), SP diff --git a/src/runtime/cgo/asm_arm.s b/src/runtime/cgo/asm_arm.s index 095e9c06c9..425899ebe5 100644 --- a/src/runtime/cgo/asm_arm.s +++ b/src/runtime/cgo/asm_arm.s @@ -32,10 +32,10 @@ TEXT crosscall2(SB),NOSPLIT|NOFRAME,$0 // starting at 4(R13). MOVW.W R14, -4(R13) - // Skip floating point registers on GOARM < 6. - MOVB runtime·goarm(SB), R11 - CMP $6, R11 - BLT skipfpsave + // Skip floating point registers if goarmsoftfp!=0. + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfpsave MOVD F8, (13*4+8*1)(R13) MOVD F9, (13*4+8*2)(R13) MOVD F10, (13*4+8*3)(R13) @@ -50,9 +50,9 @@ skipfpsave: // We set up the arguments to cgocallback when saving registers above. BL runtime·cgocallback(SB) - MOVB runtime·goarm(SB), R11 - CMP $6, R11 - BLT skipfprest + MOVB runtime·goarmsoftfp(SB), R11 + CMP $0, R11 + BNE skipfprest MOVD (13*4+8*1)(R13), F8 MOVD (13*4+8*2)(R13), F9 MOVD (13*4+8*3)(R13), F10 diff --git a/src/runtime/cgo/gcc_libinit.c b/src/runtime/cgo/gcc_libinit.c index 68f4a02379..33a9ff93ca 100644 --- a/src/runtime/cgo/gcc_libinit.c +++ b/src/runtime/cgo/gcc_libinit.c @@ -4,6 +4,13 @@ //go:build unix +// When cross-compiling with clang to linux/armv5, atomics are emulated +// and cause a compiler warning. This results in a build failure since +// cgo uses -Werror. See #65290. 
+#pragma GCC diagnostic ignored "-Wpragmas" +#pragma GCC diagnostic ignored "-Wunknown-warning-option" +#pragma GCC diagnostic ignored "-Watomic-alignment" + #include #include #include diff --git a/src/runtime/cgo/gcc_stack_unix.c b/src/runtime/cgo/gcc_stack_unix.c index f3fead9c9e..67efd9bc63 100644 --- a/src/runtime/cgo/gcc_stack_unix.c +++ b/src/runtime/cgo/gcc_stack_unix.c @@ -29,6 +29,8 @@ x_cgo_getstackbound(uintptr bounds[2]) pthread_attr_get_np(pthread_self(), &attr); pthread_attr_getstack(&attr, &addr, &size); // low address #else + // We don't know how to get the current stacks, so assume they are the + // same as the default stack bounds. pthread_attr_init(&attr); pthread_attr_getstacksize(&attr, &size); addr = __builtin_frame_address(0) + 4096 - size; diff --git a/src/runtime/cgo/libcgo.h b/src/runtime/cgo/libcgo.h index 295c12c53c..26da68fadb 100644 --- a/src/runtime/cgo/libcgo.h +++ b/src/runtime/cgo/libcgo.h @@ -76,7 +76,7 @@ void x_cgo_getstackbound(uintptr bounds[2]); /* * Prints error then calls abort. For linux and android. */ -void fatalf(const char* format, ...); +void fatalf(const char* format, ...) __attribute__ ((noreturn)); /* * Registers the current mach thread port for EXC_BAD_ACCESS processing. diff --git a/src/runtime/cgocall.go b/src/runtime/cgocall.go index f2dd98702d..05fa47158a 100644 --- a/src/runtime/cgocall.go +++ b/src/runtime/cgocall.go @@ -541,7 +541,7 @@ const cgoResultFail = "cgo result is unpinned Go pointer or points to unpinned G // level, where Go pointers are allowed. Go pointers to pinned objects are // allowed as long as they don't reference other unpinned pointers. func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { - if t.PtrBytes == 0 || p == nil { + if !t.Pointers() || p == nil { // If the type has no pointers there is nothing to do. 
return } @@ -604,7 +604,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { if !top && !isPinned(p) { panic(errorString(msg)) } - if st.Elem.PtrBytes == 0 { + if !st.Elem.Pointers() { return } for i := 0; i < s.cap; i++ { @@ -629,7 +629,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) { return } for _, f := range st.Fields { - if f.Typ.PtrBytes == 0 { + if !f.Typ.Pointers() { continue } cgoCheckArg(f.Typ, add(p, f.Offset), true, top, msg) diff --git a/src/runtime/cgocheck.go b/src/runtime/cgocheck.go index 3d6de4f855..fd87723dfc 100644 --- a/src/runtime/cgocheck.go +++ b/src/runtime/cgocheck.go @@ -90,7 +90,7 @@ func cgoCheckMemmove(typ *_type, dst, src unsafe.Pointer) { //go:nosplit //go:nowritebarrier func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) { - if typ.PtrBytes == 0 { + if !typ.Pointers() { return } if !cgoIsGoPointer(src) { @@ -111,7 +111,7 @@ func cgoCheckMemmove2(typ *_type, dst, src unsafe.Pointer, off, size uintptr) { //go:nosplit //go:nowritebarrier func cgoCheckSliceCopy(typ *_type, dst, src unsafe.Pointer, n int) { - if typ.PtrBytes == 0 { + if !typ.Pointers() { return } if !cgoIsGoPointer(src) { @@ -247,7 +247,7 @@ func cgoCheckBits(src unsafe.Pointer, gcbits *byte, off, size uintptr) { //go:nowritebarrier //go:systemstack func cgoCheckUsingType(typ *_type, src unsafe.Pointer, off, size uintptr) { - if typ.PtrBytes == 0 { + if !typ.Pointers() { return } diff --git a/src/runtime/chan.go b/src/runtime/chan.go index ff9e2a9155..c793d6cef3 100644 --- a/src/runtime/chan.go +++ b/src/runtime/chan.go @@ -96,7 +96,7 @@ func makechan(t *chantype, size int) *hchan { c = (*hchan)(mallocgc(hchanSize, nil, true)) // Race detector uses this location for synchronization. c.buf = c.raceaddr() - case elem.PtrBytes == 0: + case !elem.Pointers(): // Elements do not contain pointers. // Allocate hchan and buf in one call. 
c = (*hchan)(mallocgc(hchanSize+mem, nil, true)) @@ -724,20 +724,21 @@ func reflect_chanrecv(c *hchan, nb bool, elem unsafe.Pointer) (selected bool, re return chanrecv(c, elem, !nb) } -//go:linkname reflect_chanlen reflect.chanlen -func reflect_chanlen(c *hchan) int { +func chanlen(c *hchan) int { if c == nil { return 0 } return int(c.qcount) } +//go:linkname reflect_chanlen reflect.chanlen +func reflect_chanlen(c *hchan) int { + return chanlen(c) +} + //go:linkname reflectlite_chanlen internal/reflectlite.chanlen func reflectlite_chanlen(c *hchan) int { - if c == nil { - return 0 - } - return int(c.qcount) + return chanlen(c) } //go:linkname reflect_chancap reflect.chancap diff --git a/src/runtime/checkptr.go b/src/runtime/checkptr.go index 3c49645a44..810787bff5 100644 --- a/src/runtime/checkptr.go +++ b/src/runtime/checkptr.go @@ -16,7 +16,7 @@ func checkptrAlignment(p unsafe.Pointer, elem *_type, n uintptr) { // Note that we allow unaligned pointers if the types they point to contain // no pointers themselves. See issue 37298. // TODO(mdempsky): What about fieldAlign? - if elem.PtrBytes != 0 && uintptr(p)&(uintptr(elem.Align_)-1) != 0 { + if elem.Pointers() && uintptr(p)&(uintptr(elem.Align_)-1) != 0 { throw("checkptr: misaligned pointer conversion") } diff --git a/src/runtime/coro.go b/src/runtime/coro.go new file mode 100644 index 0000000000..0d6666e343 --- /dev/null +++ b/src/runtime/coro.go @@ -0,0 +1,165 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime + +import "unsafe" + +// A coro represents extra concurrency without extra parallelism, +// as would be needed for a coroutine implementation. +// The coro does not represent a specific coroutine, only the ability +// to do coroutine-style control transfers. +// It can be thought of as like a special channel that always has +// a goroutine blocked on it. 
If another goroutine calls coroswitch(c), +// the caller becomes the goroutine blocked in c, and the goroutine +// formerly blocked in c starts running. +// These switches continue until a call to coroexit(c), +// which ends the use of the coro by releasing the blocked +// goroutine in c and exiting the current goroutine. +// +// Coros are heap allocated and garbage collected, so that user code +// can hold a pointer to a coro without causing potential dangling +// pointer errors. +type coro struct { + gp guintptr + f func(*coro) +} + +//go:linkname newcoro + +// newcoro creates a new coro containing a +// goroutine blocked waiting to run f +// and returns that coro. +func newcoro(f func(*coro)) *coro { + c := new(coro) + c.f = f + pc := getcallerpc() + gp := getg() + systemstack(func() { + start := corostart + startfv := *(**funcval)(unsafe.Pointer(&start)) + gp = newproc1(startfv, gp, pc) + }) + gp.coroarg = c + gp.waitreason = waitReasonCoroutine + casgstatus(gp, _Grunnable, _Gwaiting) + c.gp.set(gp) + return c +} + +//go:linkname corostart + +// corostart is the entry func for a new coroutine. +// It runs the coroutine user function f passed to corostart +// and then calls coroexit to remove the extra concurrency. +func corostart() { + gp := getg() + c := gp.coroarg + gp.coroarg = nil + + c.f(c) + coroexit(c) +} + +// coroexit is like coroswitch but closes the coro +// and exits the current goroutine +func coroexit(c *coro) { + gp := getg() + gp.coroarg = c + gp.coroexit = true + mcall(coroswitch_m) +} + +//go:linkname coroswitch + +// coroswitch switches to the goroutine blocked on c +// and then blocks the current goroutine on c. +func coroswitch(c *coro) { + gp := getg() + gp.coroarg = c + mcall(coroswitch_m) +} + +// coroswitch_m is the implementation of coroswitch +// that runs on the m stack. 
+// +// Note: Coroutine switches are expected to happen at +// an order of magnitude (or more) higher frequency +// than regular goroutine switches, so this path is heavily +// optimized to remove unnecessary work. +// The fast path here is three CAS: the one at the top on gp.atomicstatus, +// the one in the middle to choose the next g, +// and the one at the bottom on gnext.atomicstatus. +// It is important not to add more atomic operations or other +// expensive operations to the fast path. +func coroswitch_m(gp *g) { + // TODO(rsc,mknyszek): add tracing support in a lightweight manner. + // Probably the tracer will need a global bool (set and cleared during STW) + // that this code can check to decide whether to use trace.gen.Load(); + // we do not want to do the atomic load all the time, especially when + // tracer use is relatively rare. + c := gp.coroarg + gp.coroarg = nil + exit := gp.coroexit + gp.coroexit = false + mp := gp.m + + if exit { + gdestroy(gp) + gp = nil + } else { + // If we can CAS ourselves directly from running to waiting, so do, + // keeping the control transfer as lightweight as possible. + gp.waitreason = waitReasonCoroutine + if !gp.atomicstatus.CompareAndSwap(_Grunning, _Gwaiting) { + // The CAS failed: use casgstatus, which will take care of + // coordinating with the garbage collector about the state change. + casgstatus(gp, _Grunning, _Gwaiting) + } + + // Clear gp.m. + setMNoWB(&gp.m, nil) + } + + // The goroutine stored in c is the one to run next. + // Swap it with ourselves. + var gnext *g + for { + // Note: this is a racy load, but it will eventually + // get the right value, and if it gets the wrong value, + // the c.gp.cas will fail, so no harm done other than + // a wasted loop iteration. + // The cas will also sync c.gp's + // memory enough that the next iteration of the racy load + // should see the correct value. + // We are avoiding the atomic load to keep this path + // as lightweight as absolutely possible. 
+ // (The atomic load is free on x86 but not free elsewhere.) + next := c.gp + if next.ptr() == nil { + throw("coroswitch on exited coro") + } + var self guintptr + self.set(gp) + if c.gp.cas(next, self) { + gnext = next.ptr() + break + } + } + + // Start running next, without heavy scheduling machinery. + // Set mp.curg and gnext.m and then update scheduling state + // directly if possible. + setGNoWB(&mp.curg, gnext) + setMNoWB(&gnext.m, mp) + if !gnext.atomicstatus.CompareAndSwap(_Gwaiting, _Grunning) { + // The CAS failed: use casgstatus, which will take care of + // coordinating with the garbage collector about the state change. + casgstatus(gnext, _Gwaiting, _Grunnable) + casgstatus(gnext, _Grunnable, _Grunning) + } + + // Switch to gnext. Does not return. + gogo(&gnext.sched) +} diff --git a/src/runtime/coverage/testdata/harness.go b/src/runtime/coverage/testdata/harness.go index 5c87e4cf7d..03969da426 100644 --- a/src/runtime/coverage/testdata/harness.go +++ b/src/runtime/coverage/testdata/harness.go @@ -9,8 +9,8 @@ import ( "fmt" "internal/coverage/slicewriter" "io" - "io/ioutil" "log" + "os" "path/filepath" "runtime/coverage" "strings" @@ -27,7 +27,7 @@ func emitToWriter() { log.Fatalf("error: WriteMeta returns %v", err) } mf := filepath.Join(*outdirflag, "covmeta.0abcdef") - if err := ioutil.WriteFile(mf, slwm.BytesWritten(), 0666); err != nil { + if err := os.WriteFile(mf, slwm.BytesWritten(), 0666); err != nil { log.Fatalf("error: writing %s: %v", mf, err) } var slwc slicewriter.WriteSeeker @@ -35,7 +35,7 @@ func emitToWriter() { log.Fatalf("error: WriteCounters returns %v", err) } cf := filepath.Join(*outdirflag, "covcounters.0abcdef.99.77") - if err := ioutil.WriteFile(cf, slwc.BytesWritten(), 0666); err != nil { + if err := os.WriteFile(cf, slwc.BytesWritten(), 0666); err != nil { log.Fatalf("error: writing %s: %v", cf, err) } } diff --git a/src/runtime/cpuprof.go b/src/runtime/cpuprof.go index 331484b1ff..b2898ba909 100644 --- 
a/src/runtime/cpuprof.go +++ b/src/runtime/cpuprof.go @@ -62,8 +62,8 @@ var cpuprof cpuProfile // If hz <= 0, SetCPUProfileRate turns off profiling. // If the profiler is on, the rate cannot be changed without first turning it off. // -// Most clients should use the runtime/pprof package or -// the testing package's -test.cpuprofile flag instead of calling +// Most clients should use the [runtime/pprof] package or +// the [testing] package's -test.cpuprofile flag instead of calling // SetCPUProfileRate directly. func SetCPUProfileRate(hz int) { // Clamp hz to something reasonable. @@ -204,7 +204,7 @@ func (p *cpuProfile) addExtra() { // // Deprecated: Use the [runtime/pprof] package, // or the handlers in the [net/http/pprof] package, -// or the testing package's -test.cpuprofile flag instead. +// or the [testing] package's -test.cpuprofile flag instead. func CPUProfile() []byte { panic("CPUProfile no longer available") } diff --git a/src/runtime/crash_test.go b/src/runtime/crash_test.go index ffd99f3a87..9ba45b8f2a 100644 --- a/src/runtime/crash_test.go +++ b/src/runtime/crash_test.go @@ -6,15 +6,21 @@ package runtime_test import ( "bytes" + "context" "errors" "flag" "fmt" + "internal/goexperiment" "internal/testenv" + tracev2 "internal/trace/v2" + "io" + "log" "os" "os/exec" "path/filepath" "regexp" "runtime" + "runtime/trace" "strings" "sync" "testing" @@ -23,7 +29,19 @@ import ( var toRemove []string +const entrypointVar = "RUNTIME_TEST_ENTRYPOINT" + func TestMain(m *testing.M) { + switch entrypoint := os.Getenv(entrypointVar); entrypoint { + case "crash": + crash() + panic("unreachable") + default: + log.Fatalf("invalid %s: %q", entrypointVar, entrypoint) + case "": + // fall through to normal behavior + } + _, coreErrBefore := os.Stat("core") status := m.Run() @@ -773,6 +791,16 @@ func init() { // We expect to crash, so exit 0 to indicate failure. 
os.Exit(0) } + if os.Getenv("GO_TEST_RUNTIME_NPE_READMEMSTATS") == "1" { + runtime.ReadMemStats(nil) + os.Exit(0) + } + if os.Getenv("GO_TEST_RUNTIME_NPE_FUNCMETHOD") == "1" { + var f *runtime.Func + _ = f.Entry() + os.Exit(0) + } + } func TestRuntimePanic(t *testing.T) { @@ -788,6 +816,32 @@ func TestRuntimePanic(t *testing.T) { } } +func TestTracebackRuntimeFunction(t *testing.T) { + testenv.MustHaveExec(t) + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestTracebackRuntimeFunction")) + cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_NPE_READMEMSTATS=1") + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + if err == nil { + t.Error("child process did not fail") + } else if want := "runtime.ReadMemStats"; !bytes.Contains(out, []byte(want)) { + t.Errorf("output did not contain expected string %q", want) + } +} + +func TestTracebackRuntimeMethod(t *testing.T) { + testenv.MustHaveExec(t) + cmd := testenv.CleanCmdEnv(exec.Command(os.Args[0], "-test.run=TestTracebackRuntimeMethod")) + cmd.Env = append(cmd.Env, "GO_TEST_RUNTIME_NPE_FUNCMETHOD=1") + out, err := cmd.CombinedOutput() + t.Logf("%s", out) + if err == nil { + t.Error("child process did not fail") + } else if want := "runtime.(*Func).Entry"; !bytes.Contains(out, []byte(want)) { + t.Errorf("output did not contain expected string %q", want) + } +} + // Test that g0 stack overflows are handled gracefully. 
func TestG0StackOverflow(t *testing.T) { testenv.MustHaveExec(t) @@ -795,14 +849,12 @@ func TestG0StackOverflow(t *testing.T) { if runtime.GOOS == "ios" { testenv.SkipFlaky(t, 62671) } - if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" { - testenv.SkipFlaky(t, 63938) // TODO(cherry): fix and unskip - } if os.Getenv("TEST_G0_STACK_OVERFLOW") != "1" { cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0], "-test.run=^TestG0StackOverflow$", "-test.v")) cmd.Env = append(cmd.Env, "TEST_G0_STACK_OVERFLOW=1") out, err := cmd.CombinedOutput() + t.Logf("output:\n%s", out) // Don't check err since it's expected to crash. if n := strings.Count(string(out), "morestack on g0\n"); n != 1 { t.Fatalf("%s\n(exit status %v)", out, err) @@ -827,6 +879,71 @@ func TestG0StackOverflow(t *testing.T) { runtime.G0StackOverflow() } +// For TestCrashWhileTracing: test a panic without involving the testing +// harness, as we rely on stdout only containing trace output. +func init() { + if os.Getenv("TEST_CRASH_WHILE_TRACING") == "1" { + trace.Start(os.Stdout) + trace.Log(context.Background(), "xyzzy-cat", "xyzzy-msg") + panic("yzzyx") + } +} + +func TestCrashWhileTracing(t *testing.T) { + if !goexperiment.ExecTracer2 { + t.Skip("skipping because this test is incompatible with the legacy tracer") + } + + testenv.MustHaveExec(t) + + cmd := testenv.CleanCmdEnv(testenv.Command(t, os.Args[0])) + cmd.Env = append(cmd.Env, "TEST_CRASH_WHILE_TRACING=1") + stdOut, err := cmd.StdoutPipe() + var errOut bytes.Buffer + cmd.Stderr = &errOut + + if err := cmd.Start(); err != nil { + t.Fatalf("could not start subprocess: %v", err) + } + r, err := tracev2.NewReader(stdOut) + if err != nil { + t.Fatalf("could not create trace.NewReader: %v", err) + } + var seen bool + i := 1 +loop: + for ; ; i++ { + ev, err := r.ReadEvent() + if err != nil { + if err != io.EOF { + t.Errorf("error at event %d: %v", i, err) + } + break loop + } + switch ev.Kind() { + case tracev2.EventLog: + v := ev.Log() + if 
v.Category == "xyzzy-cat" && v.Message == "xyzzy-msg" { + // Should we already stop reading here? More events may come, but + // we're not guaranteeing a fully unbroken trace until the last + // byte... + seen = true + } + } + } + if err := cmd.Wait(); err == nil { + t.Error("the process should have panicked") + } + if !seen { + t.Errorf("expected one matching log event matching, but none of the %d received trace events match", i) + } + t.Logf("stderr output:\n%s", errOut.String()) + needle := "yzzyx\n" + if n := strings.Count(errOut.String(), needle); n != 1 { + t.Fatalf("did not find expected panic message %q\n(exit status %v)", needle, err) + } +} + // Test that panic message is not clobbered. // See issue 30150. func TestDoublePanic(t *testing.T) { diff --git a/src/runtime/debug.go b/src/runtime/debug.go index 0e61692f3d..3233ce8ee7 100644 --- a/src/runtime/debug.go +++ b/src/runtime/debug.go @@ -52,6 +52,17 @@ func NumCgoCall() int64 { return n } +func totalMutexWaitTimeNanos() int64 { + total := sched.totalMutexWaitTime.Load() + + total += sched.totalRuntimeLockWaitTime.Load() + for mp := (*m)(atomic.Loadp(unsafe.Pointer(&allm))); mp != nil; mp = mp.alllink { + total += mp.mLockProfile.waitTime.Load() + } + + return total +} + // NumGoroutine returns the number of goroutines that currently exist. func NumGoroutine() int { return int(gcount()) diff --git a/src/runtime/debug/example_monitor_test.go b/src/runtime/debug/example_monitor_test.go new file mode 100644 index 0000000000..5a1f4e1417 --- /dev/null +++ b/src/runtime/debug/example_monitor_test.go @@ -0,0 +1,99 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package debug_test + +import ( + "io" + "log" + "os" + "os/exec" + "runtime/debug" +) + +// ExampleSetCrashOutput_monitor shows an example of using +// [debug.SetCrashOutput] to direct crashes to a "monitor" process, +// for automated crash reporting. The monitor is the same executable, +// invoked in a special mode indicated by an environment variable. +func ExampleSetCrashOutput_monitor() { + appmain() + + // This Example doesn't actually run as a test because its + // purpose is to crash, so it has no "Output:" comment + // within the function body. + // + // To observe the monitor in action, replace the entire text + // of this comment with "Output:" and run this command: + // + // $ go test -run=ExampleSetCrashOutput_monitor runtime/debug + // panic: oops + // ...stack... + // monitor: saved crash report at /tmp/10804884239807998216.crash +} + +// appmain represents the 'main' function of your application. +func appmain() { + monitor() + + // Run the application. + println("hello") + panic("oops") +} + +// monitor starts the monitor process, which performs automated +// crash reporting. Call this function immediately within main. +// +// This function re-executes the same executable as a child process, +// in a special mode. In that mode, the call to monitor will never +// return. +func monitor() { + const monitorVar = "RUNTIME_DEBUG_MONITOR" + if os.Getenv(monitorVar) != "" { + // This is the monitor (child) process. + log.SetFlags(0) + log.SetPrefix("monitor: ") + + crash, err := io.ReadAll(os.Stdin) + if err != nil { + log.Fatalf("failed to read from input pipe: %v", err) + } + if len(crash) == 0 { + // Parent process terminated without reporting a crash. + os.Exit(0) + } + + // Save the crash report securely in the file system. 
+ f, err := os.CreateTemp("", "*.crash") + if err != nil { + log.Fatal(err) + } + if _, err := f.Write(crash); err != nil { + log.Fatal(err) + } + if err := f.Close(); err != nil { + log.Fatal(err) + } + log.Fatalf("saved crash report at %s", f.Name()) + } + + // This is the application process. + // Fork+exec the same executable in monitor mode. + exe, err := os.Executable() + if err != nil { + log.Fatal(err) + } + cmd := exec.Command(exe, "-test.run=ExampleSetCrashOutput_monitor") + cmd.Env = append(os.Environ(), monitorVar+"=1") + cmd.Stderr = os.Stderr + cmd.Stdout = os.Stderr + pipe, err := cmd.StdinPipe() + if err != nil { + log.Fatalf("StdinPipe: %v", err) + } + debug.SetCrashOutput(pipe.(*os.File)) // (this conversion is safe) + if err := cmd.Start(); err != nil { + log.Fatalf("can't start monitor: %v", err) + } + // Now return and start the application proper... +} diff --git a/src/runtime/debug/stack.go b/src/runtime/debug/stack.go index 3999840d3c..8dfea52d34 100644 --- a/src/runtime/debug/stack.go +++ b/src/runtime/debug/stack.go @@ -7,8 +7,10 @@ package debug import ( + "internal/poll" "os" "runtime" + _ "unsafe" // for linkname ) // PrintStack prints to standard error the stack trace returned by runtime.Stack. @@ -28,3 +30,58 @@ func Stack() []byte { buf = make([]byte, 2*len(buf)) } } + +// SetCrashOutput configures a single additional file where unhandled +// panics and other fatal errors are printed, in addition to standard error. +// There is only one additional file: calling SetCrashOutput again overrides +// any earlier call. +// SetCrashOutput duplicates f's file descriptor, so the caller may safely +// close f as soon as SetCrashOutput returns. +// To disable this additional crash output, call SetCrashOutput(nil). +// If called concurrently with a crash, some in-progress output may be written +// to the old file even after an overriding SetCrashOutput returns. 
+func SetCrashOutput(f *os.File) error { + fd := ^uintptr(0) + if f != nil { + // The runtime will write to this file descriptor from + // low-level routines during a panic, possibly without + // a G, so we must call f.Fd() eagerly. This creates a + // danger that the file descriptor is no longer + // valid at the time of the write, because the caller + // (incorrectly) called f.Close() and the kernel + // reissued the fd in a later call to open(2), leading + // to crashes being written to the wrong file. + // + // So, we duplicate the fd to obtain a private one + // that cannot be closed by the user. + // This also alleviates us from concerns about the + // lifetime and finalization of f. + // (DupCloseOnExec returns an fd, not a *File, so + // there is no finalizer, and we are responsible for + // closing it.) + // + // The new fd must be close-on-exec, otherwise if the + // crash monitor is a child process, it may inherit + // it, so it will never see EOF from the pipe even + // when this process crashes. + // + // A side effect of Fd() is that it calls SetBlocking, + // which is important so that writes of a crash report + // to a full pipe buffer don't get lost. + fd2, _, err := poll.DupCloseOnExec(int(f.Fd())) + if err != nil { + return err + } + runtime.KeepAlive(f) // prevent finalization before dup + fd = uintptr(fd2) + } + if prev := runtime_setCrashFD(fd); prev != ^uintptr(0) { + // We use NewFile+Close because it is portable + // unlike syscall.Close, whose parameter type varies.
+ os.NewFile(prev, "").Close() // ignore error + } + return nil +} + +//go:linkname runtime_setCrashFD runtime.setCrashFD +func runtime_setCrashFD(uintptr) uintptr diff --git a/src/runtime/debug/stack_test.go b/src/runtime/debug/stack_test.go index 671057c3a0..289749ccb4 100644 --- a/src/runtime/debug/stack_test.go +++ b/src/runtime/debug/stack_test.go @@ -8,6 +8,7 @@ import ( "bytes" "fmt" "internal/testenv" + "log" "os" "os/exec" "path/filepath" @@ -18,10 +19,24 @@ import ( ) func TestMain(m *testing.M) { - if os.Getenv("GO_RUNTIME_DEBUG_TEST_DUMP_GOROOT") != "" { + switch os.Getenv("GO_RUNTIME_DEBUG_TEST_ENTRYPOINT") { + case "dumpgoroot": fmt.Println(runtime.GOROOT()) os.Exit(0) + + case "setcrashoutput": + f, err := os.Create(os.Getenv("CRASHOUTPUT")) + if err != nil { + log.Fatal(err) + } + if err := SetCrashOutput(f); err != nil { + log.Fatal(err) // e.g. EMFILE + } + println("hello") + panic("oops") } + + // default: run the tests. os.Exit(m.Run()) } @@ -77,7 +92,7 @@ func TestStack(t *testing.T) { t.Fatal(err) } cmd := exec.Command(exe) - cmd.Env = append(os.Environ(), "GOROOT=", "GO_RUNTIME_DEBUG_TEST_DUMP_GOROOT=1") + cmd.Env = append(os.Environ(), "GOROOT=", "GO_RUNTIME_DEBUG_TEST_ENTRYPOINT=dumpgoroot") out, err := cmd.Output() if err != nil { t.Fatal(err) @@ -119,3 +134,64 @@ func TestStack(t *testing.T) { frame("runtime/debug/stack_test.go", "runtime/debug_test.TestStack") frame("testing/testing.go", "") } + +func TestSetCrashOutput(t *testing.T) { + testenv.MustHaveExec(t) + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + crashOutput := filepath.Join(t.TempDir(), "crash.out") + + cmd := exec.Command(exe) + cmd.Stderr = new(strings.Builder) + cmd.Env = append(os.Environ(), "GO_RUNTIME_DEBUG_TEST_ENTRYPOINT=setcrashoutput", "CRASHOUTPUT="+crashOutput) + err = cmd.Run() + stderr := fmt.Sprint(cmd.Stderr) + if err == nil { + t.Fatalf("child process succeeded unexpectedly (stderr: %s)", stderr) + } + t.Logf("child process finished 
with error %v and stderr <<%s>>", err, stderr) + + // Read the file the child process should have written. + // It should contain a crash report such as this: + // + // panic: oops + // + // goroutine 1 [running]: + // runtime/debug_test.TestMain(0x1400007e0a0) + // GOROOT/src/runtime/debug/stack_test.go:33 +0x18c + // main.main() + // _testmain.go:71 +0x170 + data, err := os.ReadFile(crashOutput) + if err != nil { + t.Fatalf("child process failed to write crash report: %v", err) + } + crash := string(data) + t.Logf("crash = <<%s>>", crash) + t.Logf("stderr = <<%s>>", stderr) + + // Check that the crash file and the stderr both contain the panic and stack trace. + for _, want := range []string{ + "panic: oops", + "goroutine 1", + "debug_test.TestMain", + } { + if !strings.Contains(crash, want) { + t.Errorf("crash output does not contain %q", want) + } + if !strings.Contains(stderr, want) { + t.Errorf("stderr output does not contain %q", want) + } + } + + // Check that stderr, but not crash, contains the output of println(). 
+ printlnOnly := "hello" + if strings.Contains(crash, printlnOnly) { + t.Errorf("crash output contains %q, but should not", printlnOnly) + } + if !strings.Contains(stderr, printlnOnly) { + t.Errorf("stderr output does not contain %q, but should", printlnOnly) + } +} diff --git a/src/runtime/duff_loong64.s b/src/runtime/duff_loong64.s index 63fa3bcca1..b05502d91d 100644 --- a/src/runtime/duff_loong64.s +++ b/src/runtime/duff_loong64.s @@ -4,904 +4,904 @@ #include "textflag.h" -TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, 
R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, 
(R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 - MOVV R0, (R19) - ADDV $8, R19 +TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV 
R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + 
ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 + MOVV R0, (R20) + ADDV $8, R20 RET -TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) +TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0 + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, 
R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV 
(R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) 
+ MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, 
(R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - 
ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), 
R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV 
$8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 - MOVV (R19), R30 - ADDV $8, R19 - MOVV R30, (R20) + MOVV (R20), R30 ADDV $8, R20 + MOVV R30, (R21) + ADDV $8, R21 RET diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go index d2f3563956..9b84e96e50 100644 --- a/src/runtime/export_test.go +++ b/src/runtime/export_test.go @@ -31,6 +31,8 @@ var Exitsyscall = exitsyscall var LockedOSThread = lockedOSThread var Xadduintptr = atomic.Xadduintptr +var ReadRandomFailed = &readRandomFailed + var Fastlog2 = fastlog2 var Atoi = atoi 
@@ -349,7 +351,7 @@ func benchSetType(n int, resetTimer func(), len int, x unsafe.Pointer, t *_type) // Round up the size to the size class to make the benchmark a little more // realistic. However, validate it, to make sure this is safe. - allocSize := roundupsize(size, t.PtrBytes == 0) + allocSize := roundupsize(size, !t.Pointers()) if s.npages*pageSize < allocSize { panic("backing span not large enough for benchmark") } @@ -398,9 +400,9 @@ func CountPagesInUse() (pagesInUse, counted uintptr) { return } -func Fastrand() uint32 { return fastrand() } -func Fastrand64() uint64 { return fastrand64() } -func Fastrandn(n uint32) uint32 { return fastrandn(n) } +func Fastrand() uint32 { return uint32(rand()) } +func Fastrand64() uint64 { return rand() } +func Fastrandn(n uint32) uint32 { return randn(n) } type ProfBuf profBuf @@ -464,6 +466,8 @@ func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) startTheWorld(stw) } +var DoubleCheckReadMemStats = &doubleCheckReadMemStats + // ReadMemStatsSlow returns both the runtime-computed MemStats and // MemStats accumulated by scanning the heap. 
func ReadMemStatsSlow() (base, slow MemStats) { @@ -582,6 +586,10 @@ type RWMutex struct { rw rwmutex } +func (rw *RWMutex) Init() { + rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW) +} + func (rw *RWMutex) RLock() { rw.rw.rlock() } @@ -743,7 +751,7 @@ func MapTombstoneCheck(m map[int]int) { b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize))) n := 0 for b := b0; b != nil; b = b.overflow(t) { - for i := 0; i < bucketCnt; i++ { + for i := 0; i < abi.MapBucketCount; i++ { if b.tophash[i] != emptyRest { n++ } @@ -751,7 +759,7 @@ func MapTombstoneCheck(m map[int]int) { } k := 0 for b := b0; b != nil; b = b.overflow(t) { - for i := 0; i < bucketCnt; i++ { + for i := 0; i < abi.MapBucketCount; i++ { if k < n && b.tophash[i] == emptyRest { panic("early emptyRest") } @@ -1340,6 +1348,18 @@ func PageCachePagesLeaked() (leaked uintptr) { return } +type Mutex = mutex + +var Lock = lock +var Unlock = unlock + +var MutexContended = mutexContended + +func SemRootLock(addr *uint32) *mutex { + root := semtable.rootFor(addr) + return &root.lock +} + var Semacquire = semacquire var Semrelease1 = semrelease1 @@ -1920,24 +1940,8 @@ func UserArenaClone[T any](s T) T { var AlignUp = alignUp -// BlockUntilEmptyFinalizerQueue blocks until either the finalizer -// queue is emptied (and the finalizers have executed) or the timeout -// is reached. Returns true if the finalizer queue was emptied. func BlockUntilEmptyFinalizerQueue(timeout int64) bool { - start := nanotime() - for nanotime()-start < timeout { - lock(&finlock) - // We know the queue has been drained when both finq is nil - // and the finalizer g has stopped executing. 
- empty := finq == nil - empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait - unlock(&finlock) - if empty { - return true - } - Gosched() - } - return false + return blockUntilEmptyFinalizerQueue(timeout) } func FrameStartLine(f *Frame) int { diff --git a/src/runtime/extern.go b/src/runtime/extern.go index f8db296e6b..e42122fd3a 100644 --- a/src/runtime/extern.go +++ b/src/runtime/extern.go @@ -28,7 +28,7 @@ program. GOMEMLIMIT is a numeric value in bytes with an optional unit suffix. The supported suffixes include B, KiB, MiB, GiB, and TiB. These suffixes represent quantities of bytes as defined by the IEC 80000-13 standard. That is, they are based on powers of two: KiB means 2^10 bytes, MiB means 2^20 bytes, -and so on. The default setting is math.MaxInt64, which effectively disables the +and so on. The default setting is [math.MaxInt64], which effectively disables the memory limit. [runtime/debug.SetMemoryLimit] allows changing this limit at run time. @@ -55,6 +55,13 @@ It is a comma-separated list of name=val pairs setting these named variables: cgocheck mode can be enabled using GOEXPERIMENT (which requires a rebuild), see https://pkg.go.dev/internal/goexperiment for details. + disablethp: setting disablethp=1 on Linux disables transparent huge pages for the heap. + It has no effect on other platforms. disablethp is meant for compatibility with versions + of Go before 1.21, which stopped working around a Linux kernel default that can result + in significant memory overuse. See https://go.dev/issue/64332. This setting will be + removed in a future release, so operators should tweak their Linux configuration to suit + their needs before then. See https://go.dev/doc/gc-guide#Linux_transparent_huge_pages. 
+ dontfreezetheworld: by default, the start of a fatal panic or throw "freezes the world", preempting all threads to stop all running goroutines, which makes it possible to traceback all goroutines, and @@ -145,6 +152,18 @@ It is a comma-separated list of name=val pairs setting these named variables: risk in that scenario. Currently not supported on Windows, plan9 or js/wasm. Setting this option for some applications can produce large traces, so use with care. + panicnil: setting panicnil=1 disables the runtime error when calling panic with nil + interface value or an untyped nil. + + runtimecontentionstacks: setting runtimecontentionstacks=1 enables inclusion of call stacks + related to contention on runtime-internal locks in the "mutex" profile, subject to the + MutexProfileFraction setting. When runtimecontentionstacks=0, contention on + runtime-internal locks will report as "runtime._LostContendedRuntimeLock". When + runtimecontentionstacks=1, the call stacks will correspond to the unlock call that released + the lock. But instead of the value corresponding to the amount of contention that call + stack caused, it corresponds to the amount of time the caller of unlock had to wait in its + original call to lock. A future release is expected to align those and remove this setting. + invalidptr: invalidptr=1 (the default) causes the garbage collector and stack copier to crash the program if an invalid pointer value (for example, 1) is found in a pointer-typed location. Setting invalidptr=0 disables this check. @@ -199,17 +218,17 @@ It is a comma-separated list of name=val pairs setting these named variables: because it also disables the conservative stack scanning used for asynchronously preempted goroutines. -The net and net/http packages also refer to debugging variables in GODEBUG. +The [net] and [net/http] packages also refer to debugging variables in GODEBUG. See the documentation for those packages for details. 
The GOMAXPROCS variable limits the number of operating system threads that can execute user-level Go code simultaneously. There is no limit to the number of threads that can be blocked in system calls on behalf of Go code; those do not count against -the GOMAXPROCS limit. This package's GOMAXPROCS function queries and changes +the GOMAXPROCS limit. This package's [GOMAXPROCS] function queries and changes the limit. The GORACE variable configures the race detector, for programs built using -race. -See https://golang.org/doc/articles/race_detector.html for details. +See the [Race Detector article] for details. The GOTRACEBACK variable controls the amount of output generated when a Go program fails due to an unrecovered panic or an unexpected runtime condition. @@ -228,14 +247,13 @@ SIGABRT to trigger a core dump. GOTRACEBACK=wer is like “crash” but doesn't disable Windows Error Reporting (WER). For historical reasons, the GOTRACEBACK settings 0, 1, and 2 are synonyms for none, all, and system, respectively. -The runtime/debug package's SetTraceback function allows increasing the +The [runtime/debug.SetTraceback] function allows increasing the amount of output at run time, but it cannot reduce the amount below that specified by the environment variable. -See https://golang.org/pkg/runtime/debug/#SetTraceback. The GOARCH, GOOS, GOPATH, and GOROOT environment variables complete the set of Go environment variables. They influence the building of Go programs -(see https://golang.org/cmd/go and https://golang.org/pkg/go/build). +(see [cmd/go] and [go/build]). GOARCH, GOOS, and GOROOT are recorded at compile time and made available by constants or functions in this package, but they do not influence the execution of the run-time system. @@ -258,6 +276,8 @@ things: encounters an unrecoverable panic that would otherwise override the value of GOTRACEBACK, the goroutine stack, registers, and other memory related information are omitted. 
+ +[Race Detector article]: https://go.dev/doc/articles/race_detector */ package runtime @@ -269,7 +289,7 @@ import ( // Caller reports file and line number information about function invocations on // the calling goroutine's stack. The argument skip is the number of stack frames // to ascend, with 0 identifying the caller of Caller. (For historical reasons the -// meaning of skip differs between Caller and Callers.) The return values report the +// meaning of skip differs between Caller and [Callers].) The return values report the // program counter, file name, and line number within the file of the corresponding // call. The boolean ok is false if it was not possible to recover the information. func Caller(skip int) (pc uintptr, file string, line int, ok bool) { diff --git a/src/runtime/gc_test.go b/src/runtime/gc_test.go index b80396aa11..c6759a172c 100644 --- a/src/runtime/gc_test.go +++ b/src/runtime/gc_test.go @@ -577,6 +577,11 @@ func TestPageAccounting(t *testing.T) { } } +func init() { + // Enable ReadMemStats' double-check mode. + *runtime.DoubleCheckReadMemStats = true +} + func TestReadMemStats(t *testing.T) { base, slow := runtime.ReadMemStatsSlow() if base != slow { diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go index 276c5bfaf6..8bae8c0636 100644 --- a/src/runtime/heapdump.go +++ b/src/runtime/heapdump.go @@ -206,7 +206,7 @@ func dumptype(t *_type) { dwritebyte('.') dwrite(unsafe.Pointer(unsafe.StringData(name)), uintptr(len(name))) } - dumpbool(t.Kind_&kindDirectIface == 0 || t.PtrBytes != 0) + dumpbool(t.Kind_&kindDirectIface == 0 || t.Pointers()) } // dump an object. 
@@ -540,7 +540,7 @@ func dumpparams() { } func itab_callback(tab *itab) { - t := tab._type + t := tab.Type dumptype(t) dumpint(tagItab) dumpint(uint64(uintptr(unsafe.Pointer(tab)))) diff --git a/src/runtime/iface.go b/src/runtime/iface.go index 4563809a9d..99e9a367f5 100644 --- a/src/runtime/iface.go +++ b/src/runtime/iface.go @@ -66,19 +66,19 @@ func getitab(inter *interfacetype, typ *_type, canfail bool) *itab { // Entry doesn't exist yet. Make a new entry & add it. m = (*itab)(persistentalloc(unsafe.Sizeof(itab{})+uintptr(len(inter.Methods)-1)*goarch.PtrSize, 0, &memstats.other_sys)) - m.inter = inter - m._type = typ + m.Inter = inter + m.Type = typ // The hash is used in type switches. However, compiler statically generates itab's // for all interface/type pairs used in switches (which are added to itabTable // in itabsinit). The dynamically-generated itab's never participate in type switches, // and thus the hash is irrelevant. - // Note: m.hash is _not_ the hash used for the runtime itabTable hash table. - m.hash = 0 - m.init() + // Note: m.Hash is _not_ the hash used for the runtime itabTable hash table. + m.Hash = 0 + itabInit(m, true) itabAdd(m) unlock(&itabLock) finish: - if m.fun[0] != 0 { + if m.Fun[0] != 0 { return m } if canfail { @@ -90,7 +90,7 @@ finish: // The cached result doesn't record which // interface function was missing, so initialize // the itab again to get the missing function name. - panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: m.init()}) + panic(&TypeAssertionError{concrete: typ, asserted: &inter.Type, missingMethod: itabInit(m, false)}) } // find finds the given interface/type pair in t. 
@@ -110,7 +110,7 @@ func (t *itabTableType) find(inter *interfacetype, typ *_type) *itab { if m == nil { return nil } - if m.inter == inter && m._type == typ { + if m.Inter == inter && m.Type == typ { return m } h += i @@ -161,7 +161,7 @@ func (t *itabTableType) add(m *itab) { // See comment in find about the probe sequence. // Insert new itab in the first empty spot in the probe sequence. mask := t.size - 1 - h := itabHashFunc(m.inter, m._type) & mask + h := itabHashFunc(m.Inter, m.Type) & mask for i := uintptr(1); ; i++ { p := (**itab)(add(unsafe.Pointer(&t.entries), h*goarch.PtrSize)) m2 := *p @@ -186,13 +186,15 @@ func (t *itabTableType) add(m *itab) { } } -// init fills in the m.fun array with all the code pointers for -// the m.inter/m._type pair. If the type does not implement the interface, -// it sets m.fun[0] to 0 and returns the name of an interface function that is missing. -// It is ok to call this multiple times on the same m, even concurrently. -func (m *itab) init() string { - inter := m.inter - typ := m._type +// itabInit fills in the m.Fun array with all the code pointers for +// the m.Inter/m.Type pair. If the type does not implement the interface, +// it sets m.Fun[0] to 0 and returns the name of an interface function that is missing. +// If !firstTime, itabInit will not write anything to m.Fun (see issue 65962). +// It is ok to call this multiple times on the same m, even concurrently +// (although it will only be called once with firstTime==true). 
+func itabInit(m *itab, firstTime bool) string { + inter := m.Inter + typ := m.Type x := typ.Uncommon() // both inter and typ have method sorted by name, @@ -203,7 +205,7 @@ func (m *itab) init() string { nt := int(x.Mcount) xmhdr := (*[1 << 16]abi.Method)(add(unsafe.Pointer(x), uintptr(x.Moff)))[:nt:nt] j := 0 - methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.fun[0]))[:ni:ni] + methods := (*[1 << 16]unsafe.Pointer)(unsafe.Pointer(&m.Fun[0]))[:ni:ni] var fun0 unsafe.Pointer imethods: for k := 0; k < ni; k++ { @@ -227,8 +229,8 @@ imethods: if tname.IsExported() || pkgPath == ipkg { ifn := rtyp.textOff(t.Ifn) if k == 0 { - fun0 = ifn // we'll set m.fun[0] at the end - } else { + fun0 = ifn // we'll set m.Fun[0] at the end + } else if firstTime { methods[k] = ifn } continue imethods @@ -236,10 +238,12 @@ imethods: } } // didn't find method - m.fun[0] = 0 + // Leaves m.Fun[0] set to 0. return iname } - m.fun[0] = uintptr(fun0) + if firstTime { + m.Fun[0] = uintptr(fun0) + } return "" } @@ -267,7 +271,7 @@ func panicdottypeE(have, want, iface *_type) { func panicdottypeI(have *itab, want, iface *_type) { var t *_type if have != nil { - t = have._type + t = have.Type } panicdottypeE(t, want, iface) } @@ -440,14 +444,14 @@ func typeAssert(s *abi.TypeAssert, t *_type) *itab { // Maybe update the cache, so the next time the generated code // doesn't need to call into the runtime. - if fastrand()&1023 != 0 { + if cheaprand()&1023 != 0 { // Only bother updating the cache ~1 in 1000 times. return tab } // Load the current cache. oldC := (*abi.TypeAssertCache)(atomic.Loadp(unsafe.Pointer(&s.Cache))) - if fastrand()&uint32(oldC.Mask) != 0 { + if cheaprand()&uint32(oldC.Mask) != 0 { // As cache gets larger, choose to update it less often // so we can amortize the cost of building a new cache. 
return tab @@ -540,7 +544,7 @@ func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) { // Maybe update the cache, so the next time the generated code // doesn't need to call into the runtime. - if fastrand()&1023 != 0 { + if cheaprand()&1023 != 0 { // Only bother updating the cache ~1 in 1000 times. // This ensures we don't waste memory on switches, or // switch arguments, that only happen a few times. @@ -549,7 +553,7 @@ func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) { // Load the current cache. oldC := (*abi.InterfaceSwitchCache)(atomic.Loadp(unsafe.Pointer(&s.Cache))) - if fastrand()&uint32(oldC.Mask) != 0 { + if cheaprand()&uint32(oldC.Mask) != 0 { // As cache gets larger, choose to update it less often // so we can amortize the cost of building a new cache // (that cost is linear in oldc.Mask). @@ -567,7 +571,7 @@ func interfaceSwitch(s *abi.InterfaceSwitch, t *_type) (int, *itab) { return case_, tab } -// buildInterfaceSwitchCache constructs a interface switch cache +// buildInterfaceSwitchCache constructs an interface switch cache // containing all the entries from oldC plus the new entry // (typ,case_,tab). func buildInterfaceSwitchCache(oldC *abi.InterfaceSwitchCache, typ *_type, case_ int, tab *itab) *abi.InterfaceSwitchCache { diff --git a/src/runtime/internal/atomic/atomic_andor_generic.go b/src/runtime/internal/atomic/atomic_andor_generic.go new file mode 100644 index 0000000000..f8b148dda5 --- /dev/null +++ b/src/runtime/internal/atomic/atomic_andor_generic.go @@ -0,0 +1,67 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build arm || mips || mipsle || mips64 || mips64le || wasm + +package atomic + +//go:nosplit +func And32(ptr *uint32, val uint32) uint32 { + for { + old := *ptr + if Cas(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Or32(ptr *uint32, val uint32) uint32 { + for { + old := *ptr + if Cas(ptr, old, old|val) { + return old + } + } +} + +//go:nosplit +func And64(ptr *uint64, val uint64) uint64 { + for { + old := *ptr + if Cas64(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Or64(ptr *uint64, val uint64) uint64 { + for { + old := *ptr + if Cas64(ptr, old, old|val) { + return old + } + } +} + +//go:nosplit +func Anduintptr(ptr *uintptr, val uintptr) uintptr { + for { + old := *ptr + if Casuintptr(ptr, old, old&val) { + return old + } + } +} + +//go:nosplit +func Oruintptr(ptr *uintptr, val uintptr) uintptr { + for { + old := *ptr + if Casuintptr(ptr, old, old|val) { + return old + } + } +} diff --git a/src/runtime/internal/atomic/atomic_andor_test.go b/src/runtime/internal/atomic/atomic_andor_test.go index 9dd8b60ae4..a2f3b6f3a9 100644 --- a/src/runtime/internal/atomic/atomic_andor_test.go +++ b/src/runtime/internal/atomic/atomic_andor_test.go @@ -1,6 +1,3 @@ -//go:build 386 || amd64 || arm || arm64 || ppc64 || ppc64le || riscv64 || wasm - -// // Copyright 2023 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
diff --git a/src/runtime/internal/atomic/atomic_arm.go b/src/runtime/internal/atomic/atomic_arm.go index ae609cf4db..567e951244 100644 --- a/src/runtime/internal/atomic/atomic_arm.go +++ b/src/runtime/internal/atomic/atomic_arm.go @@ -208,66 +208,6 @@ func And(addr *uint32, v uint32) { } } -//go:nosplit -func Or32(addr *uint32, v uint32) uint32 { - for { - old := *addr - if Cas(addr, old, old|v) { - return old - } - } -} - -//go:nosplit -func And32(addr *uint32, v uint32) uint32 { - for { - old := *addr - if Cas(addr, old, old&v) { - return old - } - } -} - -//go:nosplit -func Or64(addr *uint64, v uint64) uint64 { - for { - old := *addr - if Cas64(addr, old, old|v) { - return old - } - } -} - -//go:nosplit -func And64(addr *uint64, v uint64) uint64 { - for { - old := *addr - if Cas64(addr, old, old&v) { - return old - } - } -} - -//go:nosplit -func Oruintptr(addr *uintptr, v uintptr) uintptr { - for { - old := *addr - if Casuintptr(addr, old, old|v) { - return old - } - } -} - -//go:nosplit -func Anduintptr(addr *uintptr, v uintptr) uintptr { - for { - old := *addr - if Casuintptr(addr, old, old&v) { - return old - } - } -} - //go:nosplit func armcas(ptr *uint32, old, new uint32) bool diff --git a/src/runtime/internal/atomic/atomic_arm.s b/src/runtime/internal/atomic/atomic_arm.s index 662b5987f2..1cf7d8f6ef 100644 --- a/src/runtime/internal/atomic/atomic_arm.s +++ b/src/runtime/internal/atomic/atomic_arm.s @@ -41,8 +41,10 @@ casl: BNE casl MOVW $1, R0 - CMP $7, R8 - BLT 2(PC) +#ifndef GOARM_7 + CMP $0, R11 + BEQ 2(PC) +#endif DMB MB_ISH MOVB R0, ret+12(FP) diff --git a/src/runtime/internal/atomic/atomic_loong64.go b/src/runtime/internal/atomic/atomic_loong64.go index d82a5b8e2a..de6d4b4ba6 100644 --- a/src/runtime/internal/atomic/atomic_loong64.go +++ b/src/runtime/internal/atomic/atomic_loong64.go @@ -59,6 +59,24 @@ func Or8(ptr *uint8, val uint8) //go:noescape func Or(ptr *uint32, val uint32) +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + 
+//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + // NOTE: Do not add atomicxor8 (XOR is not idempotent). //go:noescape diff --git a/src/runtime/internal/atomic/atomic_loong64.s b/src/runtime/internal/atomic/atomic_loong64.s index 34193add3e..c7452d2e11 100644 --- a/src/runtime/internal/atomic/atomic_loong64.s +++ b/src/runtime/internal/atomic/atomic_loong64.s @@ -256,6 +256,66 @@ TEXT ·And(SB), NOSPLIT, $0-12 DBAR RET +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + OR R5, R6, R7 + SC R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVW R6, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVV ptr+0(FP), R4 + MOVW val+8(FP), R5 + DBAR + LL (R4), R6 + AND R5, R6, R7 + SC R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVW R6, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + DBAR + LLV (R4), R6 + OR R5, R6, R7 + SCV R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVV R6, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), NOSPLIT, $0-24 + MOVV ptr+0(FP), R4 + MOVV val+8(FP), R5 + DBAR + LLV (R4), R6 + AND R5, R6, R7 + SCV R7, (R4) + BEQ R7, -4(PC) + DBAR + MOVV R6, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + JMP ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + JMP ·Or64(SB) + // uint32 runtime∕internal∕atomic·Load(uint32 volatile* ptr) TEXT ·Load(SB),NOSPLIT|NOFRAME,$0-12 MOVV ptr+0(FP), R19 diff --git 
a/src/runtime/internal/atomic/atomic_s390x.go b/src/runtime/internal/atomic/atomic_s390x.go index 9855bf0780..68b4e160f9 100644 --- a/src/runtime/internal/atomic/atomic_s390x.go +++ b/src/runtime/internal/atomic/atomic_s390x.go @@ -98,6 +98,24 @@ func And(ptr *uint32, val uint32) //go:noescape func Or(ptr *uint32, val uint32) +//go:noescape +func And32(ptr *uint32, val uint32) uint32 + +//go:noescape +func Or32(ptr *uint32, val uint32) uint32 + +//go:noescape +func And64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Or64(ptr *uint64, val uint64) uint64 + +//go:noescape +func Anduintptr(ptr *uintptr, val uintptr) uintptr + +//go:noescape +func Oruintptr(ptr *uintptr, val uintptr) uintptr + //go:noescape func Xadd(ptr *uint32, delta int32) uint32 diff --git a/src/runtime/internal/atomic/atomic_s390x.s b/src/runtime/internal/atomic/atomic_s390x.s index a0c204b0e1..6e4ea0e32a 100644 --- a/src/runtime/internal/atomic/atomic_s390x.s +++ b/src/runtime/internal/atomic/atomic_s390x.s @@ -246,3 +246,59 @@ TEXT ·And(SB), NOSPLIT, $0-12 MOVW val+8(FP), R4 LAN R4, R6, 0(R3) // R6 = *R3; *R3 &= R4; (atomic) RET + +// func Or32(addr *uint32, v uint32) old uint32 +TEXT ·Or32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + OR R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func And32(addr *uint32, v uint32) old uint32 +TEXT ·And32(SB), NOSPLIT, $0-20 + MOVD ptr+0(FP), R4 + MOVW val+8(FP), R5 + MOVW (R4), R3 +repeat: + AND R5, R3, R6 + CS R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVW R3, ret+16(FP) + RET + +// func Or64(addr *uint64, v uint64) old uint64 +TEXT ·Or64(SB), NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + OR R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func And64(addr *uint64, v uint64) old uint64 +TEXT ·And64(SB), 
NOSPLIT, $0-24 + MOVD ptr+0(FP), R4 + MOVD val+8(FP), R5 + MOVD (R4), R3 +repeat: + AND R5, R3, R6 + CSG R3, R6, (R4) // if R3==(R4) then (R4)=R6 else R3=(R4) + BNE repeat + MOVD R3, ret+16(FP) + RET + +// func Anduintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Anduintptr(SB), NOSPLIT, $0-24 + BR ·And64(SB) + +// func Oruintptr(addr *uintptr, v uintptr) old uintptr +TEXT ·Oruintptr(SB), NOSPLIT, $0-24 + BR ·Or64(SB) diff --git a/src/runtime/internal/atomic/atomic_wasm.go b/src/runtime/internal/atomic/atomic_wasm.go index d1ca994205..835fc43ccf 100644 --- a/src/runtime/internal/atomic/atomic_wasm.go +++ b/src/runtime/internal/atomic/atomic_wasm.go @@ -339,51 +339,3 @@ func Xaddint64(ptr *int64, delta int64) int64 { *ptr = new return new } - -//go:nosplit -//go:noinline -func And32(ptr *uint32, val uint32) uint32 { - old := *ptr - *ptr = old & val - return old -} - -//go:nosplit -//go:noinline -func And64(ptr *uint64, val uint64) uint64 { - old := *ptr - *ptr = old & val - return old -} - -//go:nosplit -//go:noinline -func Anduintptr(ptr *uintptr, val uintptr) uintptr { - old := *ptr - *ptr = old & val - return old -} - -//go:nosplit -//go:noinline -func Or32(ptr *uint32, val uint32) uint32 { - old := *ptr - *ptr = old | val - return old -} - -//go:nosplit -//go:noinline -func Or64(ptr *uint64, val uint64) uint64 { - old := *ptr - *ptr = old | val - return old -} - -//go:nosplit -//go:noinline -func Oruintptr(ptr *uintptr, val uintptr) uintptr { - old := *ptr - *ptr = old | val - return old -} diff --git a/src/runtime/internal/sys/intrinsics_test.go b/src/runtime/internal/sys/intrinsics_test.go index bf75f19848..6799885001 100644 --- a/src/runtime/internal/sys/intrinsics_test.go +++ b/src/runtime/internal/sys/intrinsics_test.go @@ -1,3 +1,7 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package sys_test import ( diff --git a/src/runtime/internal/syscall/asm_linux_loong64.s b/src/runtime/internal/syscall/asm_linux_loong64.s deleted file mode 100644 index d6a33f90a7..0000000000 --- a/src/runtime/internal/syscall/asm_linux_loong64.s +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -#include "textflag.h" - -// func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) -TEXT ·Syscall6(SB),NOSPLIT,$0-80 - MOVV num+0(FP), R11 // syscall entry - MOVV a1+8(FP), R4 - MOVV a2+16(FP), R5 - MOVV a3+24(FP), R6 - MOVV a4+32(FP), R7 - MOVV a5+40(FP), R8 - MOVV a6+48(FP), R9 - SYSCALL - MOVW $-4096, R12 - BGEU R12, R4, ok - MOVV $-1, R12 - MOVV R12, r1+56(FP) - MOVV R0, r2+64(FP) - SUBVU R4, R0, R4 - MOVV R4, errno+72(FP) - RET -ok: - MOVV R4, r1+56(FP) - MOVV R0, r2+64(FP) // r2 is not used. Always set to 0. - MOVV R0, errno+72(FP) - RET diff --git a/src/runtime/internal/wasitest/tcpecho_test.go b/src/runtime/internal/wasitest/tcpecho_test.go index 11373955f3..bbcea90310 100644 --- a/src/runtime/internal/wasitest/tcpecho_test.go +++ b/src/runtime/internal/wasitest/tcpecho_test.go @@ -68,18 +68,14 @@ func TestTCPEcho(t *testing.T) { defer subProcess.Process.Kill() var conn net.Conn - var err error for { + var err error conn, err = net.Dial("tcp", host) if err == nil { break } time.Sleep(500 * time.Millisecond) } - if err != nil { - t.Log(b.String()) - t.Fatal(err) - } defer conn.Close() payload := []byte("foobar") diff --git a/src/runtime/lock_futex.go b/src/runtime/lock_futex.go index cc7d465ef1..867e2b34d0 100644 --- a/src/runtime/lock_futex.go +++ b/src/runtime/lock_futex.go @@ -44,6 +44,10 @@ func key32(p *uintptr) *uint32 { return (*uint32)(unsafe.Pointer(p)) } +func mutexContended(l *mutex) bool { + return atomic.Load(key32(&l.key)) > mutex_locked +} + func lock(l *mutex) { lockWithRank(l, 
getLockRank(l)) } @@ -71,6 +75,8 @@ func lock2(l *mutex) { // its wakeup call. wait := v + timer := &lockTimer{lock: l} + timer.begin() // On uniprocessors, no point spinning. // On multiprocessors, spin for ACTIVE_SPIN attempts. spin := 0 @@ -82,6 +88,7 @@ func lock2(l *mutex) { for i := 0; i < spin; i++ { for l.key == mutex_unlocked { if atomic.Cas(key32(&l.key), mutex_unlocked, wait) { + timer.end() return } } @@ -92,6 +99,7 @@ func lock2(l *mutex) { for i := 0; i < passive_spin; i++ { for l.key == mutex_unlocked { if atomic.Cas(key32(&l.key), mutex_unlocked, wait) { + timer.end() return } } @@ -101,6 +109,7 @@ func lock2(l *mutex) { // Sleep. v = atomic.Xchg(key32(&l.key), mutex_sleeping) if v == mutex_unlocked { + timer.end() return } wait = mutex_sleeping @@ -122,6 +131,7 @@ func unlock2(l *mutex) { } gp := getg() + gp.m.mLockProfile.recordUnlock(l) gp.m.locks-- if gp.m.locks < 0 { throw("runtime·unlock: lock count") diff --git a/src/runtime/lock_js.go b/src/runtime/lock_js.go index 91ad7be317..b6ee5ec7af 100644 --- a/src/runtime/lock_js.go +++ b/src/runtime/lock_js.go @@ -23,6 +23,10 @@ const ( passive_spin = 1 ) +func mutexContended(l *mutex) bool { + return false +} + func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } diff --git a/src/runtime/lock_sema.go b/src/runtime/lock_sema.go index 9afba08b0b..073e7d410e 100644 --- a/src/runtime/lock_sema.go +++ b/src/runtime/lock_sema.go @@ -31,6 +31,10 @@ const ( passive_spin = 1 ) +func mutexContended(l *mutex) bool { + return atomic.Loaduintptr(&l.key) > locked +} + func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } @@ -48,6 +52,8 @@ func lock2(l *mutex) { } semacreate(gp.m) + timer := &lockTimer{lock: l} + timer.begin() // On uniprocessor's, no point spinning. // On multiprocessors, spin for ACTIVE_SPIN attempts. spin := 0 @@ -60,6 +66,7 @@ Loop: if v&locked == 0 { // Unlocked. Try to lock. 
if atomic.Casuintptr(&l.key, v, v|locked) { + timer.end() return } i = 0 @@ -119,6 +126,7 @@ func unlock2(l *mutex) { } } } + gp.m.mLockProfile.recordUnlock(l) gp.m.locks-- if gp.m.locks < 0 { throw("runtime·unlock: lock count") diff --git a/src/runtime/lock_wasip1.go b/src/runtime/lock_wasip1.go index c4fc59f6cc..acfc62acb4 100644 --- a/src/runtime/lock_wasip1.go +++ b/src/runtime/lock_wasip1.go @@ -19,6 +19,10 @@ const ( active_spin_cnt = 30 ) +func mutexContended(l *mutex) bool { + return false +} + func lock(l *mutex) { lockWithRank(l, getLockRank(l)) } diff --git a/src/runtime/lockrank.go b/src/runtime/lockrank.go index 103131df5e..5048b4f19e 100644 --- a/src/runtime/lockrank.go +++ b/src/runtime/lockrank.go @@ -18,19 +18,25 @@ const ( lockRankSweepWaiters lockRankAssistQueue lockRankSweep - lockRankPollDesc + lockRankTestR + lockRankTestW + lockRankAllocmW + lockRankExecW lockRankCpuprof + lockRankPollDesc + lockRankWakeableSleep + // SCHED + lockRankAllocmR + lockRankExecR lockRankSched lockRankAllg lockRankAllp lockRankTimers - lockRankWakeableSleep lockRankNetpollInit + lockRankTimer lockRankHchan lockRankNotifyList lockRankSudog - lockRankRwmutexW - lockRankRwmutexR lockRankRoot lockRankItab lockRankReflectOffs @@ -64,6 +70,9 @@ const ( lockRankPanic lockRankDeadlock lockRankRaceFini + lockRankAllocmRInternal + lockRankExecRInternal + lockRankTestRInternal ) // lockRankLeafRank is the rank of lock that does not have a declared rank, @@ -72,53 +81,61 @@ const lockRankLeafRank lockRank = 1000 // lockNames gives the names associated with each of the above ranks. 
var lockNames = []string{ - lockRankSysmon: "sysmon", - lockRankScavenge: "scavenge", - lockRankForcegc: "forcegc", - lockRankDefer: "defer", - lockRankSweepWaiters: "sweepWaiters", - lockRankAssistQueue: "assistQueue", - lockRankSweep: "sweep", - lockRankPollDesc: "pollDesc", - lockRankCpuprof: "cpuprof", - lockRankSched: "sched", - lockRankAllg: "allg", - lockRankAllp: "allp", - lockRankTimers: "timers", - lockRankWakeableSleep: "wakeableSleep", - lockRankNetpollInit: "netpollInit", - lockRankHchan: "hchan", - lockRankNotifyList: "notifyList", - lockRankSudog: "sudog", - lockRankRwmutexW: "rwmutexW", - lockRankRwmutexR: "rwmutexR", - lockRankRoot: "root", - lockRankItab: "itab", - lockRankReflectOffs: "reflectOffs", - lockRankUserArenaState: "userArenaState", - lockRankTraceBuf: "traceBuf", - lockRankTraceStrings: "traceStrings", - lockRankFin: "fin", - lockRankSpanSetSpine: "spanSetSpine", - lockRankMspanSpecial: "mspanSpecial", - lockRankGcBitsArenas: "gcBitsArenas", - lockRankProfInsert: "profInsert", - lockRankProfBlock: "profBlock", - lockRankProfMemActive: "profMemActive", - lockRankProfMemFuture: "profMemFuture", - lockRankGscan: "gscan", - lockRankStackpool: "stackpool", - lockRankStackLarge: "stackLarge", - lockRankHchanLeaf: "hchanLeaf", - lockRankWbufSpans: "wbufSpans", - lockRankMheap: "mheap", - lockRankMheapSpecial: "mheapSpecial", - lockRankGlobalAlloc: "globalAlloc", - lockRankTrace: "trace", - lockRankTraceStackTab: "traceStackTab", - lockRankPanic: "panic", - lockRankDeadlock: "deadlock", - lockRankRaceFini: "raceFini", + lockRankSysmon: "sysmon", + lockRankScavenge: "scavenge", + lockRankForcegc: "forcegc", + lockRankDefer: "defer", + lockRankSweepWaiters: "sweepWaiters", + lockRankAssistQueue: "assistQueue", + lockRankSweep: "sweep", + lockRankTestR: "testR", + lockRankTestW: "testW", + lockRankAllocmW: "allocmW", + lockRankExecW: "execW", + lockRankCpuprof: "cpuprof", + lockRankPollDesc: "pollDesc", + lockRankWakeableSleep: "wakeableSleep", + 
lockRankAllocmR: "allocmR", + lockRankExecR: "execR", + lockRankSched: "sched", + lockRankAllg: "allg", + lockRankAllp: "allp", + lockRankTimers: "timers", + lockRankNetpollInit: "netpollInit", + lockRankTimer: "timer", + lockRankHchan: "hchan", + lockRankNotifyList: "notifyList", + lockRankSudog: "sudog", + lockRankRoot: "root", + lockRankItab: "itab", + lockRankReflectOffs: "reflectOffs", + lockRankUserArenaState: "userArenaState", + lockRankTraceBuf: "traceBuf", + lockRankTraceStrings: "traceStrings", + lockRankFin: "fin", + lockRankSpanSetSpine: "spanSetSpine", + lockRankMspanSpecial: "mspanSpecial", + lockRankGcBitsArenas: "gcBitsArenas", + lockRankProfInsert: "profInsert", + lockRankProfBlock: "profBlock", + lockRankProfMemActive: "profMemActive", + lockRankProfMemFuture: "profMemFuture", + lockRankGscan: "gscan", + lockRankStackpool: "stackpool", + lockRankStackLarge: "stackLarge", + lockRankHchanLeaf: "hchanLeaf", + lockRankWbufSpans: "wbufSpans", + lockRankMheap: "mheap", + lockRankMheapSpecial: "mheapSpecial", + lockRankGlobalAlloc: "globalAlloc", + lockRankTrace: "trace", + lockRankTraceStackTab: "traceStackTab", + lockRankPanic: "panic", + lockRankDeadlock: "deadlock", + lockRankRaceFini: "raceFini", + lockRankAllocmRInternal: "allocmRInternal", + lockRankExecRInternal: "execRInternal", + lockRankTestRInternal: "testRInternal", } func (rank lockRank) String() string { @@ -140,51 +157,59 @@ func (rank lockRank) String() string { // // Lock ranks that allow self-cycles list themselves. 
var lockPartialOrder [][]lockRank = [][]lockRank{ - lockRankSysmon: {}, - lockRankScavenge: {lockRankSysmon}, - lockRankForcegc: {lockRankSysmon}, - lockRankDefer: {}, - lockRankSweepWaiters: {}, - lockRankAssistQueue: {}, - lockRankSweep: {}, - lockRankPollDesc: {}, - lockRankCpuprof: {}, - lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof}, - lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched}, - lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched}, - lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers}, - lockRankWakeableSleep: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers}, - lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllp, lockRankTimers}, - lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan}, - lockRankNotifyList: {}, - lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankHchan, lockRankNotifyList}, - lockRankRwmutexW: {}, - lockRankRwmutexR: {lockRankSysmon, lockRankRwmutexW}, - lockRankRoot: {}, - lockRankItab: {}, - lockRankReflectOffs: {lockRankItab}, - lockRankUserArenaState: {}, - lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, - lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, - lockRankFin: 
{lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, - lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, 
lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, - lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, - lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, - lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, 
lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, - lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, - lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, 
lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, - lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, 
lockRankMheapSpecial}, - lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, - lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankPollDesc, lockRankCpuprof, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRwmutexW, lockRankRwmutexR, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, - lockRankPanic: {}, - lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, - lockRankRaceFini: {lockRankPanic}, + lockRankSysmon: {}, + lockRankScavenge: {lockRankSysmon}, + lockRankForcegc: {lockRankSysmon}, + lockRankDefer: {}, + lockRankSweepWaiters: {}, + lockRankAssistQueue: {}, + lockRankSweep: {}, + lockRankTestR: {}, + lockRankTestW: {}, + lockRankAllocmW: {}, + lockRankExecW: {}, + lockRankCpuprof: {}, + lockRankPollDesc: {}, + lockRankWakeableSleep: {}, + lockRankAllocmR: {lockRankSysmon, lockRankScavenge, 
lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep}, + lockRankExecR: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep}, + lockRankSched: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR}, + lockRankAllg: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankAllp: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched}, + lockRankTimers: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers}, + lockRankNetpollInit: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers}, + lockRankTimer: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllp, lockRankTimers}, + lockRankHchan: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, 
lockRankWakeableSleep, lockRankHchan}, + lockRankNotifyList: {}, + lockRankSudog: {lockRankSysmon, lockRankScavenge, lockRankSweep, lockRankTestR, lockRankWakeableSleep, lockRankHchan, lockRankNotifyList}, + lockRankRoot: {}, + lockRankItab: {}, + lockRankReflectOffs: {lockRankItab}, + lockRankUserArenaState: {}, + lockRankTraceBuf: {lockRankSysmon, lockRankScavenge}, + lockRankTraceStrings: {lockRankSysmon, lockRankScavenge, lockRankTraceBuf}, + lockRankFin: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankSpanSetSpine: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankMspanSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankGcBitsArenas: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, 
lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankMspanSpecial}, + lockRankProfInsert: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfBlock: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemActive: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings}, + lockRankProfMemFuture: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankHchan, lockRankNotifyList, lockRankItab, lockRankReflectOffs, 
lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankProfMemActive}, + lockRankGscan: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture}, + lockRankStackpool: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankStackLarge: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, 
lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankHchanLeaf: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankHchan, lockRankNotifyList, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankHchanLeaf}, + lockRankWbufSpans: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan}, + lockRankMheap: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, 
lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans}, + lockRankMheapSpecial: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankGlobalAlloc: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankMheapSpecial}, + lockRankTrace: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, 
lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap}, + lockRankTraceStackTab: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankDefer, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR, lockRankExecR, lockRankSched, lockRankAllg, lockRankAllp, lockRankTimers, lockRankNetpollInit, lockRankTimer, lockRankHchan, lockRankNotifyList, lockRankSudog, lockRankRoot, lockRankItab, lockRankReflectOffs, lockRankUserArenaState, lockRankTraceBuf, lockRankTraceStrings, lockRankFin, lockRankSpanSetSpine, lockRankMspanSpecial, lockRankGcBitsArenas, lockRankProfInsert, lockRankProfBlock, lockRankProfMemActive, lockRankProfMemFuture, lockRankGscan, lockRankStackpool, lockRankStackLarge, lockRankWbufSpans, lockRankMheap, lockRankTrace}, + lockRankPanic: {}, + lockRankDeadlock: {lockRankPanic, lockRankDeadlock}, + lockRankRaceFini: {lockRankPanic}, + lockRankAllocmRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankAllocmW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankAllocmR}, + lockRankExecRInternal: {lockRankSysmon, lockRankScavenge, lockRankForcegc, lockRankSweepWaiters, lockRankAssistQueue, lockRankSweep, lockRankTestR, lockRankExecW, lockRankCpuprof, lockRankPollDesc, lockRankWakeableSleep, lockRankExecR}, + lockRankTestRInternal: {lockRankTestR, 
lockRankTestW}, } diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go index 398eaf5d52..271e4c43db 100644 --- a/src/runtime/malloc.go +++ b/src/runtime/malloc.go @@ -1043,7 +1043,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { var span *mspan var header **_type var x unsafe.Pointer - noscan := typ == nil || typ.PtrBytes == 0 + noscan := typ == nil || !typ.Pointers() // In some cases block zeroing can profitably (for latency reduction purposes) // be delayed till preemption is possible; delayedZeroing tracks that state. delayedZeroing := false @@ -1153,7 +1153,8 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { memclrNoHeapPointers(x, size) } if goexperiment.AllocHeaders && hasHeader { - header = (**_type)(unsafe.Pointer(uintptr(v) + size - mallocHeaderSize)) + header = (**_type)(x) + x = add(x, mallocHeaderSize) size -= mallocHeaderSize } } @@ -1187,7 +1188,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // Array allocation. If there are any // pointers, GC has to scan to the last // element. - if typ.PtrBytes != 0 { + if typ.Pointers() { scanSize = dataSize - typ.Size_ + typ.PtrBytes } } else { @@ -1220,12 +1221,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { // This may be racing with GC so do it atomically if there can be // a race marking the bit. if gcphase != _GCoff { - // Pass the full size of the allocation to the number of bytes - // marked. - // - // If !goexperiment.AllocHeaders, "size" doesn't include the - // allocation header, so use span.elemsize unconditionally. 
- gcmarknewobject(span, uintptr(x), span.elemsize) + gcmarknewobject(span, uintptr(x)) } if raceenabled { @@ -1247,12 +1243,28 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { asanunpoison(x, userSize) } + // If !goexperiment.AllocHeaders, "size" doesn't include the + // allocation header, so use span.elemsize as the "full" size + // for various computations below. + // + // TODO(mknyszek): We should really count the header as part + // of gc_sys or something, but it's risky to change the + // accounting so much right now. Just pretend its internal + // fragmentation and match the GC's accounting by using the + // whole allocation slot. + fullSize := size + if goexperiment.AllocHeaders { + fullSize = span.elemsize + } if rate := MemProfileRate; rate > 0 { // Note cache c only valid while m acquired; see #47302 - if rate != 1 && size < c.nextSample { - c.nextSample -= size + // + // N.B. Use the full size because that matches how the GC + // will update the mem profile on the "free" side. + if rate != 1 && fullSize < c.nextSample { + c.nextSample -= fullSize } else { - profilealloc(mp, x, size) + profilealloc(mp, x, fullSize) } } mp.mallocing = 0 @@ -1267,6 +1279,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if goexperiment.AllocHeaders && header != nil { throw("unexpected malloc header in delayed zeroing of large object") } + // N.B. size == fullSize always in this case. memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302 } @@ -1277,14 +1290,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { if inittrace.active && inittrace.id == getg().goid { // Init functions are executed sequentially in a single goroutine. - inittrace.bytes += uint64(size) + inittrace.bytes += uint64(fullSize) } } if assistG != nil { // Account for internal fragmentation in the assist // debt now that we know it. - assistG.gcAssistBytes -= int64(size - dataSize) + // + // N.B. 
Use the full size because that's how the rest + // of the GC accounts for bytes marked. + assistG.gcAssistBytes -= int64(fullSize - dataSize) } if shouldhelpgc { @@ -1456,7 +1472,7 @@ func fastexprand(mean int) int32 { // x = -log_e(q) * mean // x = log_2(q) * (-log_e(2)) * mean ; Using log_2 for efficiency const randomBitCount = 26 - q := fastrandn(1< 0 { qlog = 0 @@ -1474,7 +1490,7 @@ func nextSampleNoFP() uintptr { rate = 0x3fffffff } if rate != 0 { - return uintptr(fastrandn(uint32(2 * rate))) + return uintptr(cheaprandn(uint32(2 * rate))) } return 0 } diff --git a/src/runtime/map.go b/src/runtime/map.go index 6f5623b102..bb3ac39e94 100644 --- a/src/runtime/map.go +++ b/src/runtime/map.go @@ -64,20 +64,12 @@ import ( const ( // Maximum number of key/elem pairs a bucket can hold. bucketCntBits = abi.MapBucketCountBits - bucketCnt = abi.MapBucketCount // Maximum average load of a bucket that triggers growth is bucketCnt*13/16 (about 80% full) // Because of minimum alignment rules, bucketCnt is known to be at least 8. // Represent as loadFactorNum/loadFactorDen, to allow integer math. loadFactorDen = 2 - loadFactorNum = loadFactorDen * bucketCnt * 13 / 16 - - // Maximum key or elem size to keep inline (instead of mallocing per element). - // Must fit in a uint8. - // Fast versions cannot handle big elems - the cutoff size for - // fast versions in cmd/compile/internal/gc/walk.go must be at most this elem. - maxKeySize = abi.MapMaxKeyBytes - maxElemSize = abi.MapMaxElemBytes + loadFactorNum = loadFactorDen * abi.MapBucketCount * 13 / 16 // data offset should be the size of the bmap struct, but needs to be // aligned correctly. For amd64p32 this means 64-bit alignment @@ -152,7 +144,7 @@ type bmap struct { // tophash generally contains the top byte of the hash value // for each key in this bucket. If tophash[0] < minTopHash, // tophash[0] is a bucket evacuation state instead. 
- tophash [bucketCnt]uint8 + tophash [abi.MapBucketCount]uint8 // Followed by bucketCnt keys and then bucketCnt elems. // NOTE: packing all the keys together and then all the elems together makes the // code a bit more complicated than alternating key/elem/key/elem/... but it allows @@ -238,8 +230,8 @@ func (h *hmap) incrnoverflow() { // as many overflow buckets as buckets. mask := uint32(1)<<(h.B-15) - 1 // Example: if h.B == 18, then mask == 7, - // and fastrand & 7 == 0 with probability 1/8. - if fastrand()&mask == 0 { + // and rand() & 7 == 0 with probability 1/8. + if uint32(rand())&mask == 0 { h.noverflow++ } } @@ -264,7 +256,7 @@ func (h *hmap) newoverflow(t *maptype, b *bmap) *bmap { ovf = (*bmap)(newobject(t.Bucket)) } h.incrnoverflow() - if t.Bucket.PtrBytes == 0 { + if !t.Bucket.Pointers() { h.createOverflow() *h.extra.overflow = append(*h.extra.overflow, ovf) } @@ -293,7 +285,7 @@ func makemap64(t *maptype, hint int64, h *hmap) *hmap { // at compile time and the map needs to be allocated on the heap. func makemap_small() *hmap { h := new(hmap) - h.hash0 = fastrand() + h.hash0 = uint32(rand()) return h } @@ -312,7 +304,7 @@ func makemap(t *maptype, hint int, h *hmap) *hmap { if h == nil { h = new(hmap) } - h.hash0 = fastrand() + h.hash0 = uint32(rand()) // Find the size parameter B which will hold the requested # of elements. // For hint < 0 overLoadFactor returns false since hint < bucketCnt. @@ -354,7 +346,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un // used with this value of b. nbuckets += bucketShift(b - 4) sz := t.Bucket.Size_ * nbuckets - up := roundupsize(sz, t.Bucket.PtrBytes == 0) + up := roundupsize(sz, !t.Bucket.Pointers()) if up != sz { nbuckets = up / t.Bucket.Size_ } @@ -368,7 +360,7 @@ func makeBucketArray(t *maptype, b uint8, dirtyalloc unsafe.Pointer) (buckets un // but may not be empty. 
buckets = dirtyalloc size := t.Bucket.Size_ * nbuckets - if t.Bucket.PtrBytes != 0 { + if t.Bucket.Pointers() { memclrHasPointers(buckets, size) } else { memclrNoHeapPointers(buckets, size) @@ -431,7 +423,7 @@ func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer { top := tophash(hash) bucketloop: for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if b.tophash[i] == emptyRest { break bucketloop @@ -443,7 +435,7 @@ bucketloop: k = *((*unsafe.Pointer)(k)) } if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) } @@ -492,7 +484,7 @@ func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) top := tophash(hash) bucketloop: for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if b.tophash[i] == emptyRest { break bucketloop @@ -504,7 +496,7 @@ bucketloop: k = *((*unsafe.Pointer)(k)) } if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) } @@ -536,7 +528,7 @@ func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe top := tophash(hash) bucketloop: for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if b.tophash[i] == emptyRest { break bucketloop @@ -548,7 +540,7 @@ bucketloop: k = *((*unsafe.Pointer)(k)) } if t.Key.Equal(key, k) { - e := add(unsafe.Pointer(b), 
dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { e = *((*unsafe.Pointer)(e)) } @@ -618,12 +610,12 @@ again: var elem unsafe.Pointer bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if isEmpty(b.tophash[i]) && inserti == nil { inserti = &b.tophash[i] insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.KeySize)) - elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) } if b.tophash[i] == emptyRest { break bucketloop @@ -641,7 +633,7 @@ bucketloop: if t.NeedKeyUpdate() { typedmemmove(t.Key, k, key) } - elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + elem = add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) goto done } ovf := b.overflow(t) @@ -665,7 +657,7 @@ bucketloop: newb := h.newoverflow(t, b) inserti = &newb.tophash[0] insertk = add(unsafe.Pointer(newb), dataOffset) - elem = add(insertk, bucketCnt*uintptr(t.KeySize)) + elem = add(insertk, abi.MapBucketCount*uintptr(t.KeySize)) } // store new key/elem at insert position @@ -731,7 +723,7 @@ func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) { top := tophash(hash) search: for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if b.tophash[i] == emptyRest { break search @@ -749,13 +741,13 @@ search: // Only clear key if there are pointers in it. 
if t.IndirectKey() { *(*unsafe.Pointer)(k) = nil - } else if t.Key.PtrBytes != 0 { + } else if t.Key.Pointers() { memclrHasPointers(k, t.Key.Size_) } - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { *(*unsafe.Pointer)(e) = nil - } else if t.Elem.PtrBytes != 0 { + } else if t.Elem.Pointers() { memclrHasPointers(e, t.Elem.Size_) } else { memclrNoHeapPointers(e, t.Elem.Size_) @@ -765,7 +757,7 @@ search: // change those to emptyRest states. // It would be nice to make this a separate function, but // for loops are not currently inlineable. - if i == bucketCnt-1 { + if i == abi.MapBucketCount-1 { if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { goto notLast } @@ -784,7 +776,7 @@ search: c := b for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { } - i = bucketCnt - 1 + i = abi.MapBucketCount - 1 } else { i-- } @@ -797,7 +789,7 @@ search: // Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. if h.count == 0 { - h.hash0 = fastrand() + h.hash0 = uint32(rand()) } break search } @@ -832,7 +824,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { // grab snapshot of bucket state it.B = h.B it.buckets = h.buckets - if t.Bucket.PtrBytes == 0 { + if !t.Bucket.Pointers() { // Allocate the current slice and remember pointers to both current and old. 
// This preserves all relevant overflow buckets alive even if // the table grows and/or overflow buckets are added to the table @@ -843,14 +835,9 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) { } // decide where to start - var r uintptr - if h.B > 31-bucketCntBits { - r = uintptr(fastrand64()) - } else { - r = uintptr(fastrand()) - } + r := uintptr(rand()) it.startBucket = r & bucketMask(h.B) - it.offset = uint8(r >> h.B & (bucketCnt - 1)) + it.offset = uint8(r >> h.B & (abi.MapBucketCount - 1)) // iterator state it.bucket = it.startBucket @@ -911,8 +898,8 @@ next: } i = 0 } - for ; i < bucketCnt; i++ { - offi := (i + it.offset) & (bucketCnt - 1) + for ; i < abi.MapBucketCount; i++ { + offi := (i + it.offset) & (abi.MapBucketCount - 1) if isEmpty(b.tophash[offi]) || b.tophash[offi] == evacuatedEmpty { // TODO: emptyRest is hard to use here, as we start iterating // in the middle of a bucket. It's feasible, just tricky. @@ -922,7 +909,7 @@ next: if t.IndirectKey() { k = *((*unsafe.Pointer)(k)) } - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize)) + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(offi)*uintptr(t.ValueSize)) if checkBucket != noCheck && !h.sameSizeGrow() { // Special case: iterator was started during a grow to a larger size // and the grow is not done yet. We're working on a bucket whose @@ -1013,7 +1000,7 @@ func mapclear(t *maptype, h *hmap) { for i := uintptr(0); i <= mask; i++ { b := (*bmap)(add(bucket, i*uintptr(t.BucketSize))) for ; b != nil; b = b.overflow(t) { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { b.tophash[i] = emptyRest } } @@ -1032,7 +1019,7 @@ func mapclear(t *maptype, h *hmap) { // Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. 
- h.hash0 = fastrand() + h.hash0 = uint32(rand()) // Keep the mapextra allocation but clear any extra information. if h.extra != nil { @@ -1100,7 +1087,7 @@ func hashGrow(t *maptype, h *hmap) { // overLoadFactor reports whether count items placed in 1< bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen) + return count > abi.MapBucketCount && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen) } // tooManyOverflowBuckets reports whether noverflow buckets is too many for a map with 1< maxKeySize && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) || - t.Key.Size_ <= maxKeySize && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) { + if t.Key.Size_ > abi.MapMaxKeyBytes && (!t.IndirectKey() || t.KeySize != uint8(goarch.PtrSize)) || + t.Key.Size_ <= abi.MapMaxKeyBytes && (t.IndirectKey() || t.KeySize != uint8(t.Key.Size_)) { throw("key size wrong") } - if t.Elem.Size_ > maxElemSize && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) || - t.Elem.Size_ <= maxElemSize && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) { + if t.Elem.Size_ > abi.MapMaxElemBytes && (!t.IndirectElem() || t.ValueSize != uint8(goarch.PtrSize)) || + t.Elem.Size_ <= abi.MapMaxElemBytes && (t.IndirectElem() || t.ValueSize != uint8(t.Elem.Size_)) { throw("elem size wrong") } - if t.Key.Align_ > bucketCnt { + if t.Key.Align_ > abi.MapBucketCount { throw("key align too big") } - if t.Elem.Align_ > bucketCnt { + if t.Elem.Align_ > abi.MapBucketCount { throw("elem align too big") } if t.Key.Size_%uintptr(t.Key.Align_) != 0 { @@ -1332,7 +1319,7 @@ func reflect_makemap(t *maptype, cap int) *hmap { if t.Elem.Size_%uintptr(t.Elem.Align_) != 0 { throw("elem size not a multiple of elem align") } - if bucketCnt < 8 { + if abi.MapBucketCount < 8 { throw("bucketsize too small for proper alignment") } if dataOffset%uintptr(t.Key.Align_) != 0 { @@ -1436,8 +1423,7 @@ func reflectlite_maplen(h *hmap) int { return h.count } -const maxZero = 1024 // must 
match value in reflect/value.go:maxZero cmd/compile/internal/gc/walk.go:zeroValSize -var zeroVal [maxZero]byte +var zeroVal [abi.ZeroValSize]byte // mapinitnoop is a no-op function known the Go linker; if a given global // map (of the right size) is determined to be dead, the linker will @@ -1458,35 +1444,47 @@ func mapclone(m any) any { // moveToBmap moves a bucket from src to dst. It returns the destination bucket or new destination bucket if it overflows // and the pos that the next key/value will be written, if pos == bucketCnt means needs to written in overflow bucket. func moveToBmap(t *maptype, h *hmap, dst *bmap, pos int, src *bmap) (*bmap, int) { - for i := 0; i < bucketCnt; i++ { + for i := 0; i < abi.MapBucketCount; i++ { if isEmpty(src.tophash[i]) { continue } - for ; pos < bucketCnt; pos++ { + for ; pos < abi.MapBucketCount; pos++ { if isEmpty(dst.tophash[pos]) { break } } - if pos == bucketCnt { + if pos == abi.MapBucketCount { dst = h.newoverflow(t, dst) pos = 0 } srcK := add(unsafe.Pointer(src), dataOffset+uintptr(i)*uintptr(t.KeySize)) - srcEle := add(unsafe.Pointer(src), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize)) + srcEle := add(unsafe.Pointer(src), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(i)*uintptr(t.ValueSize)) dstK := add(unsafe.Pointer(dst), dataOffset+uintptr(pos)*uintptr(t.KeySize)) - dstEle := add(unsafe.Pointer(dst), dataOffset+bucketCnt*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize)) + dstEle := add(unsafe.Pointer(dst), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+uintptr(pos)*uintptr(t.ValueSize)) dst.tophash[pos] = src.tophash[i] if t.IndirectKey() { - *(*unsafe.Pointer)(dstK) = *(*unsafe.Pointer)(srcK) + srcK = *(*unsafe.Pointer)(srcK) + if t.NeedKeyUpdate() { + kStore := newobject(t.Key) + typedmemmove(t.Key, kStore, srcK) + srcK = kStore + } + // Note: if NeedKeyUpdate is false, then the memory + // used to store the key is immutable, so we can share + // it between the 
original map and its clone. + *(*unsafe.Pointer)(dstK) = srcK } else { typedmemmove(t.Key, dstK, srcK) } if t.IndirectElem() { - *(*unsafe.Pointer)(dstEle) = *(*unsafe.Pointer)(srcEle) + srcEle = *(*unsafe.Pointer)(srcEle) + eStore := newobject(t.Elem) + typedmemmove(t.Elem, eStore, srcEle) + *(*unsafe.Pointer)(dstEle) = eStore } else { typedmemmove(t.Elem, dstEle, srcEle) } @@ -1510,14 +1508,14 @@ func mapclone2(t *maptype, src *hmap) *hmap { fatal("concurrent map clone and map write") } - if src.B == 0 { + if src.B == 0 && !(t.IndirectKey() && t.NeedKeyUpdate()) && !t.IndirectElem() { + // Quick copy for small maps. dst.buckets = newobject(t.Bucket) dst.count = src.count typedmemmove(t.Bucket, dst.buckets, src.buckets) return dst } - //src.B != 0 if dst.B == 0 { dst.buckets = newobject(t.Bucket) } @@ -1565,9 +1563,11 @@ func mapclone2(t *maptype, src *hmap) *hmap { continue } + // oldB < dst.B, so a single source bucket may go to multiple destination buckets. + // Process entries one at a time. 
for srcBmap != nil { // move from oldBlucket to new bucket - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if isEmpty(srcBmap.tophash[i]) { continue } @@ -1581,7 +1581,7 @@ func mapclone2(t *maptype, src *hmap) *hmap { srcK = *((*unsafe.Pointer)(srcK)) } - srcEle := add(unsafe.Pointer(srcBmap), dataOffset+bucketCnt*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) + srcEle := add(unsafe.Pointer(srcBmap), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+i*uintptr(t.ValueSize)) if t.IndirectElem() { srcEle = *((*unsafe.Pointer)(srcEle)) } @@ -1606,8 +1606,8 @@ func keys(m any, p unsafe.Pointer) { return } s := (*slice)(p) - r := int(fastrand()) - offset := uint8(r >> h.B & (bucketCnt - 1)) + r := int(rand()) + offset := uint8(r >> h.B & (abi.MapBucketCount - 1)) if h.B == 0 { copyKeys(t, h, (*bmap)(h.buckets), s, offset) return @@ -1636,8 +1636,8 @@ func keys(m any, p unsafe.Pointer) { func copyKeys(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { for b != nil { - for i := uintptr(0); i < bucketCnt; i++ { - offi := (i + uintptr(offset)) & (bucketCnt - 1) + for i := uintptr(0); i < abi.MapBucketCount; i++ { + offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1) if isEmpty(b.tophash[offi]) { continue } @@ -1669,8 +1669,8 @@ func values(m any, p unsafe.Pointer) { return } s := (*slice)(p) - r := int(fastrand()) - offset := uint8(r >> h.B & (bucketCnt - 1)) + r := int(rand()) + offset := uint8(r >> h.B & (abi.MapBucketCount - 1)) if h.B == 0 { copyValues(t, h, (*bmap)(h.buckets), s, offset) return @@ -1699,8 +1699,8 @@ func values(m any, p unsafe.Pointer) { func copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { for b != nil { - for i := uintptr(0); i < bucketCnt; i++ { - offi := (i + uintptr(offset)) & (bucketCnt - 1) + for i := uintptr(0); i < abi.MapBucketCount; i++ { + offi := (i + uintptr(offset)) & (abi.MapBucketCount - 1) if isEmpty(b.tophash[offi]) { continue } @@ -1709,7 +1709,7 @@ func 
copyValues(t *maptype, h *hmap, b *bmap, s *slice, offset uint8) { fatal("concurrent map read and map write") } - ele := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.KeySize)+offi*uintptr(t.ValueSize)) + ele := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*uintptr(t.KeySize)+offi*uintptr(t.ValueSize)) if t.IndirectElem() { ele = *((*unsafe.Pointer)(ele)) } diff --git a/src/runtime/map_fast32.go b/src/runtime/map_fast32.go index d10dca3e91..01a81439e3 100644 --- a/src/runtime/map_fast32.go +++ b/src/runtime/map_fast32.go @@ -41,9 +41,9 @@ func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer { } } for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) { if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)) } } } @@ -81,9 +81,9 @@ func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) { } } for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) { if *(*uint32)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)), true } } } @@ -123,7 +123,7 @@ again: bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if isEmpty(b.tophash[i]) { if insertb == nil { inserti = i @@ -163,7 +163,7 @@ bucketloop: insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } - insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti 
to avoid bounds checks + insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) // store new key at insert position @@ -172,7 +172,7 @@ bucketloop: h.count++ done: - elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize)) + elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize)) if h.flags&hashWriting == 0 { fatal("concurrent map writes") } @@ -213,7 +213,7 @@ again: bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if isEmpty(b.tophash[i]) { if insertb == nil { inserti = i @@ -253,7 +253,7 @@ bucketloop: insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } - insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4) // store new key at insert position @@ -262,7 +262,7 @@ bucketloop: h.count++ done: - elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.ValueSize)) + elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*4+inserti*uintptr(t.ValueSize)) if h.flags&hashWriting == 0 { fatal("concurrent map writes") } @@ -295,20 +295,20 @@ func mapdelete_fast32(t *maptype, h *hmap, key uint32) { bOrig := b search: for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 4) { if key != *(*uint32)(k) || isEmpty(b.tophash[i]) { continue } // Only clear key if there are pointers in it. // This can only happen if pointers are 32 bit // wide as 64 bit pointers do not fit into a 32 bit key. 
- if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 { + if goarch.PtrSize == 4 && t.Key.Pointers() { // The key must be a pointer as we checked pointers are // 32 bits wide and the key is 32 bits wide also. *(*unsafe.Pointer)(k) = nil } - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.ValueSize)) - if t.Elem.PtrBytes != 0 { + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*4+i*uintptr(t.ValueSize)) + if t.Elem.Pointers() { memclrHasPointers(e, t.Elem.Size_) } else { memclrNoHeapPointers(e, t.Elem.Size_) @@ -316,7 +316,7 @@ search: b.tophash[i] = emptyOne // If the bucket now ends in a bunch of emptyOne states, // change those to emptyRest states. - if i == bucketCnt-1 { + if i == abi.MapBucketCount-1 { if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { goto notLast } @@ -335,7 +335,7 @@ search: c := b for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { } - i = bucketCnt - 1 + i = abi.MapBucketCount - 1 } else { i-- } @@ -348,7 +348,7 @@ search: // Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. if h.count == 0 { - h.hash0 = fastrand() + h.hash0 = uint32(rand()) } break search } @@ -383,7 +383,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { x := &xy[0] x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, bucketCnt*4) + x.e = add(x.k, abi.MapBucketCount*4) if !h.sameSizeGrow() { // Only calculate y pointers if we're growing bigger. 
@@ -391,13 +391,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { y := &xy[1] y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, bucketCnt*4) + y.e = add(y.k, abi.MapBucketCount*4) } for ; b != nil; b = b.overflow(t) { k := add(unsafe.Pointer(b), dataOffset) - e := add(k, bucketCnt*4) - for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) { + e := add(k, abi.MapBucketCount*4) + for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 4), add(e, uintptr(t.ValueSize)) { top := b.tophash[i] if isEmpty(top) { b.tophash[i] = evacuatedEmpty @@ -419,16 +419,16 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap dst := &xy[useY] // evacuation destination - if dst.i == bucketCnt { + if dst.i == abi.MapBucketCount { dst.b = h.newoverflow(t, dst.b) dst.i = 0 dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, bucketCnt*4) + dst.e = add(dst.k, abi.MapBucketCount*4) } - dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check // Copy key. - if goarch.PtrSize == 4 && t.Key.PtrBytes != 0 && writeBarrier.enabled { + if goarch.PtrSize == 4 && t.Key.Pointers() && writeBarrier.enabled { // Write with a write barrier. *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) } else { @@ -446,7 +446,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) { } } // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + if h.flags&oldIterator == 0 && t.Bucket.Pointers() { b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) // Preserve b.tophash because the evacuation // state is maintained there. 
diff --git a/src/runtime/map_fast64.go b/src/runtime/map_fast64.go index d771e0b747..f47bc96f70 100644 --- a/src/runtime/map_fast64.go +++ b/src/runtime/map_fast64.go @@ -41,9 +41,9 @@ func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer { } } for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) { if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)) } } } @@ -81,9 +81,9 @@ func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) { } } for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) { if *(*uint64)(k) == key && !isEmpty(b.tophash[i]) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)), true } } } @@ -123,7 +123,7 @@ again: bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if isEmpty(b.tophash[i]) { if insertb == nil { insertb = b @@ -163,7 +163,7 @@ bucketloop: insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } - insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) // store new key at insert position @@ -172,7 +172,7 @@ bucketloop: h.count++ done: - elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize)) + elem := 
add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize)) if h.flags&hashWriting == 0 { fatal("concurrent map writes") } @@ -213,7 +213,7 @@ again: bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if isEmpty(b.tophash[i]) { if insertb == nil { insertb = b @@ -253,7 +253,7 @@ bucketloop: insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } - insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks + insertb.tophash[inserti&(abi.MapBucketCount-1)] = tophash(hash) // mask inserti to avoid bounds checks insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8) // store new key at insert position @@ -262,7 +262,7 @@ bucketloop: h.count++ done: - elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.ValueSize)) + elem := add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*8+inserti*uintptr(t.ValueSize)) if h.flags&hashWriting == 0 { fatal("concurrent map writes") } @@ -295,12 +295,12 @@ func mapdelete_fast64(t *maptype, h *hmap, key uint64) { bOrig := b search: for ; b != nil; b = b.overflow(t) { - for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) { + for i, k := uintptr(0), b.keys(); i < abi.MapBucketCount; i, k = i+1, add(k, 8) { if key != *(*uint64)(k) || isEmpty(b.tophash[i]) { continue } // Only clear key if there are pointers in it. 
- if t.Key.PtrBytes != 0 { + if t.Key.Pointers() { if goarch.PtrSize == 8 { *(*unsafe.Pointer)(k) = nil } else { @@ -309,8 +309,8 @@ search: memclrHasPointers(k, 8) } } - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.ValueSize)) - if t.Elem.PtrBytes != 0 { + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*8+i*uintptr(t.ValueSize)) + if t.Elem.Pointers() { memclrHasPointers(e, t.Elem.Size_) } else { memclrNoHeapPointers(e, t.Elem.Size_) @@ -318,7 +318,7 @@ search: b.tophash[i] = emptyOne // If the bucket now ends in a bunch of emptyOne states, // change those to emptyRest states. - if i == bucketCnt-1 { + if i == abi.MapBucketCount-1 { if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { goto notLast } @@ -337,7 +337,7 @@ search: c := b for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { } - i = bucketCnt - 1 + i = abi.MapBucketCount - 1 } else { i-- } @@ -350,7 +350,7 @@ search: // Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. if h.count == 0 { - h.hash0 = fastrand() + h.hash0 = uint32(rand()) } break search } @@ -385,7 +385,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { x := &xy[0] x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, bucketCnt*8) + x.e = add(x.k, abi.MapBucketCount*8) if !h.sameSizeGrow() { // Only calculate y pointers if we're growing bigger. 
@@ -393,13 +393,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { y := &xy[1] y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, bucketCnt*8) + y.e = add(y.k, abi.MapBucketCount*8) } for ; b != nil; b = b.overflow(t) { k := add(unsafe.Pointer(b), dataOffset) - e := add(k, bucketCnt*8) - for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) { + e := add(k, abi.MapBucketCount*8) + for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 8), add(e, uintptr(t.ValueSize)) { top := b.tophash[i] if isEmpty(top) { b.tophash[i] = evacuatedEmpty @@ -421,16 +421,16 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap dst := &xy[useY] // evacuation destination - if dst.i == bucketCnt { + if dst.i == abi.MapBucketCount { dst.b = h.newoverflow(t, dst.b) dst.i = 0 dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, bucketCnt*8) + dst.e = add(dst.k, abi.MapBucketCount*8) } - dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check // Copy key. - if t.Key.PtrBytes != 0 && writeBarrier.enabled { + if t.Key.Pointers() && writeBarrier.enabled { if goarch.PtrSize == 8 { // Write with a write barrier. *(*unsafe.Pointer)(dst.k) = *(*unsafe.Pointer)(k) @@ -454,7 +454,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) { } } // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + if h.flags&oldIterator == 0 && t.Bucket.Pointers() { b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) // Preserve b.tophash because the evacuation // state is maintained there. 
diff --git a/src/runtime/map_faststr.go b/src/runtime/map_faststr.go index ef71da859a..a9898ba1ca 100644 --- a/src/runtime/map_faststr.go +++ b/src/runtime/map_faststr.go @@ -27,7 +27,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { b := (*bmap)(h.buckets) if key.len < 32 { // short key, doing lots of comparisons is ok - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || isEmpty(b.tophash[i]) { if b.tophash[i] == emptyRest { @@ -36,14 +36,14 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { continue } if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) } } return unsafe.Pointer(&zeroVal[0]) } // long key, try not to do more comparisons than necessary - keymaybe := uintptr(bucketCnt) - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + keymaybe := uintptr(abi.MapBucketCount) + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || isEmpty(b.tophash[i]) { if b.tophash[i] == emptyRest { @@ -52,7 +52,7 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer { continue } if k.str == key.str { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) } // check first 4 bytes if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { @@ -62,16 +62,16 @@ func mapaccess1_faststr(t *maptype, h *hmap, ky string) 
unsafe.Pointer { if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { continue } - if keymaybe != bucketCnt { + if keymaybe != abi.MapBucketCount { // Two keys are potential matches. Use hash to distinguish them. goto dohash } keymaybe = i } - if keymaybe != bucketCnt { + if keymaybe != abi.MapBucketCount { k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) if memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)) } } return unsafe.Pointer(&zeroVal[0]) @@ -92,13 +92,13 @@ dohash: } top := tophash(hash) for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || b.tophash[i] != top { continue } if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) } } } @@ -122,7 +122,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { b := (*bmap)(h.buckets) if key.len < 32 { // short key, doing lots of comparisons is ok - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || isEmpty(b.tophash[i]) { if b.tophash[i] == emptyRest { @@ -131,14 +131,14 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { continue 
} if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true } } return unsafe.Pointer(&zeroVal[0]), false } // long key, try not to do more comparisons than necessary - keymaybe := uintptr(bucketCnt) - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + keymaybe := uintptr(abi.MapBucketCount) + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || isEmpty(b.tophash[i]) { if b.tophash[i] == emptyRest { @@ -147,7 +147,7 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { continue } if k.str == key.str { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true } // check first 4 bytes if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) { @@ -157,16 +157,16 @@ func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) { if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) { continue } - if keymaybe != bucketCnt { + if keymaybe != abi.MapBucketCount { // Two keys are potential matches. Use hash to distinguish them. 
goto dohash } keymaybe = i } - if keymaybe != bucketCnt { + if keymaybe != abi.MapBucketCount { k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*goarch.PtrSize)) if memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+keymaybe*uintptr(t.ValueSize)), true } } return unsafe.Pointer(&zeroVal[0]), false @@ -187,13 +187,13 @@ dohash: } top := tophash(hash) for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || b.tophash[i] != top { continue } if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) { - return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true + return add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)), true } } } @@ -235,7 +235,7 @@ again: bucketloop: for { - for i := uintptr(0); i < bucketCnt; i++ { + for i := uintptr(0); i < abi.MapBucketCount; i++ { if b.tophash[i] != top { if isEmpty(b.tophash[i]) && insertb == nil { insertb = b @@ -282,7 +282,7 @@ bucketloop: insertb = h.newoverflow(t, b) inserti = 0 // not necessary, but avoids needlessly spilling inserti } - insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks + insertb.tophash[inserti&(abi.MapBucketCount-1)] = top // mask inserti to avoid bounds checks insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*2*goarch.PtrSize) // store new key at insert position @@ -290,7 +290,7 @@ bucketloop: h.count++ done: - elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.ValueSize)) + elem := 
add(unsafe.Pointer(insertb), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+inserti*uintptr(t.ValueSize)) if h.flags&hashWriting == 0 { fatal("concurrent map writes") } @@ -325,7 +325,7 @@ func mapdelete_faststr(t *maptype, h *hmap, ky string) { top := tophash(hash) search: for ; b != nil; b = b.overflow(t) { - for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { + for i, kptr := uintptr(0), b.keys(); i < abi.MapBucketCount; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) { k := (*stringStruct)(kptr) if k.len != key.len || b.tophash[i] != top { continue @@ -335,8 +335,8 @@ search: } // Clear key's pointer. k.str = nil - e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.ValueSize)) - if t.Elem.PtrBytes != 0 { + e := add(unsafe.Pointer(b), dataOffset+abi.MapBucketCount*2*goarch.PtrSize+i*uintptr(t.ValueSize)) + if t.Elem.Pointers() { memclrHasPointers(e, t.Elem.Size_) } else { memclrNoHeapPointers(e, t.Elem.Size_) @@ -344,7 +344,7 @@ search: b.tophash[i] = emptyOne // If the bucket now ends in a bunch of emptyOne states, // change those to emptyRest states. - if i == bucketCnt-1 { + if i == abi.MapBucketCount-1 { if b.overflow(t) != nil && b.overflow(t).tophash[0] != emptyRest { goto notLast } @@ -363,7 +363,7 @@ search: c := b for b = bOrig; b.overflow(t) != c; b = b.overflow(t) { } - i = bucketCnt - 1 + i = abi.MapBucketCount - 1 } else { i-- } @@ -376,7 +376,7 @@ search: // Reset the hash seed to make it more difficult for attackers to // repeatedly trigger hash collisions. See issue 25237. 
if h.count == 0 { - h.hash0 = fastrand() + h.hash0 = uint32(rand()) } break search } @@ -411,7 +411,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { x := &xy[0] x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.BucketSize))) x.k = add(unsafe.Pointer(x.b), dataOffset) - x.e = add(x.k, bucketCnt*2*goarch.PtrSize) + x.e = add(x.k, abi.MapBucketCount*2*goarch.PtrSize) if !h.sameSizeGrow() { // Only calculate y pointers if we're growing bigger. @@ -419,13 +419,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { y := &xy[1] y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.BucketSize))) y.k = add(unsafe.Pointer(y.b), dataOffset) - y.e = add(y.k, bucketCnt*2*goarch.PtrSize) + y.e = add(y.k, abi.MapBucketCount*2*goarch.PtrSize) } for ; b != nil; b = b.overflow(t) { k := add(unsafe.Pointer(b), dataOffset) - e := add(k, bucketCnt*2*goarch.PtrSize) - for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) { + e := add(k, abi.MapBucketCount*2*goarch.PtrSize) + for i := 0; i < abi.MapBucketCount; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.ValueSize)) { top := b.tophash[i] if isEmpty(top) { b.tophash[i] = evacuatedEmpty @@ -447,13 +447,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { b.tophash[i] = evacuatedX + useY // evacuatedX + 1 == evacuatedY, enforced in makemap dst := &xy[useY] // evacuation destination - if dst.i == bucketCnt { + if dst.i == abi.MapBucketCount { dst.b = h.newoverflow(t, dst.b) dst.i = 0 dst.k = add(unsafe.Pointer(dst.b), dataOffset) - dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize) + dst.e = add(dst.k, abi.MapBucketCount*2*goarch.PtrSize) } - dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check + dst.b.tophash[dst.i&(abi.MapBucketCount-1)] = top // mask dst.i as an optimization, to avoid a bounds check // Copy key. 
*(*string)(dst.k) = *(*string)(k) @@ -469,7 +469,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) { } } // Unlink the overflow buckets & clear key/elem to help GC. - if h.flags&oldIterator == 0 && t.Bucket.PtrBytes != 0 { + if h.flags&oldIterator == 0 && t.Bucket.Pointers() { b := add(h.oldbuckets, oldbucket*uintptr(t.BucketSize)) // Preserve b.tophash because the evacuation // state is maintained there. diff --git a/src/runtime/mbarrier.go b/src/runtime/mbarrier.go index c4b6c2a789..dc6922da54 100644 --- a/src/runtime/mbarrier.go +++ b/src/runtime/mbarrier.go @@ -153,7 +153,7 @@ func typedmemmove(typ *abi.Type, dst, src unsafe.Pointer) { if dst == src { return } - if writeBarrier.enabled && typ.PtrBytes != 0 { + if writeBarrier.enabled && typ.Pointers() { // This always copies a full value of type typ so it's safe // to pass typ along as an optimization. See the comment on // bulkBarrierPreWrite. @@ -232,7 +232,7 @@ func reflectlite_typedmemmove(typ *_type, dst, src unsafe.Pointer) { // //go:nosplit func reflectcallmove(typ *_type, dst, src unsafe.Pointer, size uintptr, regs *abi.RegArgs) { - if writeBarrier.enabled && typ != nil && typ.PtrBytes != 0 && size >= goarch.PtrSize { + if writeBarrier.enabled && typ != nil && typ.Pointers() && size >= goarch.PtrSize { // Pass nil for the type. dst does not point to value of type typ, // but rather points into one, so applying the optimization is not // safe. See the comment on this function. 
@@ -305,7 +305,7 @@ func typedslicecopy(typ *_type, dstPtr unsafe.Pointer, dstLen int, srcPtr unsafe //go:linkname reflect_typedslicecopy reflect.typedslicecopy func reflect_typedslicecopy(elemType *_type, dst, src slice) int { - if elemType.PtrBytes == 0 { + if !elemType.Pointers() { return slicecopy(dst.array, dst.len, src.array, src.len, elemType.Size_) } return typedslicecopy(elemType, dst.array, dst.len, src.array, src.len) @@ -323,7 +323,7 @@ func reflect_typedslicecopy(elemType *_type, dst, src slice) int { // //go:nosplit func typedmemclr(typ *_type, ptr unsafe.Pointer) { - if writeBarrier.enabled && typ.PtrBytes != 0 { + if writeBarrier.enabled && typ.Pointers() { // This always clears a whole value of type typ, so it's // safe to pass a type here and apply the optimization. // See the comment on bulkBarrierPreWrite. @@ -339,7 +339,7 @@ func reflect_typedmemclr(typ *_type, ptr unsafe.Pointer) { //go:linkname reflect_typedmemclrpartial reflect.typedmemclrpartial func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintptr) { - if writeBarrier.enabled && typ.PtrBytes != 0 { + if writeBarrier.enabled && typ.Pointers() { // Pass nil for the type. ptr does not point to value of type typ, // but rather points into one so it's not safe to apply the optimization. // See the comment on this function in the reflect package and the @@ -352,7 +352,7 @@ func reflect_typedmemclrpartial(typ *_type, ptr unsafe.Pointer, off, size uintpt //go:linkname reflect_typedarrayclear reflect.typedarrayclear func reflect_typedarrayclear(typ *_type, ptr unsafe.Pointer, len int) { size := typ.Size_ * uintptr(len) - if writeBarrier.enabled && typ.PtrBytes != 0 { + if writeBarrier.enabled && typ.Pointers() { // This always clears whole elements of an array, so it's // safe to pass a type here. See the comment on bulkBarrierPreWrite. 
bulkBarrierPreWrite(uintptr(ptr), 0, size, typ) diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go index a0402d2933..cdd1c5fc3b 100644 --- a/src/runtime/mbitmap.go +++ b/src/runtime/mbitmap.go @@ -441,7 +441,7 @@ func typeBitsBulkBarrier(typ *_type, dst, src, size uintptr) { } // countAlloc returns the number of objects allocated in span s by -// scanning the allocation bitmap. +// scanning the mark bitmap. func (s *mspan) countAlloc() int { count := 0 bytes := divRoundUp(uintptr(s.nelems), 8) diff --git a/src/runtime/mbitmap_allocheaders.go b/src/runtime/mbitmap_allocheaders.go index 03cec5ffcc..1ec055352e 100644 --- a/src/runtime/mbitmap_allocheaders.go +++ b/src/runtime/mbitmap_allocheaders.go @@ -48,9 +48,9 @@ // is zeroed, so the GC just observes nil pointers. // Note that this "tiled" bitmap isn't stored anywhere; it is generated on-the-fly. // -// For objects without their own span, the type metadata is stored in the last -// word of the allocation slot. For objects with their own span, the type metadata -// is stored in the mspan. +// For objects without their own span, the type metadata is stored in the first +// word before the object at the beginning of the allocation slot. For objects +// with their own span, the type metadata is stored in the mspan. // // The bitmap for small unallocated objects in scannable spans is not maintained // (can be junk). @@ -167,7 +167,8 @@ func (span *mspan) typePointersOf(addr, size uintptr) typePointers { } // typePointersOfUnchecked is like typePointersOf, but assumes addr is the base -// pointer of an object in span. It returns an iterator that generates all pointers +// of an allocation slot in a span (the start of the object if no header, the +// header otherwise). It returns an iterator that generates all pointers // in the range [addr, addr+span.elemsize). // // nosplit because it is used during write barriers and must not be preempted. 
@@ -192,8 +193,9 @@ func (span *mspan) typePointersOfUnchecked(addr uintptr) typePointers { // All of these objects have a header. var typ *_type if spc.sizeclass() != 0 { - // Pull the allocation header from the last word of the object. - typ = *(**_type)(unsafe.Pointer(addr + span.elemsize - mallocHeaderSize)) + // Pull the allocation header from the first word of the object. + typ = *(**_type)(unsafe.Pointer(addr)) + addr += mallocHeaderSize } else { typ = span.largeType } @@ -881,12 +883,12 @@ func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) ( // We only need to write size, PtrBytes, and GCData, since that's all // the GC cares about. gctyp = (*_type)(unsafe.Pointer(progSpan.base())) - gctyp.Kind_ |= kindGCProg gctyp.Size_ = typ.Size_ gctyp.PtrBytes = typ.PtrBytes gctyp.GCData = (*byte)(add(unsafe.Pointer(progSpan.base()), heapBitsOff)) + gctyp.TFlag = abi.TFlagUnrolledBitmap - // Expand the GC program into space reserved at the end of the object. + // Expand the GC program into space reserved at the end of the new span. 
runGCProg(addb(typ.GCData, 4), gctyp.GCData) } @@ -905,14 +907,14 @@ func heapSetType(x, dataSize uintptr, typ *_type, header **_type, span *mspan) ( if header == nil { maxIterBytes = dataSize } - off := alignUp(uintptr(fastrand())%dataSize, goarch.PtrSize) + off := alignUp(uintptr(cheaprand())%dataSize, goarch.PtrSize) size := dataSize - off if size == 0 { off -= goarch.PtrSize size += goarch.PtrSize } interior := x + off - size -= alignDown(uintptr(fastrand())%size, goarch.PtrSize) + size -= alignDown(uintptr(cheaprand())%size, goarch.PtrSize) if size == 0 { size = goarch.PtrSize } diff --git a/src/runtime/mem_linux.go b/src/runtime/mem_linux.go index c9823d3011..d63c38c209 100644 --- a/src/runtime/mem_linux.go +++ b/src/runtime/mem_linux.go @@ -170,4 +170,12 @@ func sysMapOS(v unsafe.Pointer, n uintptr) { print("runtime: mmap(", v, ", ", n, ") returned ", p, ", ", err, "\n") throw("runtime: cannot map pages in arena address space") } + + // Disable huge pages if the GODEBUG for it is set. + // + // Note that there are a few sysHugePage calls that can override this, but + // they're all for GC metadata. 
+ if debug.disablethp != 0 { + sysNoHugePageOS(v, n) + } } diff --git a/src/runtime/memclr_loong64.s b/src/runtime/memclr_loong64.s index 7bb6f3dfc9..313e4d4f33 100644 --- a/src/runtime/memclr_loong64.s +++ b/src/runtime/memclr_loong64.s @@ -6,37 +6,39 @@ #include "textflag.h" // func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr) -TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 - MOVV ptr+0(FP), R6 - MOVV n+8(FP), R7 - ADDV R6, R7, R4 +TEXT runtime·memclrNoHeapPointers(SB),NOSPLIT,$0-16 +#ifndef GOEXPERIMENT_regabiargs + MOVV ptr+0(FP), R4 + MOVV n+8(FP), R5 +#endif + ADDV R4, R5, R6 // if less than 8 bytes, do one byte at a time - SGTU $8, R7, R8 + SGTU $8, R5, R8 BNE R8, out // do one byte at a time until 8-aligned - AND $7, R6, R8 + AND $7, R4, R8 BEQ R8, words - MOVB R0, (R6) - ADDV $1, R6 + MOVB R0, (R4) + ADDV $1, R4 JMP -4(PC) words: // do 8 bytes at a time if there is room - ADDV $-7, R4, R7 + ADDV $-7, R6, R5 PCALIGN $16 - SGTU R7, R6, R8 + SGTU R5, R4, R8 BEQ R8, out - MOVV R0, (R6) - ADDV $8, R6 + MOVV R0, (R4) + ADDV $8, R4 JMP -4(PC) out: - BEQ R6, R4, done - MOVB R0, (R6) - ADDV $1, R6 + BEQ R4, R6, done + MOVB R0, (R4) + ADDV $1, R4 JMP -3(PC) done: RET diff --git a/src/runtime/memmove_loong64.s b/src/runtime/memmove_loong64.s index 0f139bcc13..5b7aeba698 100644 --- a/src/runtime/memmove_loong64.s +++ b/src/runtime/memmove_loong64.s @@ -7,10 +7,12 @@ // See memmove Go doc for important implementation constraints. 
// func memmove(to, from unsafe.Pointer, n uintptr) -TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 +TEXT runtime·memmove(SB), NOSPLIT|NOFRAME, $0-24 +#ifndef GOEXPERIMENT_regabiargs MOVV to+0(FP), R4 MOVV from+8(FP), R5 MOVV n+16(FP), R6 +#endif BNE R6, check RET diff --git a/src/runtime/metrics.go b/src/runtime/metrics.go index 4bd167135d..f97a3804ab 100644 --- a/src/runtime/metrics.go +++ b/src/runtime/metrics.go @@ -470,7 +470,7 @@ func initMetrics() { "/sync/mutex/wait/total:seconds": { compute: func(_ *statAggregate, out *metricValue) { out.kind = metricKindFloat64 - out.scalar = float64bits(nsToSec(sched.totalMutexWaitTime.Load())) + out.scalar = float64bits(nsToSec(totalMutexWaitTimeNanos())) }, }, } diff --git a/src/runtime/metrics/description.go b/src/runtime/metrics/description.go index abe7440f10..19a7dbf07a 100644 --- a/src/runtime/metrics/description.go +++ b/src/runtime/metrics/description.go @@ -441,7 +441,7 @@ var allDesc = []Description{ }, { Name: "/sync/mutex/wait/total:seconds", - Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex or sync.RWMutex. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.", + Description: "Approximate cumulative time goroutines have spent blocked on a sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric is useful for identifying global changes in lock contention. Collect a mutex or block profile using the runtime/pprof package for more detailed contention data.", Kind: KindFloat64, Cumulative: true, }, diff --git a/src/runtime/metrics/doc.go b/src/runtime/metrics/doc.go index 5be6c32bfa..e63599e0d9 100644 --- a/src/runtime/metrics/doc.go +++ b/src/runtime/metrics/doc.go @@ -8,7 +8,7 @@ /* Package metrics provides a stable interface to access implementation-defined metrics exported by the Go runtime. 
This package is similar to existing functions -like [runtime.ReadMemStats] and [debug.ReadGCStats], but significantly more general. +like [runtime.ReadMemStats] and [runtime/debug.ReadGCStats], but significantly more general. The set of metrics defined by this package may evolve as the runtime itself evolves, and also enables variation across Go implementations, whose relevant @@ -28,7 +28,8 @@ encouraged to use build tags, and although metrics may be deprecated and removed users should consider this to be an exceptional and rare event, coinciding with a very large change in a particular Go implementation. -Each metric key also has a "kind" that describes the format of the metric's value. +Each metric key also has a "kind" (see [ValueKind]) that describes the format of the +metric's value. In the interest of not breaking users of this package, the "kind" for a given metric is guaranteed not to change. If it must change, then a new metric will be introduced with a new key and a new "kind." @@ -314,6 +315,18 @@ Below is the full list of supported metrics, ordered lexicographically. The number of non-default behaviors executed by the crypto/tls package due to a non-default GODEBUG=tlsrsakex=... setting. + /godebug/non-default-behavior/tlsunsafeekm:events + The number of non-default behaviors executed by the crypto/tls + package due to a non-default GODEBUG=tlsunsafeekm=... setting. + + /godebug/non-default-behavior/winreadlinkvolume:events + The number of non-default behaviors executed by the os package + due to a non-default GODEBUG=winreadlinkvolume=... setting. + + /godebug/non-default-behavior/winsymlink:events + The number of non-default behaviors executed by the os package + due to a non-default GODEBUG=winsymlink=... setting. + /godebug/non-default-behavior/x509sha1:events The number of non-default behaviors executed by the crypto/x509 package due to a non-default GODEBUG=x509sha1=... setting. 
@@ -323,6 +336,11 @@ Below is the full list of supported metrics, ordered lexicographically. package due to a non-default GODEBUG=x509usefallbackroots=... setting. + /godebug/non-default-behavior/x509usepolicies:events + The number of non-default behaviors executed by the crypto/x509 + package due to a non-default GODEBUG=x509usepolicies=... + setting. + /godebug/non-default-behavior/zipinsecurepath:events The number of non-default behaviors executed by the archive/zip package due to a non-default GODEBUG=zipinsecurepath=... @@ -443,10 +461,10 @@ Below is the full list of supported metrics, ordered lexicographically. monotonically. /sync/mutex/wait/total:seconds - Approximate cumulative time goroutines have spent blocked - on a sync.Mutex or sync.RWMutex. This metric is useful for - identifying global changes in lock contention. Collect a mutex - or block profile using the runtime/pprof package for more - detailed contention data. + Approximate cumulative time goroutines have spent blocked on a + sync.Mutex, sync.RWMutex, or runtime-internal lock. This metric + is useful for identifying global changes in lock contention. + Collect a mutex or block profile using the runtime/pprof package + for more detailed contention data. 
*/ package metrics diff --git a/src/runtime/metrics_test.go b/src/runtime/metrics_test.go index 1e82897381..d7f41334cd 100644 --- a/src/runtime/metrics_test.go +++ b/src/runtime/metrics_test.go @@ -6,15 +6,22 @@ package runtime_test import ( "bytes" + "fmt" + "internal/goexperiment" + "internal/profile" + "internal/testenv" "os" "reflect" "runtime" "runtime/debug" "runtime/metrics" + "runtime/pprof" "runtime/trace" + "slices" "sort" "strings" "sync" + "sync/atomic" "testing" "time" "unsafe" @@ -40,7 +47,7 @@ func TestReadMetrics(t *testing.T) { oldLimit := debug.SetMemoryLimit(limit) defer debug.SetMemoryLimit(oldLimit) - // Set an GC percent to check the metric for it + // Set a GC percent to check the metric for it gcPercent := 99 oldGCPercent := debug.SetGCPercent(gcPercent) defer debug.SetGCPercent(oldGCPercent) @@ -939,3 +946,347 @@ func TestSchedPauseMetrics(t *testing.T) { }) } } + +func TestRuntimeLockMetricsAndProfile(t *testing.T) { + testenv.SkipFlaky(t, 64253) + + old := runtime.SetMutexProfileFraction(0) // enabled during sub-tests + defer runtime.SetMutexProfileFraction(old) + if old != 0 { + t.Fatalf("need MutexProfileRate 0, got %d", old) + } + + { + before := os.Getenv("GODEBUG") + for _, s := range strings.Split(before, ",") { + if strings.HasPrefix(s, "runtimecontentionstacks=") { + t.Logf("GODEBUG includes explicit setting %q", s) + } + } + defer func() { os.Setenv("GODEBUG", before) }() + os.Setenv("GODEBUG", fmt.Sprintf("%s,runtimecontentionstacks=1", before)) + } + + t.Logf("NumCPU %d", runtime.NumCPU()) + t.Logf("GOMAXPROCS %d", runtime.GOMAXPROCS(0)) + if minCPU := 2; runtime.NumCPU() < minCPU { + t.Skipf("creating and observing contention on runtime-internal locks requires NumCPU >= %d", minCPU) + } + + loadProfile := func(t *testing.T) *profile.Profile { + var w bytes.Buffer + pprof.Lookup("mutex").WriteTo(&w, 0) + p, err := profile.Parse(&w) + if err != nil { + t.Fatalf("failed to parse profile: %v", err) + } + if err := p.CheckValid(); 
err != nil { + t.Fatalf("invalid profile: %v", err) + } + return p + } + + measureDelta := func(t *testing.T, fn func()) (metricGrowth, profileGrowth float64, p *profile.Profile) { + beforeProfile := loadProfile(t) + beforeMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}} + metrics.Read(beforeMetrics) + + fn() + + afterProfile := loadProfile(t) + afterMetrics := []metrics.Sample{{Name: "/sync/mutex/wait/total:seconds"}} + metrics.Read(afterMetrics) + + sumSamples := func(p *profile.Profile, i int) int64 { + var sum int64 + for _, s := range p.Sample { + sum += s.Value[i] + } + return sum + } + + metricGrowth = afterMetrics[0].Value.Float64() - beforeMetrics[0].Value.Float64() + profileGrowth = float64(sumSamples(afterProfile, 1)-sumSamples(beforeProfile, 1)) * time.Nanosecond.Seconds() + + // The internal/profile package does not support compaction; this delta + // profile will include separate positive and negative entries. + p = afterProfile.Copy() + if len(beforeProfile.Sample) > 0 { + err := p.Merge(beforeProfile, -1) + if err != nil { + t.Fatalf("Merge profiles: %v", err) + } + } + + return metricGrowth, profileGrowth, p + } + + testcase := func(strictTiming bool, acceptStacks [][]string, workers int, fn func() bool) func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) { + return func(t *testing.T) (metricGrowth, profileGrowth float64, n, value int64) { + metricGrowth, profileGrowth, p := measureDelta(t, func() { + var started, stopped sync.WaitGroup + started.Add(workers) + stopped.Add(workers) + for i := 0; i < workers; i++ { + w := &contentionWorker{ + before: func() { + started.Done() + started.Wait() + }, + after: func() { + stopped.Done() + }, + fn: fn, + } + go w.run() + } + stopped.Wait() + }) + + if profileGrowth == 0 { + t.Errorf("no increase in mutex profile") + } + if metricGrowth == 0 && strictTiming { + // If the critical section is very short, systems with low timer + // resolution may be unable to measure 
it via nanotime. + t.Errorf("no increase in /sync/mutex/wait/total:seconds metric") + } + // This comparison is possible because the time measurements in support of + // runtime/pprof and runtime/metrics for runtime-internal locks are so close + // together. It doesn't work as well for user-space contention, where the + // involved goroutines are not _Grunnable the whole time and so need to pass + // through the scheduler. + t.Logf("lock contention growth in runtime/pprof's view (%fs)", profileGrowth) + t.Logf("lock contention growth in runtime/metrics' view (%fs)", metricGrowth) + + acceptStacks = append([][]string(nil), acceptStacks...) + for i, stk := range acceptStacks { + if goexperiment.StaticLockRanking { + if !slices.ContainsFunc(stk, func(s string) bool { + return s == "runtime.systemstack" || s == "runtime.mcall" || s == "runtime.mstart" + }) { + // stk is a call stack that is still on the user stack when + // it calls runtime.unlock. Add the extra function that + // we'll see, when the static lock ranking implementation of + // runtime.unlockWithRank switches to the system stack. + stk = append([]string{"runtime.unlockWithRank"}, stk...) 
+ } + } + acceptStacks[i] = stk + } + + var stks [][]string + values := make([][2]int64, len(acceptStacks)) + for _, s := range p.Sample { + var have []string + for _, loc := range s.Location { + for _, line := range loc.Line { + have = append(have, line.Function.Name) + } + } + stks = append(stks, have) + for i, stk := range acceptStacks { + if slices.Equal(have, stk) { + values[i][0] += s.Value[0] + values[i][1] += s.Value[1] + } + } + } + for i, stk := range acceptStacks { + n += values[i][0] + value += values[i][1] + t.Logf("stack %v has samples totaling n=%d value=%d", stk, values[i][0], values[i][1]) + } + if n == 0 && value == 0 { + t.Logf("profile:\n%s", p) + for _, have := range stks { + t.Logf("have stack %v", have) + } + for _, stk := range acceptStacks { + t.Errorf("want stack %v", stk) + } + } + + return metricGrowth, profileGrowth, n, value + } + } + + name := t.Name() + + t.Run("runtime.lock", func(t *testing.T) { + mus := make([]runtime.Mutex, 100) + var needContention atomic.Int64 + delay := 100 * time.Microsecond // large relative to system noise, for comparison between clocks + delayMicros := delay.Microseconds() + + // The goroutine that acquires the lock will only proceed when it + // detects that its partner is contended for the lock. That will lead to + // live-lock if anything (such as a STW) prevents the partner goroutine + // from running. Allowing the contention workers to pause and restart + // (to allow a STW to proceed) makes it harder to confirm that we're + // counting the correct number of contention events, since some locks + // will end up contended twice. Instead, disable the GC. 
+ defer debug.SetGCPercent(debug.SetGCPercent(-1)) + + const workers = 2 + if runtime.GOMAXPROCS(0) < workers { + t.Skipf("contention on runtime-internal locks requires GOMAXPROCS >= %d", workers) + } + + fn := func() bool { + n := int(needContention.Load()) + if n < 0 { + return false + } + mu := &mus[n] + + runtime.Lock(mu) + for int(needContention.Load()) == n { + if runtime.MutexContended(mu) { + // make them wait a little while + for start := runtime.Nanotime(); (runtime.Nanotime()-start)/1000 < delayMicros; { + runtime.Usleep(uint32(delayMicros)) + } + break + } + } + runtime.Unlock(mu) + needContention.Store(int64(n - 1)) + + return true + } + + stks := [][]string{{ + "runtime.unlock", + "runtime_test." + name + ".func5.1", + "runtime_test.(*contentionWorker).run", + }} + + t.Run("sample-1", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(1) + defer runtime.SetMutexProfileFraction(old) + + needContention.Store(int64(len(mus) - 1)) + metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t) + + if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); have < want { + // The test imposes a delay with usleep, verified with calls to + // nanotime. Compare against the runtime/metrics package's view + // (based on nanotime) rather than runtime/pprof's view (based + // on cputicks). 
+ t.Errorf("runtime/metrics reported less than the known minimum contention duration (%fs < %fs)", have, want) + } + if have, want := n, int64(len(mus)); have != want { + t.Errorf("mutex profile reported contention count different from the known true count (%d != %d)", have, want) + } + + const slop = 1.5 // account for nanotime vs cputicks + if profileGrowth > slop*metricGrowth || metricGrowth > slop*profileGrowth { + t.Errorf("views differ by more than %fx", slop) + } + }) + + t.Run("sample-2", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(2) + defer runtime.SetMutexProfileFraction(old) + + needContention.Store(int64(len(mus) - 1)) + metricGrowth, profileGrowth, n, _ := testcase(true, stks, workers, fn)(t) + + // With 100 trials and profile fraction of 2, we expect to capture + // 50 samples. Allow the test to pass if we get at least 20 samples; + // the CDF of the binomial distribution says there's less than a + // 1e-9 chance of that, which is an acceptably low flakiness rate. + const samplingSlop = 2.5 + + if have, want := metricGrowth, delay.Seconds()*float64(len(mus)); samplingSlop*have < want { + // The test imposes a delay with usleep, verified with calls to + // nanotime. Compare against the runtime/metrics package's view + // (based on nanotime) rather than runtime/pprof's view (based + // on cputicks). 
+ t.Errorf("runtime/metrics reported less than the known minimum contention duration (%f * %fs < %fs)", samplingSlop, have, want) + } + if have, want := n, int64(len(mus)); float64(have) > float64(want)*samplingSlop || float64(want) > float64(have)*samplingSlop { + t.Errorf("mutex profile reported contention count too different from the expected count (%d far from %d)", have, want) + } + + const timerSlop = 1.5 * samplingSlop // account for nanotime vs cputicks, plus the two views' independent sampling + if profileGrowth > timerSlop*metricGrowth || metricGrowth > timerSlop*profileGrowth { + t.Errorf("views differ by more than %fx", timerSlop) + } + }) + }) + + t.Run("runtime.semrelease", func(t *testing.T) { + old := runtime.SetMutexProfileFraction(1) + defer runtime.SetMutexProfileFraction(old) + + const workers = 3 + if runtime.GOMAXPROCS(0) < workers { + t.Skipf("creating and observing contention on runtime-internal semaphores requires GOMAXPROCS >= %d", workers) + } + + var sem uint32 = 1 + var tries atomic.Int32 + tries.Store(10_000_000) // prefer controlled failure to timeout + var sawContention atomic.Int32 + var need int32 = 1 + fn := func() bool { + if sawContention.Load() >= need { + return false + } + if tries.Add(-1) < 0 { + return false + } + + runtime.Semacquire(&sem) + runtime.Semrelease1(&sem, false, 0) + if runtime.MutexContended(runtime.SemRootLock(&sem)) { + sawContention.Add(1) + } + return true + } + + stks := [][]string{ + { + "runtime.unlock", + "runtime.semrelease1", + "runtime_test.TestRuntimeLockMetricsAndProfile.func6.1", + "runtime_test.(*contentionWorker).run", + }, + { + "runtime.unlock", + "runtime.semacquire1", + "runtime.semacquire", + "runtime_test.TestRuntimeLockMetricsAndProfile.func6.1", + "runtime_test.(*contentionWorker).run", + }, + } + + // Verify that we get call stack we expect, with anything more than zero + // cycles / zero samples. 
The duration of each contention event is too + // small relative to the expected overhead for us to verify its value + // more directly. Leave that to the explicit lock/unlock test. + + testcase(false, stks, workers, fn)(t) + + if remaining := tries.Load(); remaining >= 0 { + t.Logf("finished test early (%d tries remaining)", remaining) + } + }) +} + +// contentionWorker provides cleaner call stacks for lock contention profile tests +type contentionWorker struct { + before func() + fn func() bool + after func() +} + +func (w *contentionWorker) run() { + defer w.after() + w.before() + + for w.fn() { + } +} diff --git a/src/runtime/mfinal.go b/src/runtime/mfinal.go index 18cd93e77e..ea3d8a4579 100644 --- a/src/runtime/mfinal.go +++ b/src/runtime/mfinal.go @@ -9,6 +9,7 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/goexperiment" "runtime/internal/atomic" "runtime/internal/sys" "unsafe" @@ -299,6 +300,27 @@ func isGoPointerWithoutSpan(p unsafe.Pointer) bool { return false } +// blockUntilEmptyFinalizerQueue blocks until either the finalizer +// queue is emptied (and the finalizers have executed) or the timeout +// is reached. Returns true if the finalizer queue was emptied. +// This is used by the runtime and sync tests. +func blockUntilEmptyFinalizerQueue(timeout int64) bool { + start := nanotime() + for nanotime()-start < timeout { + lock(&finlock) + // We know the queue has been drained when both finq is nil + // and the finalizer g has stopped executing. + empty := finq == nil + empty = empty && readgstatus(fing) == _Gwaiting && fing.waitreason == waitReasonFinalizerWait + unlock(&finlock) + if empty { + return true + } + Gosched() + } + return false +} + // SetFinalizer sets the finalizer associated with obj to the provided // finalizer function. 
When the garbage collector finds an unreachable block // with an associated finalizer, it clears the association and runs @@ -410,7 +432,7 @@ func SetFinalizer(obj any, finalizer any) { } // find the containing object - base, _, _ := findObject(uintptr(e.data), 0, 0) + base, span, _ := findObject(uintptr(e.data), 0, 0) if base == 0 { if isGoPointerWithoutSpan(e.data) { @@ -419,10 +441,15 @@ func SetFinalizer(obj any, finalizer any) { throw("runtime.SetFinalizer: pointer not in allocated block") } + // Move base forward if we've got an allocation header. + if goexperiment.AllocHeaders && !span.spanclass.noscan() && !heapBitsInSpan(span.elemsize) && span.spanclass.sizeclass() != 0 { + base += mallocHeaderSize + } + if uintptr(e.data) != base { // As an implementation detail we allow to set finalizers for an inner byte // of an object if it could come from tiny alloc (see mallocgc for details). - if ot.Elem == nil || ot.Elem.PtrBytes != 0 || ot.Elem.Size_ >= maxTinySize { + if ot.Elem == nil || ot.Elem.Pointers() || ot.Elem.Size_ >= maxTinySize { throw("runtime.SetFinalizer: pointer not at beginning of allocated block") } } diff --git a/src/runtime/mgc.go b/src/runtime/mgc.go index 6c51517522..64fc0a688c 100644 --- a/src/runtime/mgc.go +++ b/src/runtime/mgc.go @@ -378,8 +378,7 @@ type workType struct { // markDoneSema protects transitions from mark to mark termination. markDoneSema uint32 - bgMarkReady note // signal background mark worker has started - bgMarkDone uint32 // cas to 1 when at a background mark completion point + bgMarkDone uint32 // cas to 1 when at a background mark completion point // Background mark completion signaling // mode is the concurrency mode of the current GC cycle. @@ -1230,11 +1229,34 @@ func gcBgMarkStartWorkers() { // // Worker Gs don't exit if gomaxprocs is reduced. If it is raised // again, we can reuse the old workers; no need to create new workers. 
- for gcBgMarkWorkerCount < gomaxprocs { - go gcBgMarkWorker() + if gcBgMarkWorkerCount >= gomaxprocs { + return + } - notetsleepg(&work.bgMarkReady, -1) - noteclear(&work.bgMarkReady) + // Increment mp.locks when allocating. We are called within gcStart, + // and thus must not trigger another gcStart via an allocation. gcStart + // bails when allocating with locks held, so simulate that for these + // allocations. + // + // TODO(prattmic): cleanup gcStart to use a more explicit "in gcStart" + // check for bailing. + mp := acquirem() + ready := make(chan struct{}, 1) + releasem(mp) + + for gcBgMarkWorkerCount < gomaxprocs { + mp := acquirem() // See above, we allocate a closure here. + go gcBgMarkWorker(ready) + releasem(mp) + + // N.B. we intentionally wait on each goroutine individually + // rather than starting all in a batch and then waiting once + // afterwards. By running one goroutine at a time, we can take + // advantage of runnext to bounce back and forth between + // workers and this goroutine. In an overloaded application, + // this can reduce GC start latency by prioritizing these + // goroutines rather than waiting on the end of the run queue. + <-ready // The worker is now guaranteed to be added to the pool before // its P's next findRunnableGCWorker. @@ -1273,7 +1295,7 @@ type gcBgMarkWorkerNode struct { m muintptr } -func gcBgMarkWorker() { +func gcBgMarkWorker(ready chan struct{}) { gp := getg() // We pass node to a gopark unlock function, so it can't be on @@ -1286,7 +1308,8 @@ func gcBgMarkWorker() { node.gp.set(gp) node.m.set(acquirem()) - notewakeup(&work.bgMarkReady) + + ready <- struct{}{} // After this point, the background mark worker is generally scheduled // cooperatively by gcController.findRunnableGCWorker. While performing // work on the P, preemption is disabled because we are working on @@ -1299,10 +1322,10 @@ func gcBgMarkWorker() { // fine; it will eventually gopark again for further scheduling via // findRunnableGCWorker. 
// - // Since we disable preemption before notifying bgMarkReady, we - // guarantee that this G will be in the worker pool for the next - // findRunnableGCWorker. This isn't strictly necessary, but it reduces - // latency between _GCmark starting and the workers starting. + // Since we disable preemption before notifying ready, we guarantee that + // this G will be in the worker pool for the next findRunnableGCWorker. + // This isn't strictly necessary, but it reduces latency between + // _GCmark starting and the workers starting. for { // Go to sleep until woken by diff --git a/src/runtime/mgcmark.go b/src/runtime/mgcmark.go index 95ec069bcf..e118ba69af 100644 --- a/src/runtime/mgcmark.go +++ b/src/runtime/mgcmark.go @@ -73,9 +73,7 @@ func gcMarkRootPrepare() { if nDataRoots > work.nDataRoots { work.nDataRoots = nDataRoots } - } - for _, datap := range activeModules() { nBSSRoots := nBlocks(datap.ebss - datap.bss) if nBSSRoots > work.nBSSRoots { work.nBSSRoots = nBSSRoots @@ -1718,7 +1716,7 @@ func gcDumpObject(label string, obj, off uintptr) { // //go:nowritebarrier //go:nosplit -func gcmarknewobject(span *mspan, obj, size uintptr) { +func gcmarknewobject(span *mspan, obj uintptr) { if useCheckmark { // The world should be stopped so this should not happen. throw("gcmarknewobject called while doing checkmark") } @@ -1734,7 +1732,7 @@ func gcmarknewobject(span *mspan, obj, size uintptr) { } gcw := &getg().m.p.ptr().gcw - gcw.bytesMarked += uint64(size) + gcw.bytesMarked += uint64(span.elemsize) } // gcMarkTinyAllocs greys all active tiny alloc blocks. 
diff --git a/src/runtime/mgcpacer.go b/src/runtime/mgcpacer.go index 716e3efccc..e9af3d60cd 100644 --- a/src/runtime/mgcpacer.go +++ b/src/runtime/mgcpacer.go @@ -712,7 +712,7 @@ func (c *gcControllerState) enlistWorker() { } myID := gp.m.p.ptr().id for tries := 0; tries < 5; tries++ { - id := int32(fastrandn(uint32(gomaxprocs - 1))) + id := int32(cheaprandn(uint32(gomaxprocs - 1))) if id >= myID { id++ } @@ -1377,7 +1377,7 @@ func (c *gcControllerState) needIdleMarkWorker() bool { return n < max } -// removeIdleMarkWorker must be called when an new idle mark worker stops executing. +// removeIdleMarkWorker must be called when a new idle mark worker stops executing. func (c *gcControllerState) removeIdleMarkWorker() { for { old := c.idleMarkWorkers.Load() diff --git a/src/runtime/mgcscavenge.go b/src/runtime/mgcscavenge.go index e6725b4622..86c2103f18 100644 --- a/src/runtime/mgcscavenge.go +++ b/src/runtime/mgcscavenge.go @@ -307,7 +307,7 @@ type scavengerState struct { // See sleepRatio for more details. sleepController piController - // cooldown is the time left in nanoseconds during which we avoid + // controllerCooldown is the time left in nanoseconds during which we avoid // using the controller and we hold sleepRatio at a conservative // value. Used if the controller's assumptions fail to hold. 
controllerCooldown int64 diff --git a/src/runtime/mgcsweep.go b/src/runtime/mgcsweep.go index 2d84c0d07c..3dbe9bcec7 100644 --- a/src/runtime/mgcsweep.go +++ b/src/runtime/mgcsweep.go @@ -25,6 +25,7 @@ package runtime import ( + "internal/abi" "internal/goexperiment" "runtime/internal/atomic" "unsafe" @@ -789,14 +790,17 @@ func (sl *sweepLocked) sweep(preserve bool) bool { } else { mheap_.freeSpan(s) } - if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.Kind_&kindGCProg != 0 { + if goexperiment.AllocHeaders && s.largeType != nil && s.largeType.TFlag&abi.TFlagUnrolledBitmap != 0 { // In the allocheaders experiment, the unrolled GCProg bitmap is allocated separately. // Free the space for the unrolled bitmap. systemstack(func() { s := spanOf(uintptr(unsafe.Pointer(s.largeType))) mheap_.freeManual(s, spanAllocPtrScalarBits) }) - s.largeType = nil + // Make sure to zero this pointer without putting the old + // value in a write buffer, as the old value might be an + // invalid pointer. See arena.go:(*mheap).allocUserArenaChunk. + *(*uintptr)(unsafe.Pointer(&s.largeType)) = 0 } // Count the free in the consistent, external stats. diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go index 0bbda4aa3b..0069328346 100644 --- a/src/runtime/mheap.go +++ b/src/runtime/mheap.go @@ -399,7 +399,7 @@ type mspan struct { _ sys.NotInHeap next *mspan // next span in list, or nil if none prev *mspan // previous span in list, or nil if none - list *mSpanList // For debugging. TODO: Remove. + list *mSpanList // For debugging. 
startAddr uintptr // address of first byte of span aka s.base() npages uintptr // number of pages in span diff --git a/src/runtime/minmax_test.go b/src/runtime/minmax_test.go index e0bc28fbf6..1f815a84c3 100644 --- a/src/runtime/minmax_test.go +++ b/src/runtime/minmax_test.go @@ -66,10 +66,10 @@ func TestMaxFloat(t *testing.T) { } for _, x := range all { if z := max(nan, x); !math.IsNaN(z) { - t.Errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + t.Errorf("max(%v, %v) = %v, want %v", nan, x, z, nan) } if z := max(x, nan); !math.IsNaN(z) { - t.Errorf("min(%v, %v) = %v, want %v", nan, x, z, nan) + t.Errorf("max(%v, %v) = %v, want %v", nan, x, z, nan) } } } @@ -127,3 +127,21 @@ func TestMinMaxStringTies(t *testing.T) { test(2, 0, 1) test(2, 1, 0) } + +func BenchmarkMinFloat(b *testing.B) { + var m float64 = 0 + for i := 0; i < b.N; i++ { + for _, f := range all { + m = min(m, f) + } + } +} + +func BenchmarkMaxFloat(b *testing.B) { + var m float64 = 0 + for i := 0; i < b.N; i++ { + for _, f := range all { + m = max(m, f) + } + } +} diff --git a/src/runtime/mkduff.go b/src/runtime/mkduff.go index e8d4fcc93e..b7f07b5087 100644 --- a/src/runtime/mkduff.go +++ b/src/runtime/mkduff.go @@ -181,21 +181,21 @@ func zeroLOONG64(w io.Writer) { // R0: always zero // R19 (aka REGRT1): ptr to memory to be zeroed // On return, R19 points to the last zeroed dword. 
- fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0") + fmt.Fprintln(w, "TEXT runtime·duffzero(SB), NOSPLIT|NOFRAME, $0-0") for i := 0; i < 128; i++ { - fmt.Fprintln(w, "\tMOVV\tR0, (R19)") - fmt.Fprintln(w, "\tADDV\t$8, R19") + fmt.Fprintln(w, "\tMOVV\tR0, (R20)") + fmt.Fprintln(w, "\tADDV\t$8, R20") } fmt.Fprintln(w, "\tRET") } func copyLOONG64(w io.Writer) { - fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0") + fmt.Fprintln(w, "TEXT runtime·duffcopy(SB), NOSPLIT|NOFRAME, $0-0") for i := 0; i < 128; i++ { - fmt.Fprintln(w, "\tMOVV\t(R19), R30") - fmt.Fprintln(w, "\tADDV\t$8, R19") - fmt.Fprintln(w, "\tMOVV\tR30, (R20)") + fmt.Fprintln(w, "\tMOVV\t(R20), R30") fmt.Fprintln(w, "\tADDV\t$8, R20") + fmt.Fprintln(w, "\tMOVV\tR30, (R21)") + fmt.Fprintln(w, "\tADDV\t$8, R21") fmt.Fprintln(w) } fmt.Fprintln(w, "\tRET") diff --git a/src/runtime/mklockrank.go b/src/runtime/mklockrank.go index 4cb232b1ba..1c87843edd 100644 --- a/src/runtime/mklockrank.go +++ b/src/runtime/mklockrank.go @@ -52,30 +52,41 @@ NONE < assistQueue, sweep; +# Test only +NONE < testR, testW; + # Scheduler, timers, netpoll -NONE < pollDesc, cpuprof; +NONE < + allocmW, + execW, + cpuprof, + pollDesc, + wakeableSleep; assistQueue, cpuprof, forcegc, pollDesc, # pollDesc can interact with timers, which can lock sched. scavenge, sweep, - sweepWaiters + sweepWaiters, + testR, + wakeableSleep +# Above SCHED are things that can call into the scheduler. +< SCHED +# Below SCHED is the scheduler implementation. 
+< allocmR, + execR < sched; sched < allg, allp; -allp < timers; -timers < wakeableSleep; +allp, wakeableSleep < timers; timers < netpollInit; +timers < timer; # Channels -scavenge, sweep < hchan; +scavenge, sweep, testR, wakeableSleep < hchan; NONE < notifyList; hchan, notifyList < sudog; -# RWMutex -NONE < rwmutexW; -rwmutexW, sysmon < rwmutexR; - # Semaphores NONE < root; @@ -100,6 +111,9 @@ traceBuf < traceStrings; # Malloc allg, + allocmR, + execR, # May grow stack + execW, # May allocate after BeforeFork hchan, notifyList, reflectOffs, @@ -136,7 +150,7 @@ gcBitsArenas, < STACKGROW # Below STACKGROW is the stack allocator/copying implementation. < gscan; -gscan, rwmutexR < stackpool; +gscan < stackpool; gscan < stackLarge; # Generally, hchan must be acquired before gscan. But in one case, # where we suspend a G and then shrink its stack, syncadjustsudogs @@ -148,7 +162,8 @@ gscan < hchanLeaf; defer, gscan, mspanSpecial, - sudog + sudog, + timer # Anything that can have write barriers can acquire WB. # Above WB, we can have write barriers. < WB @@ -189,6 +204,20 @@ NONE < panic; panic < deadlock; # raceFini is only held while exiting. panic < raceFini; + +# RWMutex internal read lock + +allocmR, + allocmW +< allocmRInternal; + +execR, + execW +< execRInternal; + +testR, + testW +< testRInternal; ` // cyclicRanks lists lock ranks that allow multiple locks of the same diff --git a/src/runtime/mkpreempt.go b/src/runtime/mkpreempt.go index a96ae59c15..17544d6b21 100644 --- a/src/runtime/mkpreempt.go +++ b/src/runtime/mkpreempt.go @@ -317,11 +317,11 @@ func genARM() { p("MOVW.W R14, -%d(R13)", lfp.stack) // allocate frame, save LR l.save() - p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp") // test goarm, and skip FP registers if goarm=5. + p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. 
lfp.save() label("nofp:") p("CALL ·asyncPreempt2(SB)") - p("MOVB ·goarm(SB), R0\nCMP $6, R0\nBLT nofp2") // test goarm, and skip FP registers if goarm=5. + p("MOVB ·goarmsoftfp(SB), R0\nCMP $0, R0\nBNE nofp2") // test goarmsoftfp, and skip FP registers if goarmsoftfp!=0. lfp.restore() label("nofp2:") l.restore() diff --git a/src/runtime/mpagealloc_64bit.go b/src/runtime/mpagealloc_64bit.go index 1418831a50..36cd222360 100644 --- a/src/runtime/mpagealloc_64bit.go +++ b/src/runtime/mpagealloc_64bit.go @@ -209,23 +209,20 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintpt haveMax := s.max.Load() needMin := alignDown(uintptr(chunkIndex(base)), physPageSize/scSize) needMax := alignUp(uintptr(chunkIndex(limit)), physPageSize/scSize) - // Extend the range down to what we have, if there's no overlap. + + // We need a contiguous range, so extend the range if there's no overlap. if needMax < haveMin { needMax = haveMin } if haveMax != 0 && needMin > haveMax { needMin = haveMax } - have := makeAddrRange( - // Avoid a panic from indexing one past the last element. - uintptr(unsafe.Pointer(&s.chunks[0]))+haveMin*scSize, - uintptr(unsafe.Pointer(&s.chunks[0]))+haveMax*scSize, - ) - need := makeAddrRange( - // Avoid a panic from indexing one past the last element. - uintptr(unsafe.Pointer(&s.chunks[0]))+needMin*scSize, - uintptr(unsafe.Pointer(&s.chunks[0]))+needMax*scSize, - ) + + // Avoid a panic from indexing one past the last element. + chunksBase := uintptr(unsafe.Pointer(&s.chunks[0])) + have := makeAddrRange(chunksBase+haveMin*scSize, chunksBase+haveMax*scSize) + need := makeAddrRange(chunksBase+needMin*scSize, chunksBase+needMax*scSize) + // Subtract any overlap from rounding. We can't re-map memory because // it'll be zeroed. 
need = need.subtract(have) @@ -235,10 +232,10 @@ func (s *scavengeIndex) sysGrow(base, limit uintptr, sysStat *sysMemStat) uintpt sysMap(unsafe.Pointer(need.base.addr()), need.size(), sysStat) sysUsed(unsafe.Pointer(need.base.addr()), need.size(), need.size()) // Update the indices only after the new memory is valid. - if haveMin == 0 || needMin < haveMin { + if haveMax == 0 || needMin < haveMin { s.min.Store(needMin) } - if haveMax == 0 || needMax > haveMax { + if needMax > haveMax { s.max.Store(needMax) } } diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go index af461eef1a..c232b15424 100644 --- a/src/runtime/mprof.go +++ b/src/runtime/mprof.go @@ -234,6 +234,10 @@ func newBucket(typ bucketType, nstk int) *bucket { // stk returns the slice in b holding the stack. func (b *bucket) stk() []uintptr { stk := (*[maxStack]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b))) + if b.nstk > maxStack { + // prove that slicing works; otherwise a failure requires a P + throw("bad profile stack count") + } return stk[:b.nstk:b.nstk] } @@ -494,7 +498,7 @@ func blockevent(cycles int64, skip int) { // blocksampled returns true for all events where cycles >= rate. Shorter // events have a cycles/rate random chance of returning true. func blocksampled(cycles, rate int64) bool { - if rate <= 0 || (rate > cycles && int64(fastrand())%rate > cycles) { + if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) { return false } return true @@ -509,7 +513,237 @@ func saveblockevent(cycles, rate int64, skip int, which bucketType) { } else { nstk = gcallers(gp.m.curg, skip, stk[:]) } - b := stkbucket(which, 0, stk[:nstk], true) + + saveBlockEventStack(cycles, rate, stk[:nstk], which) +} + +// lockTimer assists with profiling contention on runtime-internal locks. +// +// There are several steps between the time that an M experiences contention and +// when that contention may be added to the profile. 
This comes from our +// constraints: We need to keep the critical section of each lock small, +// especially when those locks are contended. The reporting code cannot acquire +// new locks until the M has released all other locks, which means no memory +// allocations and encourages use of (temporary) M-local storage. +// +// The M will have space for storing one call stack that caused contention, and +// for the magnitude of that contention. It will also have space to store the +// magnitude of additional contention the M caused, since it only has space to +// remember one call stack and might encounter several contention events before +// it releases all of its locks and is thus able to transfer the local buffer +// into the profile. +// +// The M will collect the call stack when it unlocks the contended lock. That +// minimizes the impact on the critical section of the contended lock, and +// matches the mutex profile's behavior for contention in sync.Mutex: measured +// at the Unlock method. +// +// The profile for contention on sync.Mutex blames the caller of Unlock for the +// amount of contention experienced by the callers of Lock which had to wait. +// When there are several critical sections, this allows identifying which of +// them is responsible. +// +// Matching that behavior for runtime-internal locks will require identifying +// which Ms are blocked on the mutex. The semaphore-based implementation is +// ready to allow that, but the futex-based implementation will require a bit +// more work. Until then, we report contention on runtime-internal locks with a +// call stack taken from the unlock call (like the rest of the user-space +// "mutex" profile), but assign it a duration value based on how long the +// previous lock call took (like the user-space "block" profile). +// +// Thus, reporting the call stacks of runtime-internal lock contention is +// guarded by GODEBUG for now. Set GODEBUG=runtimecontentionstacks=1 to enable. 
+// +// TODO(rhysh): plumb through the delay duration, remove GODEBUG, update comment +// +// The M will track this by storing a pointer to the lock; lock/unlock pairs for +// runtime-internal locks are always on the same M. +// +// Together, that demands several steps for recording contention. First, when +// finally acquiring a contended lock, the M decides whether it should plan to +// profile that event by storing a pointer to the lock in its "to be profiled +// upon unlock" field. If that field is already set, it uses the relative +// magnitudes to weight a random choice between itself and the other lock, with +// the loser's time being added to the "additional contention" field. Otherwise +// if the M's call stack buffer is occupied, it does the comparison against that +// sample's magnitude. +// +// Second, having unlocked a mutex the M checks to see if it should capture the +// call stack into its local buffer. Finally, when the M unlocks its last mutex, +// it transfers the local buffer into the profile. As part of that step, it also +// transfers any "additional contention" time to the profile. Any lock +// contention that it experiences while adding samples to the profile will be +// recorded later as "additional contention" and not include a call stack, to +// avoid an echo. 
+type lockTimer struct { + lock *mutex + timeRate int64 + timeStart int64 + tickStart int64 +} + +func (lt *lockTimer) begin() { + rate := int64(atomic.Load64(&mutexprofilerate)) + + lt.timeRate = gTrackingPeriod + if rate != 0 && rate < lt.timeRate { + lt.timeRate = rate + } + if int64(cheaprand())%lt.timeRate == 0 { + lt.timeStart = nanotime() + } + + if rate > 0 && int64(cheaprand())%rate == 0 { + lt.tickStart = cputicks() + } +} + +func (lt *lockTimer) end() { + gp := getg() + + if lt.timeStart != 0 { + nowTime := nanotime() + gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate) + } + + if lt.tickStart != 0 { + nowTick := cputicks() + gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock) + } +} + +type mLockProfile struct { + waitTime atomic.Int64 // total nanoseconds spent waiting in runtime.lockWithRank + stack [maxStack]uintptr // stack that experienced contention in runtime.lockWithRank + pending uintptr // *mutex that experienced contention (to be traceback-ed) + cycles int64 // cycles attributable to "pending" (if set), otherwise to "stack" + cyclesLost int64 // contention for which we weren't able to record a call stack + disabled bool // attribute all time to "lost" +} + +func (prof *mLockProfile) recordLock(cycles int64, l *mutex) { + if cycles <= 0 { + return + } + + if prof.disabled { + // We're experiencing contention while attempting to report contention. + // Make a note of its magnitude, but don't allow it to be the sole cause + // of another contention report. + prof.cyclesLost += cycles + return + } + + if uintptr(unsafe.Pointer(l)) == prof.pending { + // Optimization: we'd already planned to profile this same lock (though + // possibly from a different unlock site). + prof.cycles += cycles + return + } + + if prev := prof.cycles; prev > 0 { + // We can only store one call stack for runtime-internal lock contention + // on this M, and we've already got one. 
Decide which should stay, and + // add the other to the report for runtime._LostContendedRuntimeLock. + prevScore := uint64(cheaprand64()) % uint64(prev) + thisScore := uint64(cheaprand64()) % uint64(cycles) + if prevScore > thisScore { + prof.cyclesLost += cycles + return + } else { + prof.cyclesLost += prev + } + } + // Saving the *mutex as a uintptr is safe because: + // - lockrank_on.go does this too, which gives it regular exercise + // - the lock would only move if it's stack allocated, which means it + // cannot experience multi-M contention + prof.pending = uintptr(unsafe.Pointer(l)) + prof.cycles = cycles +} + +// From unlock2, we might not be holding a p in this code. +// +//go:nowritebarrierrec +func (prof *mLockProfile) recordUnlock(l *mutex) { + if uintptr(unsafe.Pointer(l)) == prof.pending { + prof.captureStack() + } + if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.cycles != 0 { + prof.store() + } +} + +func (prof *mLockProfile) captureStack() { + skip := 3 // runtime.(*mLockProfile).recordUnlock runtime.unlock2 runtime.unlockWithRank + if staticLockRanking { + // When static lock ranking is enabled, we'll always be on the system + // stack at this point. There will be a runtime.unlockWithRank.func1 + // frame, and if the call to runtime.unlock took place on a user stack + // then there'll also be a runtime.systemstack frame. To keep stack + // traces somewhat consistent whether or not static lock ranking is + // enabled, we'd like to skip those. But it's hard to tell how long + // we've been on the system stack so accept an extra frame in that case, + // with a leaf of "runtime.unlockWithRank runtime.unlock" instead of + // "runtime.unlock". 
+ skip += 1 // runtime.unlockWithRank.func1 + } + prof.pending = 0 + + if debug.runtimeContentionStacks.Load() == 0 { + prof.stack[0] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum + prof.stack[1] = 0 + return + } + + var nstk int + gp := getg() + sp := getcallersp() + pc := getcallerpc() + systemstack(func() { + var u unwinder + u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack) + nstk = tracebackPCs(&u, skip, prof.stack[:]) + }) + if nstk < len(prof.stack) { + prof.stack[nstk] = 0 + } +} + +func (prof *mLockProfile) store() { + // Report any contention we experience within this function as "lost"; it's + // important that the act of reporting a contention event not lead to a + // reportable contention event. This also means we can use prof.stack + // without copying, since it won't change during this function. + mp := acquirem() + prof.disabled = true + + nstk := maxStack + for i := 0; i < nstk; i++ { + if pc := prof.stack[i]; pc == 0 { + nstk = i + break + } + } + + cycles, lost := prof.cycles, prof.cyclesLost + prof.cycles, prof.cyclesLost = 0, 0 + + rate := int64(atomic.Load64(&mutexprofilerate)) + saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile) + if lost > 0 { + lostStk := [...]uintptr{ + abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum, + } + saveBlockEventStack(lost, rate, lostStk[:], mutexProfile) + } + + prof.disabled = false + releasem(mp) +} + +func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) { + b := stkbucket(which, 0, stk, true) bp := b.bp() lock(&profBlockLock) @@ -556,7 +790,7 @@ func mutexevent(cycles int64, skip int) { cycles = 0 } rate := int64(atomic.Load64(&mutexprofilerate)) - if rate > 0 && int64(fastrand())%rate == 0 { + if rate > 0 && cheaprand64()%rate == 0 { saveblockevent(cycles, rate, skip+1, mutexProfile) } } @@ -747,8 +981,8 @@ type BlockProfileRecord struct { // If len(p) >= n, BlockProfile copies the profile into p and returns n, true. 
// If len(p) < n, BlockProfile does not change p and returns n, false. // -// Most clients should use the runtime/pprof package or -// the testing package's -test.blockprofile flag instead +// Most clients should use the [runtime/pprof] package or +// the [testing] package's -test.blockprofile flag instead // of calling BlockProfile directly. func BlockProfile(p []BlockProfileRecord) (n int, ok bool) { lock(&profBlockLock) @@ -898,6 +1132,14 @@ func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileSt } func goroutineProfileWithLabelsConcurrent(p []StackRecord, labels []unsafe.Pointer) (n int, ok bool) { + if len(p) == 0 { + // An empty slice is obviously too small. Return a rough + // allocation estimate without bothering to STW. As long as + // this is close, then we'll only need to STW once (on the next + // call). + return int(gcount()), false + } + semacquire(&goroutineProfile.sema) ourg := getg() diff --git a/src/runtime/msan.go b/src/runtime/msan.go index 5e2aae1bd1..cb740dc2d8 100644 --- a/src/runtime/msan.go +++ b/src/runtime/msan.go @@ -29,6 +29,7 @@ const msanenabled = true // anyhow for values on the stack. Just ignore msanread when running // on the system stack. The other msan functions are fine. 
// +//go:linkname msanread //go:nosplit func msanread(addr unsafe.Pointer, sz uintptr) { gp := getg() @@ -41,15 +42,19 @@ func msanread(addr unsafe.Pointer, sz uintptr) { //go:noescape func domsanread(addr unsafe.Pointer, sz uintptr) +//go:linkname msanwrite //go:noescape func msanwrite(addr unsafe.Pointer, sz uintptr) +//go:linkname msanmalloc //go:noescape func msanmalloc(addr unsafe.Pointer, sz uintptr) +//go:linkname msanfree //go:noescape func msanfree(addr unsafe.Pointer, sz uintptr) +//go:linkname msanmove //go:noescape func msanmove(dst, src unsafe.Pointer, sz uintptr) diff --git a/src/runtime/msan_amd64.s b/src/runtime/msan_amd64.s index 89ed3048d0..a1dc388063 100644 --- a/src/runtime/msan_amd64.s +++ b/src/runtime/msan_amd64.s @@ -28,7 +28,7 @@ // Called from msanread. TEXT runtime·domsanread(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __msan_read_go(void *addr, uintptr_t sz); MOVQ $__msan_read_go(SB), AX JMP msancall<>(SB) @@ -37,7 +37,7 @@ TEXT runtime·domsanread(SB), NOSPLIT, $0-16 // Called from instrumented code. 
TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __msan_write_go(void *addr, uintptr_t sz); MOVQ $__msan_write_go(SB), AX JMP msancall<>(SB) @@ -45,7 +45,7 @@ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __msan_malloc_go(void *addr, uintptr_t sz); MOVQ $__msan_malloc_go(SB), AX JMP msancall<>(SB) @@ -53,7 +53,7 @@ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 // func runtime·msanfree(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanfree(SB), NOSPLIT, $0-16 MOVQ addr+0(FP), RARG0 - MOVQ size+8(FP), RARG1 + MOVQ sz+8(FP), RARG1 // void __msan_free_go(void *addr, uintptr_t sz); MOVQ $__msan_free_go(SB), AX JMP msancall<>(SB) @@ -62,7 +62,7 @@ TEXT runtime·msanfree(SB), NOSPLIT, $0-16 TEXT runtime·msanmove(SB), NOSPLIT, $0-24 MOVQ dst+0(FP), RARG0 MOVQ src+8(FP), RARG1 - MOVQ size+16(FP), RARG2 + MOVQ sz+16(FP), RARG2 // void __msan_memmove(void *dst, void *src, uintptr_t sz); MOVQ $__msan_memmove(SB), AX JMP msancall<>(SB) diff --git a/src/runtime/msan_arm64.s b/src/runtime/msan_arm64.s index b9eff34ab6..ce475cf44a 100644 --- a/src/runtime/msan_arm64.s +++ b/src/runtime/msan_arm64.s @@ -16,7 +16,7 @@ // Called from msanread. TEXT runtime·domsanread(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __msan_read_go(void *addr, uintptr_t sz); MOVD $__msan_read_go(SB), FARG JMP msancall<>(SB) @@ -25,7 +25,7 @@ TEXT runtime·domsanread(SB), NOSPLIT, $0-16 // Called from instrumented code. 
TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __msan_write_go(void *addr, uintptr_t sz); MOVD $__msan_write_go(SB), FARG JMP msancall<>(SB) @@ -33,7 +33,7 @@ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __msan_malloc_go(void *addr, uintptr_t sz); MOVD $__msan_malloc_go(SB), FARG JMP msancall<>(SB) @@ -41,7 +41,7 @@ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 // func runtime·msanfree(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanfree(SB), NOSPLIT, $0-16 MOVD addr+0(FP), RARG0 - MOVD size+8(FP), RARG1 + MOVD sz+8(FP), RARG1 // void __msan_free_go(void *addr, uintptr_t sz); MOVD $__msan_free_go(SB), FARG JMP msancall<>(SB) @@ -50,7 +50,7 @@ TEXT runtime·msanfree(SB), NOSPLIT, $0-16 TEXT runtime·msanmove(SB), NOSPLIT, $0-24 MOVD dst+0(FP), RARG0 MOVD src+8(FP), RARG1 - MOVD size+16(FP), RARG2 + MOVD sz+16(FP), RARG2 // void __msan_memmove(void *dst, void *src, uintptr_t sz); MOVD $__msan_memmove(SB), FARG JMP msancall<>(SB) diff --git a/src/runtime/msan_loong64.s b/src/runtime/msan_loong64.s index f69fb45454..b9fa5fd120 100644 --- a/src/runtime/msan_loong64.s +++ b/src/runtime/msan_loong64.s @@ -16,7 +16,7 @@ // Called from msanread. TEXT runtime·domsanread(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __msan_read_go(void *addr, uintptr_t sz); MOVV $__msan_read_go(SB), FARG JMP msancall<>(SB) @@ -25,7 +25,7 @@ TEXT runtime·domsanread(SB), NOSPLIT, $0-16 // Called from instrumented code. 
TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __msan_write_go(void *addr, uintptr_t sz); MOVV $__msan_write_go(SB), FARG JMP msancall<>(SB) @@ -33,7 +33,7 @@ TEXT runtime·msanwrite(SB), NOSPLIT, $0-16 // func runtime·msanmalloc(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __msan_malloc_go(void *addr, uintptr_t sz); MOVV $__msan_malloc_go(SB), FARG JMP msancall<>(SB) @@ -41,7 +41,7 @@ TEXT runtime·msanmalloc(SB), NOSPLIT, $0-16 // func runtime·msanfree(addr unsafe.Pointer, sz uintptr) TEXT runtime·msanfree(SB), NOSPLIT, $0-16 MOVV addr+0(FP), RARG0 - MOVV size+8(FP), RARG1 + MOVV sz+8(FP), RARG1 // void __msan_free_go(void *addr, uintptr_t sz); MOVV $__msan_free_go(SB), FARG JMP msancall<>(SB) @@ -50,7 +50,7 @@ TEXT runtime·msanfree(SB), NOSPLIT, $0-16 TEXT runtime·msanmove(SB), NOSPLIT, $0-24 MOVV dst+0(FP), RARG0 MOVV src+8(FP), RARG1 - MOVV size+16(FP), RARG2 + MOVV sz+16(FP), RARG2 // void __msan_memmove(void *dst, void *src, uintptr_t sz); MOVV $__msan_memmove(SB), FARG JMP msancall<>(SB) diff --git a/src/runtime/mstats.go b/src/runtime/mstats.go index 874b08ae3a..87afec47c8 100644 --- a/src/runtime/mstats.go +++ b/src/runtime/mstats.go @@ -361,6 +361,11 @@ func ReadMemStats(m *MemStats) { startTheWorld(stw) } +// doubleCheckReadMemStats controls a double-check mode for ReadMemStats that +// ensures consistency between the values that ReadMemStats is using and the +// runtime-internal stats. +var doubleCheckReadMemStats = false + // readmemstats_m populates stats for internal runtime values. // // The world must be stopped. @@ -435,56 +440,65 @@ func readmemstats_m(stats *MemStats) { heapGoal := gcController.heapGoal() - // The world is stopped, so the consistent stats (after aggregation) - // should be identical to some combination of memstats. 
In particular: - // - // * memstats.heapInUse == inHeap - // * memstats.heapReleased == released - // * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits - // * memstats.totalAlloc == totalAlloc - // * memstats.totalFree == totalFree - // - // Check if that's actually true. - // - // TODO(mknyszek): Maybe don't throw here. It would be bad if a - // bug in otherwise benign accounting caused the whole application - // to crash. - if gcController.heapInUse.load() != uint64(consStats.inHeap) { - print("runtime: heapInUse=", gcController.heapInUse.load(), "\n") - print("runtime: consistent value=", consStats.inHeap, "\n") - throw("heapInUse and consistent stats are not equal") - } - if gcController.heapReleased.load() != uint64(consStats.released) { - print("runtime: heapReleased=", gcController.heapReleased.load(), "\n") - print("runtime: consistent value=", consStats.released, "\n") - throw("heapReleased and consistent stats are not equal") - } - heapRetained := gcController.heapInUse.load() + gcController.heapFree.load() - consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits) - if heapRetained != consRetained { - print("runtime: global value=", heapRetained, "\n") - print("runtime: consistent value=", consRetained, "\n") - throw("measures of the retained heap are not equal") - } - if gcController.totalAlloc.Load() != totalAlloc { - print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n") - print("runtime: consistent value=", totalAlloc, "\n") - throw("totalAlloc and consistent stats are not equal") - } - if gcController.totalFree.Load() != totalFree { - print("runtime: totalFree=", gcController.totalFree.Load(), "\n") - print("runtime: consistent value=", totalFree, "\n") - throw("totalFree and consistent stats are not equal") - } - // Also check that mappedReady lines up with totalMapped - released. 
- // This isn't really the same type of "make sure consistent stats line up" situation, - // but this is an opportune time to check. - if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) { - print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n") - print("runtime: totalMapped=", totalMapped, "\n") - print("runtime: released=", uint64(consStats.released), "\n") - print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n") - throw("mappedReady and other memstats are not equal") + if doubleCheckReadMemStats { + // Only check this if we're debugging. It would be bad to crash an application + // just because the debugging stats are wrong. We mostly rely on tests to catch + // these issues, and we enable the double check mode for tests. + // + // The world is stopped, so the consistent stats (after aggregation) + // should be identical to some combination of memstats. In particular: + // + // * memstats.heapInUse == inHeap + // * memstats.heapReleased == released + // * memstats.heapInUse + memstats.heapFree == committed - inStacks - inWorkBufs - inPtrScalarBits + // * memstats.totalAlloc == totalAlloc + // * memstats.totalFree == totalFree + // + // Check if that's actually true. + // + // Prevent sysmon and the tracer from skewing the stats since they can + // act without synchronizing with a STW. See #64401. 
+ lock(&sched.sysmonlock) + lock(&trace.lock) + if gcController.heapInUse.load() != uint64(consStats.inHeap) { + print("runtime: heapInUse=", gcController.heapInUse.load(), "\n") + print("runtime: consistent value=", consStats.inHeap, "\n") + throw("heapInUse and consistent stats are not equal") + } + if gcController.heapReleased.load() != uint64(consStats.released) { + print("runtime: heapReleased=", gcController.heapReleased.load(), "\n") + print("runtime: consistent value=", consStats.released, "\n") + throw("heapReleased and consistent stats are not equal") + } + heapRetained := gcController.heapInUse.load() + gcController.heapFree.load() + consRetained := uint64(consStats.committed - consStats.inStacks - consStats.inWorkBufs - consStats.inPtrScalarBits) + if heapRetained != consRetained { + print("runtime: global value=", heapRetained, "\n") + print("runtime: consistent value=", consRetained, "\n") + throw("measures of the retained heap are not equal") + } + if gcController.totalAlloc.Load() != totalAlloc { + print("runtime: totalAlloc=", gcController.totalAlloc.Load(), "\n") + print("runtime: consistent value=", totalAlloc, "\n") + throw("totalAlloc and consistent stats are not equal") + } + if gcController.totalFree.Load() != totalFree { + print("runtime: totalFree=", gcController.totalFree.Load(), "\n") + print("runtime: consistent value=", totalFree, "\n") + throw("totalFree and consistent stats are not equal") + } + // Also check that mappedReady lines up with totalMapped - released. + // This isn't really the same type of "make sure consistent stats line up" situation, + // but this is an opportune time to check. 
+ if gcController.mappedReady.Load() != totalMapped-uint64(consStats.released) { + print("runtime: mappedReady=", gcController.mappedReady.Load(), "\n") + print("runtime: totalMapped=", totalMapped, "\n") + print("runtime: released=", uint64(consStats.released), "\n") + print("runtime: totalMapped-released=", totalMapped-uint64(consStats.released), "\n") + throw("mappedReady and other memstats are not equal") + } + unlock(&trace.lock) + unlock(&sched.sysmonlock) } // We've calculated all the values we need. Now, populate stats. diff --git a/src/runtime/netpoll.go b/src/runtime/netpoll.go index 9c2e40ce8a..52e7da5741 100644 --- a/src/runtime/netpoll.go +++ b/src/runtime/netpoll.go @@ -399,14 +399,14 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { // if they differ the descriptor was reused or timers were reset. pd.rt.arg = pd.makeArg() pd.rt.seq = pd.rseq - resettimer(&pd.rt, pd.rd) + pd.rt.reset(pd.rd) } } else if pd.rd != rd0 || combo != combo0 { pd.rseq++ // invalidate current timers if pd.rd > 0 { - modtimer(&pd.rt, pd.rd, 0, rtf, pd.makeArg(), pd.rseq) + pd.rt.modify(pd.rd, 0, rtf, pd.makeArg(), pd.rseq) } else { - deltimer(&pd.rt) + pd.rt.stop() pd.rt.f = nil } } @@ -415,14 +415,14 @@ func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) { pd.wt.f = netpollWriteDeadline pd.wt.arg = pd.makeArg() pd.wt.seq = pd.wseq - resettimer(&pd.wt, pd.wd) + pd.wt.reset(pd.wd) } } else if pd.wd != wd0 || combo != combo0 { pd.wseq++ // invalidate current timers if pd.wd > 0 && !combo { - modtimer(&pd.wt, pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq) + pd.wt.modify(pd.wd, 0, netpollWriteDeadline, pd.makeArg(), pd.wseq) } else { - deltimer(&pd.wt) + pd.wt.stop() pd.wt.f = nil } } @@ -461,11 +461,11 @@ func poll_runtime_pollUnblock(pd *pollDesc) { rg = netpollunblock(pd, 'r', false, &delta) wg = netpollunblock(pd, 'w', false, &delta) if pd.rt.f != nil { - deltimer(&pd.rt) + pd.rt.stop() pd.rt.f = nil } if pd.wt.f != nil { - 
deltimer(&pd.wt) + pd.wt.stop() pd.wt.f = nil } unlock(&pd.lock) diff --git a/src/runtime/netpoll_epoll.go b/src/runtime/netpoll_epoll.go index cda19fbc27..63b42a0014 100644 --- a/src/runtime/netpoll_epoll.go +++ b/src/runtime/netpoll_epoll.go @@ -7,17 +7,15 @@ package runtime import ( + "internal/runtime/syscall" "runtime/internal/atomic" - "runtime/internal/syscall" "unsafe" ) var ( - epfd int32 = -1 // epoll descriptor - - netpollBreakRd, netpollBreakWr uintptr // for netpollBreak - - netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak + epfd int32 = -1 // epoll descriptor + netpollEventFd uintptr // eventfd for netpollBreak + netpollWakeSig atomic.Uint32 // used to avoid duplicate calls of netpollBreak ) func netpollinit() { @@ -27,26 +25,25 @@ func netpollinit() { println("runtime: epollcreate failed with", errno) throw("runtime: netpollinit failed") } - r, w, errpipe := nonblockingPipe() - if errpipe != 0 { - println("runtime: pipe failed with", -errpipe) - throw("runtime: pipe failed") + efd, errno := syscall.Eventfd(0, syscall.EFD_CLOEXEC|syscall.EFD_NONBLOCK) + if errno != 0 { + println("runtime: eventfd failed with", -errno) + throw("runtime: eventfd failed") } ev := syscall.EpollEvent{ Events: syscall.EPOLLIN, } - *(**uintptr)(unsafe.Pointer(&ev.Data)) = &netpollBreakRd - errno = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, r, &ev) + *(**uintptr)(unsafe.Pointer(&ev.Data)) = &netpollEventFd + errno = syscall.EpollCtl(epfd, syscall.EPOLL_CTL_ADD, efd, &ev) if errno != 0 { println("runtime: epollctl failed with", errno) throw("runtime: epollctl failed") } - netpollBreakRd = uintptr(r) - netpollBreakWr = uintptr(w) + netpollEventFd = uintptr(efd) } func netpollIsPollDescriptor(fd uintptr) bool { - return fd == uintptr(epfd) || fd == netpollBreakRd || fd == netpollBreakWr + return fd == uintptr(epfd) || fd == netpollEventFd } func netpollopen(fd uintptr, pd *pollDesc) uintptr { @@ -73,10 +70,11 @@ func netpollBreak() { return } + 
var one uint64 = 1 + oneSize := int32(unsafe.Sizeof(one)) for { - var b byte - n := write(netpollBreakWr, unsafe.Pointer(&b), 1) - if n == 1 { + n := write(netpollEventFd, noescape(unsafe.Pointer(&one)), oneSize) + if n == oneSize { break } if n == -_EINTR { @@ -136,17 +134,19 @@ retry: continue } - if *(**uintptr)(unsafe.Pointer(&ev.Data)) == &netpollBreakRd { + if *(**uintptr)(unsafe.Pointer(&ev.Data)) == &netpollEventFd { if ev.Events != syscall.EPOLLIN { - println("runtime: netpoll: break fd ready for", ev.Events) - throw("runtime: netpoll: break fd ready for something unexpected") + println("runtime: netpoll: eventfd ready for", ev.Events) + throw("runtime: netpoll: eventfd ready for something unexpected") } if delay != 0 { // netpollBreak could be picked up by a - // nonblocking poll. Only read the byte - // if blocking. - var tmp [16]byte - read(int32(netpollBreakRd), noescape(unsafe.Pointer(&tmp[0])), int32(len(tmp))) + // nonblocking poll. Only read the 8-byte + // integer if blocking. + // Since EFD_SEMAPHORE was not specified, + // the eventfd counter will be reset to 0. + var one uint64 + read(int32(netpollEventFd), noescape(unsafe.Pointer(&one)), int32(unsafe.Sizeof(one))) netpollWakeSig.Store(0) } continue diff --git a/src/runtime/netpoll_os_test.go b/src/runtime/netpoll_os_test.go index b96b9f3ee3..1e375f88c7 100644 --- a/src/runtime/netpoll_os_test.go +++ b/src/runtime/netpoll_os_test.go @@ -1,3 +1,7 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ package runtime_test import ( diff --git a/src/runtime/netpoll_windows.go b/src/runtime/netpoll_windows.go index 484a9e85b2..b01f5ba725 100644 --- a/src/runtime/netpoll_windows.go +++ b/src/runtime/netpoll_windows.go @@ -5,6 +5,7 @@ package runtime import ( + "internal/goarch" "runtime/internal/atomic" "unsafe" ) @@ -13,21 +14,83 @@ const _DWORD_MAX = 0xffffffff const _INVALID_HANDLE_VALUE = ^uintptr(0) -// net_op must be the same as beginning of internal/poll.operation. -// Keep these in sync. -type net_op struct { - // used by windows - o overlapped - // used by netpoll - pd *pollDesc - mode int32 - errno int32 - qty uint32 +// Sources are used to identify the event that created an overlapped entry. +// The source values are arbitrary. There is no risk of collision with user +// defined values because the only way to set the key of an overlapped entry +// is using the iocphandle, which is not accessible to user code. +const ( + netpollSourceReady = iota + 1 + netpollSourceBreak + netpollSourceTimer +) + +const ( + // sourceBits is the number of bits needed to represent a source. + // 4 bits can hold 16 different sources, which is more than enough. + // It is set to a low value so the overlapped entry key can + // contain as much bits as possible for the pollDesc pointer. + sourceBits = 4 // 4 bits can hold 16 different sources, which is more than enough. + sourceMasks = 1< (1< 0: block for up to that many nanoseconds func netpoll(delay int64) (gList, int32) { - var entries [64]overlappedEntry - var wait, qty, flags, n, i uint32 - var errno int32 - var op *net_op - var toRun gList - - mp := getg().m - if iocphandle == _INVALID_HANDLE_VALUE { return gList{}, 0 } + + var entries [64]overlappedEntry + var wait uint32 + var toRun gList + mp := getg().m + + if delay >= 1e15 { + // An arbitrary cap on how long to wait for a timer. + // 1e15 ns == ~11.5 days. 
+ delay = 1e15 + } + + if delay > 0 && mp.waitIocpHandle != 0 { + // GetQueuedCompletionStatusEx doesn't use a high resolution timer internally, + // so we use a separate higher resolution timer associated with a wait completion + // packet to wake up the poller. Note that the completion packet can be delivered + // to another thread, and the Go scheduler expects netpoll to only block up to delay, + // so we still need to use a timeout with GetQueuedCompletionStatusEx. + // TODO: Improve the Go scheduler to support non-blocking timers. + signaled := netpollQueueTimer(delay) + if signaled { + // There is a small window between the SetWaitableTimer and the NtAssociateWaitCompletionPacket + // where the timer can expire. We can return immediately in this case. + return gList{}, 0 + } + } if delay < 0 { wait = _INFINITE } else if delay == 0 { wait = 0 } else if delay < 1e6 { wait = 1 - } else if delay < 1e15 { - wait = uint32(delay / 1e6) } else { - // An arbitrary cap on how long to wait for a timer. - // 1e9 ms == ~11.5 days. 
- wait = 1e9 + wait = uint32(delay / 1e6) } - - n = uint32(len(entries) / int(gomaxprocs)) + n := len(entries) / int(gomaxprocs) if n < 8 { n = 8 } @@ -119,7 +196,7 @@ func netpoll(delay int64) (gList, int32) { } if stdcall6(_GetQueuedCompletionStatusEx, iocphandle, uintptr(unsafe.Pointer(&entries[0])), uintptr(n), uintptr(unsafe.Pointer(&n)), uintptr(wait), 0) == 0 { mp.blocked = false - errno = int32(getlasterror()) + errno := getlasterror() if errno == _WAIT_TIMEOUT { return gList{}, 0 } @@ -128,34 +205,80 @@ func netpoll(delay int64) (gList, int32) { } mp.blocked = false delta := int32(0) - for i = 0; i < n; i++ { - op = entries[i].op - if op != nil && op.pd == entries[i].key { - errno = 0 - qty = 0 - if stdcall5(_WSAGetOverlappedResult, op.pd.fd, uintptr(unsafe.Pointer(op)), uintptr(unsafe.Pointer(&qty)), 0, uintptr(unsafe.Pointer(&flags))) == 0 { - errno = int32(getlasterror()) + for i := 0; i < n; i++ { + e := &entries[i] + switch unpackNetpollSource(e.key) { + case netpollSourceReady: + op := pollOperationFromOverlappedEntry(e) + if op == nil { + // Entry from outside the Go runtime and internal/poll, ignore. + continue } - delta += handlecompletion(&toRun, op, errno, qty) - } else { + // Entry from internal/poll. + mode := op.mode + if mode != 'r' && mode != 'w' { + println("runtime: GetQueuedCompletionStatusEx returned net_op with invalid mode=", mode) + throw("runtime: netpoll failed") + } + delta += netpollready(&toRun, op.pd, mode) + case netpollSourceBreak: netpollWakeSig.Store(0) if delay == 0 { - // Forward the notification to the - // blocked poller. + // Forward the notification to the blocked poller. netpollBreak() } + case netpollSourceTimer: + // TODO: We could avoid calling NtCancelWaitCompletionPacket for expired wait completion packets. 
+ default: + println("runtime: GetQueuedCompletionStatusEx returned net_op with invalid key=", e.key) + throw("runtime: netpoll failed") } } return toRun, delta } -func handlecompletion(toRun *gList, op *net_op, errno int32, qty uint32) int32 { - mode := op.mode - if mode != 'r' && mode != 'w' { - println("runtime: GetQueuedCompletionStatusEx returned invalid mode=", mode) +// netpollQueueTimer queues a timer to wake up the poller after the given delay. +// It returns true if the timer expired during this call. +func netpollQueueTimer(delay int64) (signaled bool) { + const ( + STATUS_SUCCESS = 0x00000000 + STATUS_PENDING = 0x00000103 + STATUS_CANCELLED = 0xC0000120 + ) + mp := getg().m + // A wait completion packet can only be associated with one timer at a time, + // so we need to cancel the previous one if it exists. This wouldn't be necessary + // if the poller would only be woken up by the timer, in which case the association + // would be automatically cancelled, but it can also be woken up by other events, + // such as a netpollBreak, so we can get to this point with a timer that hasn't + // expired yet. In this case, the completion packet can still be picked up by + // another thread, so defer the cancellation until it is really necessary. + errno := stdcall2(_NtCancelWaitCompletionPacket, mp.waitIocpHandle, 1) + switch errno { + case STATUS_CANCELLED: + // STATUS_CANCELLED is returned when the associated timer has already expired, + // in which automatically cancels the wait completion packet. 
+ fallthrough + case STATUS_SUCCESS: + dt := -delay / 100 // relative sleep (negative), 100ns units + if stdcall6(_SetWaitableTimer, mp.waitIocpTimer, uintptr(unsafe.Pointer(&dt)), 0, 0, 0, 0) == 0 { + println("runtime: SetWaitableTimer failed; errno=", getlasterror()) + throw("runtime: netpoll failed") + } + key := packNetpollKey(netpollSourceTimer, nil) + if errno := stdcall8(_NtAssociateWaitCompletionPacket, mp.waitIocpHandle, iocphandle, mp.waitIocpTimer, key, 0, 0, 0, uintptr(unsafe.Pointer(&signaled))); errno != 0 { + println("runtime: NtAssociateWaitCompletionPacket failed; errno=", errno) + throw("runtime: netpoll failed") + } + case STATUS_PENDING: + // STATUS_PENDING is returned if the wait operation can't be cancelled yet. + // This can happen if this thread was woken up by another event, such as a netpollBreak, + // and the timer expired just while calling NtCancelWaitCompletionPacket, in which case + // this call fails to cancel the association to avoid a race condition. + // This is a rare case, so we can just avoid using the high resolution timer this time. + default: + println("runtime: NtCancelWaitCompletionPacket failed; errno=", errno) throw("runtime: netpoll failed") } - op.errno = errno - op.qty = qty - return netpollready(toRun, op.pd, mode) + return signaled } diff --git a/src/runtime/nonwindows_stub.go b/src/runtime/nonwindows_stub.go index 033f026c42..e420acf185 100644 --- a/src/runtime/nonwindows_stub.go +++ b/src/runtime/nonwindows_stub.go @@ -12,6 +12,8 @@ package runtime // timer precision to keep the timer error acceptable. const osRelaxMinNS = 0 +var haveHighResSleep = true + // osRelax is called by the scheduler when transitioning to and from // all Ps being idle. 
func osRelax(relax bool) {} diff --git a/src/runtime/os2_aix.go b/src/runtime/os2_aix.go index 8af88d1832..0d20079242 100644 --- a/src/runtime/os2_aix.go +++ b/src/runtime/os2_aix.go @@ -428,7 +428,6 @@ func write1(fd uintptr, p unsafe.Pointer, n int32) int32 { } // Note that in this case we can't return a valid errno value. return write2(fd, uintptr(p), n) - } //go:nosplit @@ -641,7 +640,6 @@ func sysconf(name int32) uintptr { throw("syscall sysconf") } return r - } // pthread functions returns its error code in the main return value diff --git a/src/runtime/os3_solaris.go b/src/runtime/os3_solaris.go index 81629f02a2..92daf13b1a 100644 --- a/src/runtime/os3_solaris.go +++ b/src/runtime/os3_solaris.go @@ -198,11 +198,11 @@ func exitThread(wait *atomic.Uint32) { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_aix.go b/src/runtime/os_aix.go index b26922c908..3a5078a64c 100644 --- a/src/runtime/os_aix.go +++ b/src/runtime/os_aix.go @@ -239,11 +239,11 @@ func exitThread(wait *atomic.Uint32) { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_darwin.go b/src/runtime/os_darwin.go index ff33db084b..430d1865df 100644 --- a/src/runtime/os_darwin.go +++ b/src/runtime/os_darwin.go @@ -194,11 +194,11 @@ func getPageSize() uintptr { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), 
int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_darwin_arm64.go b/src/runtime/os_darwin_arm64.go index b808150de0..ebc1b139a6 100644 --- a/src/runtime/os_darwin_arm64.go +++ b/src/runtime/os_darwin_arm64.go @@ -6,7 +6,6 @@ package runtime //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_dragonfly.go b/src/runtime/os_dragonfly.go index 80c1267765..2aeea17755 100644 --- a/src/runtime/os_dragonfly.go +++ b/src/runtime/os_dragonfly.go @@ -181,11 +181,11 @@ func osinit() { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_freebsd.go b/src/runtime/os_freebsd.go index c05e00f6ac..d0d6f14fa0 100644 --- a/src/runtime/os_freebsd.go +++ b/src/runtime/os_freebsd.go @@ -283,11 +283,11 @@ func osinit() { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_freebsd_arm.go b/src/runtime/os_freebsd_arm.go index df8c709b8f..5f6bf46798 100644 --- a/src/runtime/os_freebsd_arm.go +++ b/src/runtime/os_freebsd_arm.go @@ -15,14 +15,16 @@ const ( ) func checkgoarm() { - if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 { + if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 { print("runtime: this CPU has no floating point hardware, so it cannot run\n") - print("this GOARM=", goarm, " 
binary. Recompile using GOARM=5.\n") + print("a binary compiled for hard floating point. Recompile adding ,softfloat\n") + print("to GOARM.\n") exit(1) } - if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 { + if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 { print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n") - print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n") + print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n") + print("to GOARM or changing GOARM to 6.\n") exit(1) } @@ -47,7 +49,6 @@ func archauxv(tag, val uintptr) { //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_freebsd_arm64.go b/src/runtime/os_freebsd_arm64.go index b5b25f0dc5..58bc5d34b7 100644 --- a/src/runtime/os_freebsd_arm64.go +++ b/src/runtime/os_freebsd_arm64.go @@ -6,7 +6,6 @@ package runtime //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed fastrand(). // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. 
return nanotime() } diff --git a/src/runtime/os_js.go b/src/runtime/os_js.go index 65fb499de6..099c5265a0 100644 --- a/src/runtime/os_js.go +++ b/src/runtime/os_js.go @@ -32,6 +32,11 @@ func usleep(usec uint32) { //go:noescape func getRandomData(r []byte) +func readRandom(r []byte) int { + getRandomData(r) + return len(r) +} + func goenvs() { goenvs_unix() } diff --git a/src/runtime/os_linux.go b/src/runtime/os_linux.go index 6386b82a85..ddacaa585c 100644 --- a/src/runtime/os_linux.go +++ b/src/runtime/os_linux.go @@ -7,8 +7,8 @@ package runtime import ( "internal/abi" "internal/goarch" + "internal/runtime/syscall" "runtime/internal/atomic" - "runtime/internal/syscall" "unsafe" ) @@ -288,10 +288,6 @@ func sysargs(argc int32, argv **byte) { auxv = auxvreadbuf[: pairs*2 : pairs*2] } -// startupRandomData holds random bytes initialized at startup. These come from -// the ELF AT_RANDOM auxiliary vector. -var startupRandomData []byte - // secureMode holds the value of AT_SECURE passed in the auxiliary vector. var secureMode bool @@ -303,7 +299,7 @@ func sysauxv(auxv []uintptr) (pairs int) { case _AT_RANDOM: // The kernel provides a pointer to 16-bytes // worth of random data. - startupRandomData = (*[16]byte)(unsafe.Pointer(val))[:] + startupRand = (*[16]byte)(unsafe.Pointer(val))[:] case _AT_PAGESZ: physPageSize = val @@ -352,16 +348,11 @@ func osinit() { var urandom_dev = []byte("/dev/urandom\x00") -func getRandomData(r []byte) { - if startupRandomData != nil { - n := copy(r, startupRandomData) - extendRandom(r, n) - return - } +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { @@ -656,7 +647,7 @@ func setThreadCPUProfiler(hz int32) { // activates may do a couple milliseconds of GC-related work and nothing // else in the few seconds that the profiler observes. 
spec := new(itimerspec) - spec.it_value.setNsec(1 + int64(fastrandn(uint32(1e9/hz)))) + spec.it_value.setNsec(1 + int64(cheaprandn(uint32(1e9/hz)))) spec.it_interval.setNsec(1e9 / int64(hz)) var timerid int32 diff --git a/src/runtime/os_linux_arm.go b/src/runtime/os_linux_arm.go index 6e0c729855..5e1274ebab 100644 --- a/src/runtime/os_linux_arm.go +++ b/src/runtime/os_linux_arm.go @@ -23,14 +23,16 @@ func checkgoarm() { if GOOS == "android" { return } - if goarm > 5 && cpu.HWCap&_HWCAP_VFP == 0 { + if cpu.HWCap&_HWCAP_VFP == 0 && goarmsoftfp == 0 { print("runtime: this CPU has no floating point hardware, so it cannot run\n") - print("this GOARM=", goarm, " binary. Recompile using GOARM=5.\n") + print("a binary compiled for hard floating point. Recompile adding ,softfloat\n") + print("to GOARM.\n") exit(1) } - if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 { + if goarm > 6 && cpu.HWCap&_HWCAP_VFPv3 == 0 && goarmsoftfp == 0 { print("runtime: this CPU has no VFPv3 floating point hardware, so it cannot run\n") - print("this GOARM=", goarm, " binary. Recompile using GOARM=5 or GOARM=6.\n") + print("a binary compiled for VFPv3 hard floating point. Recompile adding ,softfloat\n") + print("to GOARM or changing GOARM to 6.\n") exit(1) } } @@ -50,7 +52,6 @@ func osArchInit() {} //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed fastrand(). // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_linux_arm64.go b/src/runtime/os_linux_arm64.go index 2daa56fce7..62cead1d22 100644 --- a/src/runtime/os_linux_arm64.go +++ b/src/runtime/os_linux_arm64.go @@ -19,7 +19,6 @@ func osArchInit() {} //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed fastrand(). // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. 
return nanotime() } diff --git a/src/runtime/os_linux_mips64x.go b/src/runtime/os_linux_mips64x.go index 11d35bc020..770cc27ba7 100644 --- a/src/runtime/os_linux_mips64x.go +++ b/src/runtime/os_linux_mips64x.go @@ -19,7 +19,6 @@ func osArchInit() {} //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed fastrand(). // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_linux_mipsx.go b/src/runtime/os_linux_mipsx.go index cdf83ff71d..3807e6d051 100644 --- a/src/runtime/os_linux_mipsx.go +++ b/src/runtime/os_linux_mipsx.go @@ -13,7 +13,6 @@ func osArchInit() {} //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed fastrand(). // nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_netbsd.go b/src/runtime/os_netbsd.go index 7cbba48194..8abb688aae 100644 --- a/src/runtime/os_netbsd.go +++ b/src/runtime/os_netbsd.go @@ -274,11 +274,11 @@ func osinit() { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_netbsd_arm.go b/src/runtime/os_netbsd_arm.go index 5fb4e08d66..7494a387e3 100644 --- a/src/runtime/os_netbsd_arm.go +++ b/src/runtime/os_netbsd_arm.go @@ -31,7 +31,6 @@ func checkgoarm() { //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. 
return nanotime() } diff --git a/src/runtime/os_netbsd_arm64.go b/src/runtime/os_netbsd_arm64.go index 2dda9c9274..48841afdb6 100644 --- a/src/runtime/os_netbsd_arm64.go +++ b/src/runtime/os_netbsd_arm64.go @@ -20,7 +20,6 @@ func lwp_mcontext_init(mc *mcontextt, stk unsafe.Pointer, mp *m, gp *g, fn uintp //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_openbsd.go b/src/runtime/os_openbsd.go index aa2ba859a8..856979910a 100644 --- a/src/runtime/os_openbsd.go +++ b/src/runtime/os_openbsd.go @@ -142,11 +142,11 @@ func osinit() { var urandom_dev = []byte("/dev/urandom\x00") //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { fd := open(&urandom_dev[0], 0 /* O_RDONLY */, 0) n := read(fd, unsafe.Pointer(&r[0]), int32(len(r))) closefd(fd) - extendRandom(r, int(n)) + return int(n) } func goenvs() { diff --git a/src/runtime/os_openbsd_arm.go b/src/runtime/os_openbsd_arm.go index 0a2409676c..d5dc8cb479 100644 --- a/src/runtime/os_openbsd_arm.go +++ b/src/runtime/os_openbsd_arm.go @@ -17,7 +17,6 @@ func checkgoarm() { //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_openbsd_arm64.go b/src/runtime/os_openbsd_arm64.go index d71de7d196..4b2c6e3fe9 100644 --- a/src/runtime/os_openbsd_arm64.go +++ b/src/runtime/os_openbsd_arm64.go @@ -6,7 +6,6 @@ package runtime //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. 
return nanotime() } diff --git a/src/runtime/os_openbsd_mips64.go b/src/runtime/os_openbsd_mips64.go index ae220cd683..e5eeb2dcd1 100644 --- a/src/runtime/os_openbsd_mips64.go +++ b/src/runtime/os_openbsd_mips64.go @@ -6,7 +6,6 @@ package runtime //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. return nanotime() } diff --git a/src/runtime/os_plan9.go b/src/runtime/os_plan9.go index f4ff4d5f45..77446d09d3 100644 --- a/src/runtime/os_plan9.go +++ b/src/runtime/os_plan9.go @@ -327,24 +327,8 @@ func crash() { } //go:nosplit -func getRandomData(r []byte) { - // inspired by wyrand see hash32.go for detail - t := nanotime() - v := getg().m.procid ^ uint64(t) - - for len(r) > 0 { - v ^= 0xa0761d6478bd642f - v *= 0xe7037ed1a0b428db - size := 8 - if len(r) < 8 { - size = len(r) - } - for i := 0; i < size; i++ { - r[i] = byte(v >> (8 * i)) - } - r = r[size:] - v = v>>32 | v<<32 - } +func readRandom(r []byte) int { + return 0 } func initsig(preinit bool) { diff --git a/src/runtime/os_plan9_arm.go b/src/runtime/os_plan9_arm.go index f165a34151..cce6229323 100644 --- a/src/runtime/os_plan9_arm.go +++ b/src/runtime/os_plan9_arm.go @@ -10,7 +10,6 @@ func checkgoarm() { //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. 
return nanotime() } diff --git a/src/runtime/os_wasip1.go b/src/runtime/os_wasip1.go index 8811bb6178..acac2b3f7a 100644 --- a/src/runtime/os_wasip1.go +++ b/src/runtime/os_wasip1.go @@ -180,10 +180,11 @@ func usleep(usec uint32) { } } -func getRandomData(r []byte) { +func readRandom(r []byte) int { if random_get(unsafe.Pointer(&r[0]), size(len(r))) != 0 { - throw("random_get failed") + return 0 } + return len(r) } func goenvs() { diff --git a/src/runtime/os_wasm.go b/src/runtime/os_wasm.go index bf78dfb5f9..ce260de67e 100644 --- a/src/runtime/os_wasm.go +++ b/src/runtime/os_wasm.go @@ -122,9 +122,7 @@ func syscall_now() (sec int64, nsec int32) { //go:nosplit func cputicks() int64 { - // Currently cputicks() is used in blocking profiler and to seed runtime·fastrand(). // runtime·nanotime() is a poor approximation of CPU ticks that is enough for the profiler. - // TODO: need more entropy to better seed fastrand. return nanotime() } diff --git a/src/runtime/os_windows.go b/src/runtime/os_windows.go index 3772a864b2..ce3f224039 100644 --- a/src/runtime/os_windows.go +++ b/src/runtime/os_windows.go @@ -20,7 +20,6 @@ const ( //go:cgo_import_dynamic runtime._AddVectoredExceptionHandler AddVectoredExceptionHandler%2 "kernel32.dll" //go:cgo_import_dynamic runtime._CloseHandle CloseHandle%1 "kernel32.dll" //go:cgo_import_dynamic runtime._CreateEventA CreateEventA%4 "kernel32.dll" -//go:cgo_import_dynamic runtime._CreateFileA CreateFileA%7 "kernel32.dll" //go:cgo_import_dynamic runtime._CreateIoCompletionPort CreateIoCompletionPort%4 "kernel32.dll" //go:cgo_import_dynamic runtime._CreateThread CreateThread%6 "kernel32.dll" //go:cgo_import_dynamic runtime._CreateWaitableTimerA CreateWaitableTimerA%3 "kernel32.dll" @@ -78,7 +77,6 @@ var ( _AddVectoredExceptionHandler, _CloseHandle, _CreateEventA, - _CreateFileA, _CreateIoCompletionPort, _CreateThread, _CreateWaitableTimerA, @@ -133,13 +131,15 @@ var ( // Load ntdll.dll manually during startup, otherwise Mingw // links wrong 
printf function to cgo executable (see issue // 12030 for details). - _RtlGetCurrentPeb stdFunction - _RtlGetNtVersionNumbers stdFunction + _NtCreateWaitCompletionPacket stdFunction + _NtAssociateWaitCompletionPacket stdFunction + _NtCancelWaitCompletionPacket stdFunction + _RtlGetCurrentPeb stdFunction + _RtlGetNtVersionNumbers stdFunction // These are from non-kernel32.dll, so we prefer to LoadLibraryEx them. _timeBeginPeriod, _timeEndPeriod, - _WSAGetOverlappedResult, _ stdFunction ) @@ -148,7 +148,6 @@ var ( ntdlldll = [...]uint16{'n', 't', 'd', 'l', 'l', '.', 'd', 'l', 'l', 0} powrprofdll = [...]uint16{'p', 'o', 'w', 'r', 'p', 'r', 'o', 'f', '.', 'd', 'l', 'l', 0} winmmdll = [...]uint16{'w', 'i', 'n', 'm', 'm', '.', 'd', 'l', 'l', 0} - ws2_32dll = [...]uint16{'w', 's', '2', '_', '3', '2', '.', 'd', 'l', 'l', 0} ) // Function to be called by windows CreateThread @@ -165,7 +164,9 @@ type mOS struct { waitsema uintptr // semaphore for parking on locks resumesema uintptr // semaphore to indicate suspend/resume - highResTimer uintptr // high resolution timer handle used in usleep + highResTimer uintptr // high resolution timer handle used in usleep + waitIocpTimer uintptr // high resolution timer handle used in netpoll + waitIocpHandle uintptr // wait completion handle used in netpoll // preemptExtLock synchronizes preemptM with entry/exit from // external C code. @@ -254,27 +255,20 @@ func loadOptionalSyscalls() { if n32 == 0 { throw("ntdll.dll not found") } + _NtCreateWaitCompletionPacket = windowsFindfunc(n32, []byte("NtCreateWaitCompletionPacket\000")) + if _NtCreateWaitCompletionPacket != nil { + // These functions should exist if NtCreateWaitCompletionPacket exists. 
+ _NtAssociateWaitCompletionPacket = windowsFindfunc(n32, []byte("NtAssociateWaitCompletionPacket\000")) + if _NtAssociateWaitCompletionPacket == nil { + throw("NtCreateWaitCompletionPacket exists but NtAssociateWaitCompletionPacket does not") + } + _NtCancelWaitCompletionPacket = windowsFindfunc(n32, []byte("NtCancelWaitCompletionPacket\000")) + if _NtCancelWaitCompletionPacket == nil { + throw("NtCreateWaitCompletionPacket exists but NtCancelWaitCompletionPacket does not") + } + } _RtlGetCurrentPeb = windowsFindfunc(n32, []byte("RtlGetCurrentPeb\000")) _RtlGetNtVersionNumbers = windowsFindfunc(n32, []byte("RtlGetNtVersionNumbers\000")) - - m32 := windowsLoadSystemLib(winmmdll[:]) - if m32 == 0 { - throw("winmm.dll not found") - } - _timeBeginPeriod = windowsFindfunc(m32, []byte("timeBeginPeriod\000")) - _timeEndPeriod = windowsFindfunc(m32, []byte("timeEndPeriod\000")) - if _timeBeginPeriod == nil || _timeEndPeriod == nil { - throw("timeBegin/EndPeriod not found") - } - - ws232 := windowsLoadSystemLib(ws2_32dll[:]) - if ws232 == 0 { - throw("ws2_32.dll not found") - } - _WSAGetOverlappedResult = windowsFindfunc(ws232, []byte("WSAGetOverlappedResult\000")) - if _WSAGetOverlappedResult == nil { - throw("WSAGetOverlappedResult not found") - } } func monitorSuspendResume() { @@ -397,6 +391,13 @@ func osRelax(relax bool) uint32 { // CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag is available. var haveHighResTimer = false +// haveHighResSleep indicates that NtCreateWaitCompletionPacket +// exists and haveHighResTimer is true. +// NtCreateWaitCompletionPacket has been available since Windows 10, +// but has just been publicly documented, so some platforms, like Wine, +// don't support it yet. +var haveHighResSleep = false + // createHighResTimer calls CreateWaitableTimerEx with // CREATE_WAITABLE_TIMER_HIGH_RESOLUTION flag to create high // resolution timer. 
createHighResTimer returns new timer @@ -420,33 +421,34 @@ func initHighResTimer() { h := createHighResTimer() if h != 0 { haveHighResTimer = true + haveHighResSleep = _NtCreateWaitCompletionPacket != nil stdcall1(_CloseHandle, h) + } else { + // Only load winmm.dll if we need it. + // This avoids a dependency on winmm.dll for Go programs + // that run on new Windows versions. + m32 := windowsLoadSystemLib(winmmdll[:]) + if m32 == 0 { + print("runtime: LoadLibraryExW failed; errno=", getlasterror(), "\n") + throw("winmm.dll not found") + } + _timeBeginPeriod = windowsFindfunc(m32, []byte("timeBeginPeriod\000")) + _timeEndPeriod = windowsFindfunc(m32, []byte("timeEndPeriod\000")) + if _timeBeginPeriod == nil || _timeEndPeriod == nil { + print("runtime: GetProcAddress failed; errno=", getlasterror(), "\n") + throw("timeBegin/EndPeriod not found") + } } } -//go:linkname canUseLongPaths os.canUseLongPaths +//go:linkname canUseLongPaths internal/syscall/windows.CanUseLongPaths var canUseLongPaths bool -// We want this to be large enough to hold the contents of sysDirectory, *plus* -// a slash and another component that itself is greater than MAX_PATH. -var longFileName [(_MAX_PATH+1)*2 + 1]byte - -// initLongPathSupport initializes the canUseLongPaths variable, which is -// linked into os.canUseLongPaths for determining whether or not long paths -// need to be fixed up. In the best case, this function is running on newer -// Windows 10 builds, which have a bit field member of the PEB called -// "IsLongPathAwareProcess." When this is set, we don't need to go through the -// error-prone fixup function in order to access long paths. So this init -// function first checks the Windows build number, sets the flag, and then -// tests to see if it's actually working. If everything checks out, then -// canUseLongPaths is set to true, and later when called, os.fixLongPath -// returns early without doing work. +// initLongPathSupport enables long path support. 
func initLongPathSupport() { const ( IsLongPathAwareProcess = 0x80 PebBitFieldOffset = 3 - OPEN_EXISTING = 3 - ERROR_PATH_NOT_FOUND = 3 ) // Check that we're ≥ 10.0.15063. @@ -457,38 +459,11 @@ func initLongPathSupport() { } // Set the IsLongPathAwareProcess flag of the PEB's bit field. + // This flag is not documented, but it's known to be used + // by Windows to enable long path support. bitField := (*byte)(unsafe.Pointer(stdcall0(_RtlGetCurrentPeb) + PebBitFieldOffset)) - originalBitField := *bitField *bitField |= IsLongPathAwareProcess - // Check that this actually has an effect, by constructing a large file - // path and seeing whether we get ERROR_PATH_NOT_FOUND, rather than - // some other error, which would indicate the path is too long, and - // hence long path support is not successful. This whole section is NOT - // strictly necessary, but is a nice validity check for the near to - // medium term, when this functionality is still relatively new in - // Windows. - getRandomData(longFileName[len(longFileName)-33 : len(longFileName)-1]) - start := copy(longFileName[:], sysDirectory[:sysDirectoryLen]) - const dig = "0123456789abcdef" - for i := 0; i < 32; i++ { - longFileName[start+i*2] = dig[longFileName[len(longFileName)-33+i]>>4] - longFileName[start+i*2+1] = dig[longFileName[len(longFileName)-33+i]&0xf] - } - start += 64 - for i := start; i < len(longFileName)-1; i++ { - longFileName[i] = 'A' - } - stdcall7(_CreateFileA, uintptr(unsafe.Pointer(&longFileName[0])), 0, 0, 0, OPEN_EXISTING, 0, 0) - // The ERROR_PATH_NOT_FOUND error value is distinct from - // ERROR_FILE_NOT_FOUND or ERROR_INVALID_NAME, the latter of which we - // expect here due to the final component being too long. 
- if getlasterror() == ERROR_PATH_NOT_FOUND { - *bitField = originalBitField - println("runtime: warning: IsLongPathAwareProcess failed to enable long paths; proceeding in fixup mode") - return - } - canUseLongPaths = true } @@ -519,12 +494,12 @@ func osinit() { } //go:nosplit -func getRandomData(r []byte) { +func readRandom(r []byte) int { n := 0 if stdcall2(_ProcessPrng, uintptr(unsafe.Pointer(&r[0])), uintptr(len(r)))&0xff != 0 { n = len(r) } - extendRandom(r, n) + return n } func goenvs() { @@ -847,7 +822,7 @@ func sigblock(exiting bool) { } // Called to initialize a new m (including the bootstrap m). -// Called on the new thread, cannot allocate memory. +// Called on the new thread, cannot allocate Go memory. func minit() { var thandle uintptr if stdcall7(_DuplicateHandle, currentProcess, currentThread, currentProcess, uintptr(unsafe.Pointer(&thandle)), 0, 0, _DUPLICATE_SAME_ACCESS) == 0 { @@ -868,6 +843,19 @@ func minit() { throw("CreateWaitableTimerEx when creating timer failed") } } + if mp.waitIocpHandle == 0 && haveHighResSleep { + mp.waitIocpTimer = createHighResTimer() + if mp.waitIocpTimer == 0 { + print("runtime: CreateWaitableTimerEx failed; errno=", getlasterror(), "\n") + throw("CreateWaitableTimerEx when creating timer failed") + } + const GENERIC_ALL = 0x10000000 + errno := stdcall3(_NtCreateWaitCompletionPacket, uintptr(unsafe.Pointer(&mp.waitIocpHandle)), GENERIC_ALL, 0) + if mp.waitIocpHandle == 0 { + print("runtime: NtCreateWaitCompletionPacket failed; errno=", errno, "\n") + throw("NtCreateWaitCompletionPacket failed") + } + } unlock(&mp.threadLock) // Query the true stack base from the OS. 
Currently we're @@ -922,6 +910,14 @@ func mdestroy(mp *m) { stdcall1(_CloseHandle, mp.highResTimer) mp.highResTimer = 0 } + if mp.waitIocpTimer != 0 { + stdcall1(_CloseHandle, mp.waitIocpTimer) + mp.waitIocpTimer = 0 + } + if mp.waitIocpHandle != 0 { + stdcall1(_CloseHandle, mp.waitIocpHandle) + mp.waitIocpHandle = 0 + } if mp.waitsema != 0 { stdcall1(_CloseHandle, mp.waitsema) mp.waitsema = 0 diff --git a/src/runtime/pagetrace_on.go b/src/runtime/pagetrace_on.go index 0e621cb6ca..f82521caad 100644 --- a/src/runtime/pagetrace_on.go +++ b/src/runtime/pagetrace_on.go @@ -317,7 +317,7 @@ func finishPageTrace() { pageTrace.enabled = false // Execute a ragged barrier, flushing each trace buffer. - forEachP(func(pp *p) { + forEachP(waitReasonPageTraceFlush, func(pp *p) { if pp.pageTraceBuf.buf != nil { pp.pageTraceBuf = pp.pageTraceBuf.flush(pp.id, nanotime()) } diff --git a/src/runtime/panic.go b/src/runtime/panic.go index 36d658aa4c..99eb1c3e23 100644 --- a/src/runtime/panic.go +++ b/src/runtime/panic.go @@ -420,17 +420,18 @@ func deferprocat(fn func(), frame any) { return0() } -// deferconvert converts a rangefunc defer list into an ordinary list. +// deferconvert converts the rangefunc defer list of d0 into an ordinary list +// following d0. // See the doc comment for deferrangefunc for details. 
-func deferconvert(d *_defer) *_defer { - head := d.head +func deferconvert(d0 *_defer) { + head := d0.head if raceenabled { racereadpc(unsafe.Pointer(head), getcallerpc(), abi.FuncPCABIInternal(deferconvert)) } - tail := d.link - d.rangefunc = false - d0 := d + tail := d0.link + d0.rangefunc = false + var d *_defer for { d = head.Load() if head.CompareAndSwap(d, badDefer()) { @@ -438,8 +439,7 @@ func deferconvert(d *_defer) *_defer { } } if d == nil { - freedefer(d0) - return tail + return } for d1 := d; ; d1 = d1.link { d1.sp = d0.sp @@ -449,8 +449,8 @@ func deferconvert(d *_defer) *_defer { break } } - freedefer(d0) - return d + d0.link = d + return } // deferprocStack queues a new deferred function with a defer record on the stack. @@ -528,22 +528,18 @@ func newdefer() *_defer { return d } -// Free the given defer. -// The defer cannot be used after this call. -// -// This is nosplit because the incoming defer is in a perilous state. -// It's not on any defer list, so stack copying won't adjust stack -// pointers in it (namely, d.link). Hence, if we were to copy the -// stack, d could then contain a stale pointer. -// -//go:nosplit -func freedefer(d *_defer) { +// popDefer pops the head of gp's defer list and frees it. +func popDefer(gp *g) { + d := gp._defer + d.fn = nil // Can in theory point to the stack + // We must not copy the stack between the updating gp._defer and setting + // d.link to nil. Between these two steps, d is not on any defer list, so + // stack copying won't adjust stack pointers in it (namely, d.link). Hence, + // if we were to copy the stack, d could then contain a stale pointer. + gp._defer = d.link d.link = nil // After this point we can copy the stack. - if d.fn != nil { - freedeferfn() - } if !d.heap { return } @@ -579,13 +575,6 @@ func freedefer(d *_defer) { mp, pp = nil, nil } -// Separate function so that it can split stack. -// Windows otherwise runs out of stack space. 
-func freedeferfn() { - // fn must be cleared before d is unlinked from gp. - throw("freedefer with d.fn != nil") -} - // deferreturn runs deferred functions for the caller's frame. // The compiler inserts a call to this at the end of any // function which calls defer. @@ -770,6 +759,16 @@ func gopanic(e any) { fn() } + // If we're tracing, flush the current generation to make the trace more + // readable. + // + // TODO(aktau): Handle a panic from within traceAdvance more gracefully. + // Currently it would hang. Not handled now because it is very unlikely, and + // already unrecoverable. + if traceEnabled() { + traceAdvance(false) + } + // ran out of deferred calls - old-school panic now // Because it is unsafe to call arbitrary user code after freezing // the world, we call preprintpanics to invoke all necessary Error @@ -876,12 +875,12 @@ func (p *_panic) nextDefer() (func(), bool) { Recheck: if d := gp._defer; d != nil && d.sp == uintptr(p.sp) { if d.rangefunc { - gp._defer = deferconvert(d) + deferconvert(d) + popDefer(gp) goto Recheck } fn := d.fn - d.fn = nil // TODO(mdempsky): Instead of having each deferproc call have // its own "deferreturn(); return" sequence, we should just make @@ -889,8 +888,7 @@ func (p *_panic) nextDefer() (func(), bool) { p.retpc = d.pc // Unlink and free. - gp._defer = d.link - freedefer(d) + popDefer(gp) return fn, true } diff --git a/src/runtime/pinner.go b/src/runtime/pinner.go index ea5b909aea..1ede1113ee 100644 --- a/src/runtime/pinner.go +++ b/src/runtime/pinner.go @@ -10,8 +10,8 @@ import ( ) // A Pinner is a set of Go objects each pinned to a fixed location in memory. The -// [Pin] method pins one object, while [Unpin] unpins all pinned objects. See their -// comments for more information. +// [Pinner.Pin] method pins one object, while [Pinner.Unpin] unpins all pinned +// objects. See their comments for more information. 
type Pinner struct { *pinner } diff --git a/src/runtime/pprof/label_test.go b/src/runtime/pprof/label_test.go index fcb00bde50..cefd9a53e2 100644 --- a/src/runtime/pprof/label_test.go +++ b/src/runtime/pprof/label_test.go @@ -1,3 +1,7 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + package pprof import ( diff --git a/src/runtime/pprof/pprof.go b/src/runtime/pprof/pprof.go index d3a3c788b1..e352b39caf 100644 --- a/src/runtime/pprof/pprof.go +++ b/src/runtime/pprof/pprof.go @@ -69,7 +69,7 @@ // all pprof commands. // // For more information about pprof, see -// https://github.com/google/pprof/blob/master/doc/README.md. +// https://github.com/google/pprof/blob/main/doc/README.md. package pprof import ( @@ -109,6 +109,12 @@ import ( // These predefined profiles maintain themselves and panic on an explicit // [Profile.Add] or [Profile.Remove] method call. // +// The CPU profile is not available as a Profile. It has a special API, +// the [StartCPUProfile] and [StopCPUProfile] functions, because it streams +// output to a writer during profiling. +// +// # Heap profile +// // The heap profile reports statistics as of the most recently completed // garbage collection; it elides more recent allocation to avoid skewing // the profile away from live data and toward garbage. @@ -122,13 +128,47 @@ import ( // flags select which to display, defaulting to -inuse_space (live objects, // scaled by size). // +// # Allocs profile +// // The allocs profile is the same as the heap profile but changes the default // pprof display to -alloc_space, the total number of bytes allocated since // the program began (including garbage-collected bytes). // -// The CPU profile is not available as a Profile. It has a special API, -// the [StartCPUProfile] and [StopCPUProfile] functions, because it streams -// output to a writer during profiling. 
+// # Block profile +// +// The block profile tracks time spent blocked on synchronization primitives, +// such as [sync.Mutex], [sync.RWMutex], [sync.WaitGroup], [sync.Cond], and +// channel send/receive/select. +// +// Stack traces correspond to the location that blocked (for example, +// [sync.Mutex.Lock]). +// +// Sample values correspond to cumulative time spent blocked at that stack +// trace, subject to time-based sampling specified by +// [runtime.SetBlockProfileRate]. +// +// # Mutex profile +// +// The mutex profile tracks contention on mutexes, such as [sync.Mutex], +// [sync.RWMutex], and runtime-internal locks. +// +// Stack traces correspond to the end of the critical section causing +// contention. For example, a lock held for a long time while other goroutines +// are waiting to acquire the lock will report contention when the lock is +// finally unlocked (that is, at [sync.Mutex.Unlock]). +// +// Sample values correspond to the approximate cumulative time other goroutines +// spent blocked waiting for the lock, subject to event-based sampling +// specified by [runtime.SetMutexProfileFraction]. For example, if a caller +// holds a lock for 1s while 5 other goroutines are waiting for the entire +// second to acquire the lock, its unlock call stack will report 5s of +// contention. +// +// Runtime-internal locks are always reported at the location +// "runtime._LostContendedRuntimeLock". More detailed stack traces for +// runtime-internal locks can be obtained by setting +// `GODEBUG=runtimecontentionstacks=1` (see package [runtime] docs for +// caveats). type Profile struct { name string mu sync.Mutex @@ -312,7 +352,7 @@ func (p *Profile) Remove(value any) { // // The debug parameter enables additional output. // Passing debug=0 writes the gzip-compressed protocol buffer described -// in https://github.com/google/pprof/tree/master/proto#overview. +// in https://github.com/google/pprof/tree/main/proto#overview. 
// Passing debug=1 writes the legacy text format with comments // translating addresses to function names and line numbers, so that a // programmer can read the profile without tools. @@ -725,6 +765,7 @@ func writeRuntimeProfile(w io.Writer, debug int, name string, fetch func([]runti var p []runtime.StackRecord var labels []unsafe.Pointer n, ok := fetch(nil, nil) + for { // Allocate room for a slightly bigger profile, // in case a few more entries have been added diff --git a/src/runtime/pprof/pprof_test.go b/src/runtime/pprof/pprof_test.go index 6b299e59bf..f57c1fed50 100644 --- a/src/runtime/pprof/pprof_test.go +++ b/src/runtime/pprof/pprof_test.go @@ -1605,7 +1605,11 @@ func TestGoroutineProfileConcurrency(t *testing.T) { // The finalizer goroutine should show up when it's running user code. t.Run("finalizer present", func(t *testing.T) { - obj := new(byte) + // T is a pointer type so it won't be allocated by the tiny + // allocator, which can lead to its finalizer not being called + // during this test + type T *byte + obj := new(T) ch1, ch2 := make(chan int), make(chan int) defer close(ch2) runtime.SetFinalizer(obj, func(_ interface{}) { diff --git a/src/runtime/pprof/proto.go b/src/runtime/pprof/proto.go index cdc4bd7c80..5214374bd9 100644 --- a/src/runtime/pprof/proto.go +++ b/src/runtime/pprof/proto.go @@ -561,7 +561,7 @@ func (d *pcDeck) tryAdd(pc uintptr, frames []runtime.Frame, symbolizeResult symb if last.Entry != newFrame.Entry { // newFrame is for a different function. return false } - if last.Function == newFrame.Function { // maybe recursion. + if runtime_FrameSymbolName(&last) == runtime_FrameSymbolName(&newFrame) { // maybe recursion. return false } } @@ -611,13 +611,14 @@ func (b *profileBuilder) emitLocation() uint64 { b.pb.uint64Opt(tagLocation_Address, uint64(firstFrame.PC)) for _, frame := range b.deck.frames { // Write out each line in frame expansion. 
- funcID := uint64(b.funcs[frame.Function]) + funcName := runtime_FrameSymbolName(&frame) + funcID := uint64(b.funcs[funcName]) if funcID == 0 { funcID = uint64(len(b.funcs)) + 1 - b.funcs[frame.Function] = int(funcID) + b.funcs[funcName] = int(funcID) newFuncs = append(newFuncs, newFunc{ id: funcID, - name: runtime_FrameSymbolName(&frame), + name: funcName, file: frame.File, startLine: int64(runtime_FrameStartLine(&frame)), }) diff --git a/src/runtime/pprof/proto_test.go b/src/runtime/pprof/proto_test.go index e1a7f2306d..caaaa45f12 100644 --- a/src/runtime/pprof/proto_test.go +++ b/src/runtime/pprof/proto_test.go @@ -45,7 +45,7 @@ func fmtJSON(x any) string { return string(js) } -func TestConvertCPUProfileEmpty(t *testing.T) { +func TestConvertCPUProfileNoSamples(t *testing.T) { // A test server with mock cpu profile data. var buf bytes.Buffer @@ -64,9 +64,13 @@ func TestConvertCPUProfileEmpty(t *testing.T) { } // Expected PeriodType and SampleType. - sampleType := []*profile.ValueType{{}, {}} + periodType := &profile.ValueType{Type: "cpu", Unit: "nanoseconds"} + sampleType := []*profile.ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + } - checkProfile(t, p, 2000*1000, nil, sampleType, nil, "") + checkProfile(t, p, 2000*1000, periodType, sampleType, nil, "") } func f1() { f1() } @@ -82,21 +86,28 @@ func testPCs(t *testing.T) (addr1, addr2 uint64, map1, map2 *profile.Mapping) { if err != nil { t.Fatal(err) } - mprof := &profile.Profile{} - if err = mprof.ParseMemoryMap(bytes.NewReader(mmap)); err != nil { - t.Fatalf("parsing /proc/self/maps: %v", err) - } - if len(mprof.Mapping) < 2 { + var mappings []*profile.Mapping + id := uint64(1) + parseProcSelfMaps(mmap, func(lo, hi, offset uint64, file, buildID string) { + mappings = append(mappings, &profile.Mapping{ + ID: id, + Start: lo, + Limit: hi, + Offset: offset, + File: file, + BuildID: buildID, + }) + id++ + }) + if len(mappings) < 2 { // It is possible for a binary to only 
have 1 executable // region of memory. - t.Skipf("need 2 or more mappings, got %v", len(mprof.Mapping)) + t.Skipf("need 2 or more mappings, got %v", len(mappings)) } - addr1 = mprof.Mapping[0].Start - map1 = mprof.Mapping[0] - map1.BuildID, _ = elfBuildID(map1.File) - addr2 = mprof.Mapping[1].Start - map2 = mprof.Mapping[1] - map2.BuildID, _ = elfBuildID(map2.File) + addr1 = mappings[0].Start + map1 = mappings[0] + addr2 = mappings[1].Start + map2 = mappings[1] case "windows", "darwin", "ios": addr1 = uint64(abi.FuncPCABIInternal(f1)) addr2 = uint64(abi.FuncPCABIInternal(f2)) diff --git a/src/runtime/pprof/protomem_test.go b/src/runtime/pprof/protomem_test.go index 156f6286a9..5fb67c53f6 100644 --- a/src/runtime/pprof/protomem_test.go +++ b/src/runtime/pprof/protomem_test.go @@ -6,8 +6,12 @@ package pprof import ( "bytes" + "fmt" "internal/profile" + "internal/testenv" "runtime" + "slices" + "strings" "testing" ) @@ -82,3 +86,138 @@ func TestConvertMemProfile(t *testing.T) { }) } } + +func genericAllocFunc[T interface{ uint32 | uint64 }](n int) []T { + return make([]T, n) +} + +func profileToStrings(p *profile.Profile) []string { + var res []string + for _, s := range p.Sample { + res = append(res, sampleToString(s)) + } + return res +} + +func sampleToString(s *profile.Sample) string { + var funcs []string + for i := len(s.Location) - 1; i >= 0; i-- { + loc := s.Location[i] + funcs = locationToStrings(loc, funcs) + } + return fmt.Sprintf("%s %v", strings.Join(funcs, ";"), s.Value) +} + +func locationToStrings(loc *profile.Location, funcs []string) []string { + for j := range loc.Line { + line := loc.Line[len(loc.Line)-1-j] + funcs = append(funcs, line.Function.Name) + } + return funcs +} + +// This is a regression test for https://go.dev/issue/64528 . 
+func TestGenericsHashKeyInPprofBuilder(t *testing.T) { + previousRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + defer func() { + runtime.MemProfileRate = previousRate + }() + for _, sz := range []int{128, 256} { + genericAllocFunc[uint32](sz / 4) + } + for _, sz := range []int{32, 64} { + genericAllocFunc[uint64](sz / 8) + } + + runtime.GC() + buf := bytes.NewBuffer(nil) + if err := WriteHeapProfile(buf); err != nil { + t.Fatalf("writing profile: %v", err) + } + p, err := profile.Parse(buf) + if err != nil { + t.Fatalf("profile.Parse: %v", err) + } + + actual := profileToStrings(p) + expected := []string{ + "testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint32] [1 128 0 0]", + "testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint32] [1 256 0 0]", + "testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint64] [1 32 0 0]", + "testing.tRunner;runtime/pprof.TestGenericsHashKeyInPprofBuilder;runtime/pprof.genericAllocFunc[go.shape.uint64] [1 64 0 0]", + } + + for _, l := range expected { + if !slices.Contains(actual, l) { + t.Errorf("profile = %v\nwant = %v", strings.Join(actual, "\n"), l) + } + } +} + +type opAlloc struct { + buf [128]byte +} + +type opCall struct { +} + +var sink []byte + +func storeAlloc() { + sink = make([]byte, 16) +} + +func nonRecursiveGenericAllocFunction[CurrentOp any, OtherOp any](alloc bool) { + if alloc { + storeAlloc() + } else { + nonRecursiveGenericAllocFunction[OtherOp, CurrentOp](true) + } +} + +func TestGenericsInlineLocations(t *testing.T) { + if testenv.OptimizationOff() { + t.Skip("skipping test with optimizations disabled") + } + + previousRate := runtime.MemProfileRate + runtime.MemProfileRate = 1 + defer func() { + runtime.MemProfileRate = previousRate + sink = nil + }() + + nonRecursiveGenericAllocFunction[opAlloc, opCall](true) + 
nonRecursiveGenericAllocFunction[opCall, opAlloc](false) + + runtime.GC() + + buf := bytes.NewBuffer(nil) + if err := WriteHeapProfile(buf); err != nil { + t.Fatalf("writing profile: %v", err) + } + p, err := profile.Parse(buf) + if err != nil { + t.Fatalf("profile.Parse: %v", err) + } + + const expectedSample = "testing.tRunner;runtime/pprof.TestGenericsInlineLocations;runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct {},go.shape.struct { runtime/pprof.buf [128]uint8 }];runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct { runtime/pprof.buf [128]uint8 },go.shape.struct {}];runtime/pprof.storeAlloc [1 16 1 16]" + const expectedLocation = "runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct {},go.shape.struct { runtime/pprof.buf [128]uint8 }];runtime/pprof.nonRecursiveGenericAllocFunction[go.shape.struct { runtime/pprof.buf [128]uint8 },go.shape.struct {}];runtime/pprof.storeAlloc" + const expectedLocationNewInliner = "runtime/pprof.TestGenericsInlineLocations;" + expectedLocation + var s *profile.Sample + for _, sample := range p.Sample { + if sampleToString(sample) == expectedSample { + s = sample + break + } + } + if s == nil { + t.Fatalf("expected \n%s\ngot\n%s", expectedSample, strings.Join(profileToStrings(p), "\n")) + } + loc := s.Location[0] + actual := strings.Join(locationToStrings(loc, nil), ";") + if expectedLocation != actual && expectedLocationNewInliner != actual { + t.Errorf("expected a location with at least 3 functions\n%s\ngot\n%s\n", expectedLocation, actual) + } +} diff --git a/src/runtime/pprof/vminfo_darwin_test.go b/src/runtime/pprof/vminfo_darwin_test.go index 1a3b67a0bf..ac180826b1 100644 --- a/src/runtime/pprof/vminfo_darwin_test.go +++ b/src/runtime/pprof/vminfo_darwin_test.go @@ -34,7 +34,10 @@ func TestVMInfo(t *testing.T) { // the go toolchain itself. 
first = false }) - lo, hi := useVMMap(t) + lo, hi, err := useVMMapWithRetry(t) + if err != nil { + t.Fatal(err) + } if got, want := begin, lo; got != want { t.Errorf("got %x, want %x", got, want) } @@ -53,7 +56,21 @@ func TestVMInfo(t *testing.T) { } } -func useVMMap(t *testing.T) (hi, lo uint64) { +func useVMMapWithRetry(t *testing.T) (hi, lo uint64, err error) { + var retryable bool + for { + hi, lo, retryable, err = useVMMap(t) + if err == nil { + return hi, lo, nil + } + if !retryable { + return 0, 0, err + } + t.Logf("retrying vmmap after error: %v", err) + } +} + +func useVMMap(t *testing.T) (hi, lo uint64, retryable bool, err error) { pid := strconv.Itoa(os.Getpid()) testenv.MustHaveExecPath(t, "vmmap") cmd := testenv.Command(t, "vmmap", pid) @@ -62,21 +79,25 @@ func useVMMap(t *testing.T) (hi, lo uint64) { t.Logf("vmmap output: %s", out) if ee, ok := cmdErr.(*exec.ExitError); ok && len(ee.Stderr) > 0 { t.Logf("%v: %v\n%s", cmd, cmdErr, ee.Stderr) + retryable = bytes.Contains(ee.Stderr, []byte("resource shortage")) + } + t.Logf("%v: %v\n", cmd, cmdErr) + if retryable { + return 0, 0, true, cmdErr } - t.Logf("%v: %v", cmd, cmdErr) } // Always parse the output of vmmap since it may return an error // code even if it successfully reports the text segment information // required for this test. - hi, lo, err := parseVmmap(out) + hi, lo, err = parseVmmap(out) if err != nil { if cmdErr != nil { - t.Fatalf("failed to parse vmmap output, vmmap reported an error: %v", err) + return 0, 0, false, fmt.Errorf("failed to parse vmmap output, vmmap reported an error: %v", err) } t.Logf("vmmap output: %s", out) - t.Fatalf("failed to parse vmmap output, vmmap did not report an error: %v", err) + return 0, 0, false, fmt.Errorf("failed to parse vmmap output, vmmap did not report an error: %v", err) } - return hi, lo + return hi, lo, false, nil } // parseVmmap parses the output of vmmap and calls addMapping for the first r-x TEXT segment in the output. 
diff --git a/src/runtime/preempt_arm.s b/src/runtime/preempt_arm.s index 8f243c0dcd..b68df5d6b1 100644 --- a/src/runtime/preempt_arm.s +++ b/src/runtime/preempt_arm.s @@ -19,9 +19,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVW R12, 48(R13) MOVW CPSR, R0 MOVW R0, 52(R13) - MOVB ·goarm(SB), R0 - CMP $6, R0 - BLT nofp + MOVB ·goarmsoftfp(SB), R0 + CMP $0, R0 + BNE nofp MOVW FPCR, R0 MOVW R0, 56(R13) MOVD F0, 60(R13) @@ -42,9 +42,9 @@ TEXT ·asyncPreempt(SB),NOSPLIT|NOFRAME,$0-0 MOVD F15, 180(R13) nofp: CALL ·asyncPreempt2(SB) - MOVB ·goarm(SB), R0 - CMP $6, R0 - BLT nofp2 + MOVB ·goarmsoftfp(SB), R0 + CMP $0, R0 + BNE nofp2 MOVD 180(R13), F15 MOVD 172(R13), F14 MOVD 164(R13), F13 diff --git a/src/runtime/proc.go b/src/runtime/proc.go index 824b0fa009..33fdf864ff 100644 --- a/src/runtime/proc.go +++ b/src/runtime/proc.go @@ -167,7 +167,7 @@ func main() { // Allow newproc to start new Ms. mainStarted = true - if GOARCH != "wasm" { // no threads on wasm yet, so no sysmon + if haveSysmon { systemstack(func() { newm(sysmon, nil, -1) }) @@ -576,7 +576,10 @@ func switchToCrashStack(fn func()) { abort() } -const crashStackImplemented = GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "riscv64" +// Disable crash stack on Windows for now. Apparently, throwing an exception +// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and +// hangs the process (see issue 63938). 
+const crashStackImplemented = (GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x" || GOARCH == "wasm") && GOOS != "windows" //go:noescape func switchToCrashStack0(fn func()) // in assembly @@ -756,6 +759,8 @@ func schedinit() { lockInit(&reflectOffs.lock, lockRankReflectOffs) lockInit(&finlock, lockRankFin) lockInit(&cpuprof.lock, lockRankCpuprof) + allocmLock.init(lockRankAllocmR, lockRankAllocmRInternal, lockRankAllocmW) + execLock.init(lockRankExecR, lockRankExecRInternal, lockRankExecW) traceLockInit() // Enforce that this lock is always a leaf lock. // All of this lock's critical sections should be @@ -770,6 +775,7 @@ func schedinit() { } sched.maxmcount = 10000 + crashFD.Store(^uintptr(0)) // The world starts stopped. worldStopped() @@ -781,8 +787,8 @@ func schedinit() { godebug := getGodebugEarly() initPageTrace(godebug) // must run after mallocinit but before anything allocates cpuinit(godebug) // must run before alginit - alginit() // maps, hash, fastrand must not be used before this call - fastrandinit() // must run before mcommoninit + randinit() // must run before alginit, mcommoninit + alginit() // maps, hash, rand must not be used before this call mcommoninit(gp.m, -1) modulesinit() // provides activeModules typelinksinit() // uses maps, activeModules @@ -897,18 +903,7 @@ func mcommoninit(mp *m, id int64) { mp.id = mReserveID() } - lo := uint32(int64Hash(uint64(mp.id), fastrandseed)) - hi := uint32(int64Hash(uint64(cputicks()), ^fastrandseed)) - if lo|hi == 0 { - hi = 1 - } - // Same behavior as for 1.17. - // TODO: Simplify this. 
- if goarch.BigEndian { - mp.fastrand = uint64(lo)<<32 | uint64(hi) - } else { - mp.fastrand = uint64(hi)<<32 | uint64(lo) - } + mrandinit(mp) mpreinit(mp) if mp.gsignal != nil { @@ -919,7 +914,7 @@ func mcommoninit(mp *m, id int64) { // when it is just in a register or thread-local storage. mp.alllink = allm - // NumCgoCall() iterates over allm w/o schedlock, + // NumCgoCall() and others iterate over allm w/o schedlock, // so we need to publish it safely. atomicstorep(unsafe.Pointer(&allm), unsafe.Pointer(mp)) unlock(&sched.lock) @@ -954,13 +949,6 @@ const ( osHasLowResClock = osHasLowResClockInt > 0 ) -var fastrandseed uintptr - -func fastrandinit() { - s := (*[unsafe.Sizeof(fastrandseed)]byte)(unsafe.Pointer(&fastrandseed))[:] - getRandomData(s) -} - // Mark gp ready to run. func ready(gp *g, traceskip int, next bool) { status := readgstatus(gp) @@ -1852,6 +1840,7 @@ found: unlock(&sched.lock) atomic.Xadd64(&ncgocall, int64(mp.ncgocall)) + sched.totalRuntimeLockWaitTime.Add(mp.mLockProfile.waitTime.Load()) // Release the P. handoffp(releasep()) @@ -2424,8 +2413,20 @@ func dropm() { // Flush all the M's buffers. This is necessary because the M might // be used on a different thread with a different procid, so we have // to make sure we don't write into the same buffer. - if traceEnabled() || traceShuttingDown() { + // + // N.B. traceThreadDestroy is a no-op in the old tracer, so avoid the + // unnecessary acquire/release of the lock. + if goexperiment.ExecTracer2 && (traceEnabled() || traceShuttingDown()) { + // Acquire sched.lock across thread destruction. One of the invariants of the tracer + // is that a thread cannot disappear from the tracer's view (allm or freem) without + // it noticing, so it requires that sched.lock be held over traceThreadDestroy. 
+ // + // This isn't strictly necessary in this case, because this thread never leaves allm, + // but the critical section is short and dropm is rare on pthread platforms, so just + // take the lock and play it safe. traceThreadDestroy also asserts that the lock is held. + lock(&sched.lock) traceThreadDestroy(mp) + unlock(&sched.lock) } mp.isExtraInSig = false @@ -2960,7 +2961,7 @@ func handoffp(pp *p) { // The scheduler lock cannot be held when calling wakeNetPoller below // because wakeNetPoller may call wakep which may call startm. - when := nobarrierWakeTime(pp) + when := pp.timers.wakeTime() pidleput(pp, 0) unlock(&sched.lock) @@ -3157,7 +3158,7 @@ top: // which may steal timers. It's important that between now // and then, nothing blocks, so these numbers remain mostly // relevant. - now, pollUntil, _ := checkTimers(pp, 0) + now, pollUntil, _ := pp.timers.check(0) // Try to schedule the trace reader. if traceEnabled() || traceShuttingDown() { @@ -3550,7 +3551,7 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWo for i := 0; i < stealTries; i++ { stealTimersOrRunNextG := i == stealTries-1 - for enum := stealOrder.start(fastrand()); !enum.done(); enum.next() { + for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() { if sched.gcwaiting.Load() { // GC work may be available. return nil, false, now, pollUntil, true @@ -3574,7 +3575,7 @@ func stealWork(now int64) (gp *g, inheritTime bool, rnow, pollUntil int64, newWo // timerpMask tells us whether the P may have timers at all. If it // can't, no need to check at all. 
if stealTimersOrRunNextG && timerpMask.read(enum.position()) { - tnow, w, ran := checkTimers(p2, now) + tnow, w, ran := p2.timers.check(now) now = tnow if w != 0 && (pollUntil == 0 || w < pollUntil) { pollUntil = w @@ -3640,7 +3641,7 @@ func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p { func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 { for id, p2 := range allpSnapshot { if timerpMaskSnapshot.read(uint32(id)) { - w := nobarrierWakeTime(p2) + w := p2.timers.wakeTime() if w != 0 && (pollUntil == 0 || w < pollUntil) { pollUntil = w } @@ -3815,8 +3816,10 @@ func injectglist(glist *gList) { } npidle := int(sched.npidle.Load()) - var globq gQueue - var n int + var ( + globq gQueue + n int + ) for n = 0; n < npidle && !q.empty(); n++ { g := q.pop() globq.pushBack(g) @@ -3832,6 +3835,21 @@ func injectglist(glist *gList) { if !q.empty() { runqputbatch(pp, &q, qsize) } + + // Some P's might have become idle after we loaded `sched.npidle` + // but before any goroutines were added to the queue, which could + // lead to idle P's when there is work available in the global queue. + // That could potentially last until other goroutines become ready + // to run. That said, we need to find a way to hedge + // + // Calling wakep() here is the best bet, it will do nothing in the + // common case (no racing on `sched.npidle`), while it could wake one + // more P to execute G's, which might end up with >1 P's: the first one + // wakes another P and so forth until there is no more work, but this + // ought to be an extremely rare case. + // + // Also see "Worker thread parking/unparking" comment at the top of the file for details. + wakep() } // One round of scheduler: find a runnable goroutine and execute it. @@ -3932,72 +3950,6 @@ func dropg() { setGNoWB(&gp.m.curg, nil) } -// checkTimers runs any timers for the P that are ready. -// If now is not 0 it is the current time. 
-// It returns the passed time or the current time if now was passed as 0. -// and the time when the next timer should run or 0 if there is no next timer, -// and reports whether it ran any timers. -// If the time when the next timer should run is not 0, -// it is always larger than the returned time. -// We pass now in and out to avoid extra calls of nanotime. -// -//go:yeswritebarrierrec -func checkTimers(pp *p, now int64) (rnow, pollUntil int64, ran bool) { - // If it's not yet time for the first timer, or the first adjusted - // timer, then there is nothing to do. - next := pp.timer0When.Load() - nextAdj := pp.timerModifiedEarliest.Load() - if next == 0 || (nextAdj != 0 && nextAdj < next) { - next = nextAdj - } - - if next == 0 { - // No timers to run or adjust. - return now, 0, false - } - - if now == 0 { - now = nanotime() - } - if now < next { - // Next timer is not ready to run, but keep going - // if we would clear deleted timers. - // This corresponds to the condition below where - // we decide whether to call clearDeletedTimers. - if pp != getg().m.p.ptr() || int(pp.deletedTimers.Load()) <= int(pp.numTimers.Load()/4) { - return now, next, false - } - } - - lock(&pp.timersLock) - - if len(pp.timers) > 0 { - adjusttimers(pp, now) - for len(pp.timers) > 0 { - // Note that runtimer may temporarily unlock - // pp.timersLock. - if tw := runtimer(pp, now); tw != 0 { - if tw > 0 { - pollUntil = tw - } - break - } - ran = true - } - } - - // If this is the local P, and there are a lot of deleted timers, - // clear them out. We only do this for the local P to reduce - // lock contention on timersLock. - if pp == getg().m.p.ptr() && int(pp.deletedTimers.Load()) > len(pp.timers)/4 { - clearDeletedTimers(pp) - } - - unlock(&pp.timersLock) - - return now, pollUntil, ran -} - func parkunlock_c(gp *g, lock unsafe.Pointer) bool { unlock((*mutex)(lock)) return true @@ -4177,6 +4129,11 @@ func goexit1() { // goexit continuation on g0. 
func goexit0(gp *g) { + gdestroy(gp) + schedule() +} + +func gdestroy(gp *g) { mp := getg().m pp := mp.p.ptr() @@ -4213,7 +4170,7 @@ func goexit0(gp *g) { if GOARCH == "wasm" { // no threads yet on wasm gfput(pp, gp) - schedule() // never returns + return } if mp.lockedInt != 0 { @@ -4236,7 +4193,6 @@ func goexit0(gp *g) { mp.lockedExt = 0 } } - schedule() } // save updates getg().sched to refer to pc and sp so that a following @@ -4400,19 +4356,32 @@ func entersyscall_gcwait() { pp := gp.m.oldp.ptr() lock(&sched.lock) + trace := traceAcquire() if sched.stopwait > 0 && atomic.Cas(&pp.status, _Psyscall, _Pgcstop) { - trace := traceAcquire() if trace.ok() { - trace.GoSysBlock(pp) - // N.B. ProcSteal not necessary because if we succeed we're - // always stopping the P we just put into the syscall status. - trace.ProcStop(pp) + if goexperiment.ExecTracer2 { + // This is a steal in the new tracer. While it's very likely + // that we were the ones to put this P into _Psyscall, between + // then and now it's totally possible it had been stolen and + // then put back into _Psyscall for us to acquire here. In such + // case ProcStop would be incorrect. + // + // TODO(mknyszek): Consider emitting a ProcStop instead when + // gp.m.syscalltick == pp.syscalltick, since then we know we never + // lost the P. + trace.ProcSteal(pp, true) + } else { + trace.GoSysBlock(pp) + trace.ProcStop(pp) + } traceRelease(trace) } pp.syscalltick++ if sched.stopwait--; sched.stopwait == 0 { notewakeup(&sched.stopnote) } + } else if trace.ok() { + traceRelease(trace) } unlock(&sched.lock) } @@ -4590,12 +4559,19 @@ func exitsyscallfast(oldp *p) bool { } // Try to re-acquire the last P. + trace := traceAcquire() if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { // There's a cpu for us, so we can run. 
wirep(oldp) - exitsyscallfast_reacquired() + exitsyscallfast_reacquired(trace) + if trace.ok() { + traceRelease(trace) + } return true } + if trace.ok() { + traceRelease(trace) + } // Try to get any other idle P. if sched.pidle != 0 { @@ -4631,10 +4607,9 @@ func exitsyscallfast(oldp *p) bool { // syscall. // //go:nosplit -func exitsyscallfast_reacquired() { +func exitsyscallfast_reacquired(trace traceLocker) { gp := getg() if gp.m.syscalltick != gp.m.p.ptr().syscalltick { - trace := traceAcquire() if trace.ok() { // The p was retaken and then enter into syscall again (since gp.m.syscalltick has changed). // traceGoSysBlock for this syscall was already emitted, @@ -4651,7 +4626,6 @@ func exitsyscallfast_reacquired() { // Denote completion of the current syscall. trace.GoSysExit(true) } - traceRelease(trace) }) } gp.m.p.ptr().syscalltick++ @@ -4928,7 +4902,7 @@ func newproc1(fn *funcval, callergp *g, callerpc uintptr) *g { } } // Track initial transition? - newg.trackingSeq = uint8(fastrand()) + newg.trackingSeq = uint8(cheaprand()) if newg.trackingSeq%gTrackingPeriod == 0 { newg.tracking = true } @@ -5151,7 +5125,7 @@ func dolockOSThread() { // The calling goroutine will always execute in that thread, // and no other goroutine will execute in it, // until the calling goroutine has made as many calls to -// UnlockOSThread as to LockOSThread. +// [UnlockOSThread] as to LockOSThread. // If the calling goroutine exits without unlocking the thread, // the thread will be terminated. // @@ -5270,6 +5244,7 @@ func _ExternalCode() { _ExternalCode() } func _LostExternalCode() { _LostExternalCode() } func _GC() { _GC() } func _LostSIGPROFDuringAtomic64() { _LostSIGPROFDuringAtomic64() } +func _LostContendedRuntimeLock() { _LostContendedRuntimeLock() } func _VDSO() { _VDSO() } // Called if we receive a SIGPROF signal. 
@@ -5460,7 +5435,7 @@ func (pp *p) init(id int32) { pp.raceprocctx = raceproccreate() } } - lockInit(&pp.timersLock, lockRankTimers) + lockInit(&pp.timers.lock, lockRankTimers) // This P may get timers when it starts running. Set the mask here // since the P may not go through pidleget (notably P 0 on startup). @@ -5490,22 +5465,10 @@ func (pp *p) destroy() { globrunqputhead(pp.runnext.ptr()) pp.runnext = 0 } - if len(pp.timers) > 0 { - plocal := getg().m.p.ptr() - // The world is stopped, but we acquire timersLock to - // protect against sysmon calling timeSleepUntil. - // This is the only case where we hold the timersLock of - // more than one P, so there are no deadlock concerns. - lock(&plocal.timersLock) - lock(&pp.timersLock) - moveTimers(plocal, pp.timers) - pp.timers = nil - pp.numTimers.Store(0) - pp.deletedTimers.Store(0) - pp.timer0When.Store(0) - unlock(&pp.timersLock) - unlock(&plocal.timersLock) - } + + // Move all timers to the local P. + getg().m.p.ptr().timers.take(&pp.timers) + // Flush p's write barrier buffer. if gcphase != _GCoff { wbBufFlush1(pp) @@ -5535,7 +5498,7 @@ func (pp *p) destroy() { gfpurge(pp) traceProcFree(pp) if raceenabled { - if pp.timerRaceCtx != 0 { + if pp.timers.raceCtx != 0 { // The race detector code uses a callback to fetch // the proc context, so arrange for that callback // to see the right thing. @@ -5545,8 +5508,8 @@ func (pp *p) destroy() { phold := mp.p.ptr() mp.p.set(pp) - racectxend(pp.timerRaceCtx) - pp.timerRaceCtx = 0 + racectxend(pp.timers.raceCtx) + pp.timers.raceCtx = 0 mp.p.set(phold) } @@ -5741,15 +5704,23 @@ func wirep(pp *p) { gp := getg() if gp.m.p != 0 { - throw("wirep: already in go") + // Call on the systemstack to avoid a nosplit overflow build failure + // on some platforms when built with -N -l. See #64113. 
+ systemstack(func() { + throw("wirep: already in go") + }) } if pp.m != 0 || pp.status != _Pidle { - id := int64(0) - if pp.m != 0 { - id = pp.m.ptr().id - } - print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n") - throw("wirep: invalid p state") + // Call on the systemstack to avoid a nosplit overflow build failure + // on some platforms when built with -N -l. See #64113. + systemstack(func() { + id := int64(0) + if pp.m != 0 { + id = pp.m.ptr().id + } + print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n") + throw("wirep: invalid p state") + }) } gp.m.p.set(pp) pp.m.set(gp.m) @@ -5889,7 +5860,7 @@ func checkdead() { // There are no goroutines running, so we can look at the P's. for _, pp := range allp { - if len(pp.timers) > 0 { + if len(pp.timers.heap) > 0 { return } } @@ -5909,6 +5880,11 @@ var forcegcperiod int64 = 2 * 60 * 1e9 // golang.org/issue/42515 is needed on NetBSD. var needSysmonWorkaround bool = false +// haveSysmon indicates whether there is sysmon thread support. +// +// No threads on wasm yet, so no sysmon. +const haveSysmon = GOARCH != "wasm" + // Always runs without a P, so write barriers are not allowed. // //go:nowritebarrierrec @@ -6089,7 +6065,10 @@ func retake(now int64) uint32 { s := pp.status sysretake := false if s == _Prunning || s == _Psyscall { - // Preempt G if it's running for too long. + // Preempt G if it's running on the same schedtick for + // too long. This could be from a single long-running + // goroutine or a sequence of goroutines run via + // runnext, which share a single schedtick time slice. t := int64(pp.schedtick) if int64(pd.schedtick) != t { pd.schedtick = uint32(t) @@ -6122,8 +6101,8 @@ func retake(now int64) uint32 { // Otherwise the M from which we retake can exit the syscall, // increment nmidle and report deadlock. 
incidlelocked(-1) + trace := traceAcquire() if atomic.Cas(&pp.status, s, _Pidle) { - trace := traceAcquire() if trace.ok() { trace.GoSysBlock(pp) trace.ProcSteal(pp, false) @@ -6132,6 +6111,8 @@ func retake(now int64) uint32 { n++ pp.syscalltick++ handoffp(pp) + } else if trace.ok() { + traceRelease(trace) } incidlelocked(1) lock(&allpLock) @@ -6223,7 +6204,7 @@ func schedtrace(detailed bool) { } else { print("nil") } - print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers), "\n") + print(" runqsize=", t-h, " gfreecnt=", pp.gFree.n, " timerslen=", len(pp.timers.heap), "\n") } else { // In non-detailed mode format lengths of per-P run queues as: // [len1 len2 len3 len4] @@ -6445,46 +6426,6 @@ func (p pMask) clear(id int32) { atomic.And(&p[word], ^mask) } -// updateTimerPMask clears pp's timer mask if it has no timers on its heap. -// -// Ideally, the timer mask would be kept immediately consistent on any timer -// operations. Unfortunately, updating a shared global data structure in the -// timer hot path adds too much overhead in applications frequently switching -// between no timers and some timers. -// -// As a compromise, the timer mask is updated only on pidleget / pidleput. A -// running P (returned by pidleget) may add a timer at any time, so its mask -// must be set. An idle P (passed to pidleput) cannot add new timers while -// idle, so if it has no timers at that time, its mask may be cleared. -// -// Thus, we get the following effects on timer-stealing in findrunnable: -// -// - Idle Ps with no timers when they go idle are never checked in findrunnable -// (for work- or timer-stealing; this is the ideal case). -// - Running Ps must always be checked. -// - Idle Ps whose timers are stolen must continue to be checked until they run -// again, even after timer expiration. -// -// When the P starts running again, the mask should be set, as a timer may be -// added at any time. 
-// -// TODO(prattmic): Additional targeted updates may improve the above cases. -// e.g., updating the mask when stealing a timer. -func updateTimerPMask(pp *p) { - if pp.numTimers.Load() > 0 { - return - } - - // Looks like there are no timers, however another P may transiently - // decrement numTimers when handling a timerModified timer in - // checkTimers. We must take timersLock to serialize with these changes. - lock(&pp.timersLock) - if pp.numTimers.Load() == 0 { - timerpMask.clear(pp.id) - } - unlock(&pp.timersLock) -} - // pidleput puts p on the _Pidle list. now must be a relatively recent call // to nanotime or zero. Returns now or the current time if now was zero. // @@ -6600,7 +6541,18 @@ const randomizeScheduler = raceenabled // If the run queue is full, runnext puts g on the global queue. // Executed only by the owner P. func runqput(pp *p, gp *g, next bool) { - if randomizeScheduler && next && fastrandn(2) == 0 { + if !haveSysmon && next { + // A runnext goroutine shares the same time slice as the + // current goroutine (inheritTime from runqget). To prevent a + // ping-pong pair of goroutines from starving all others, we + // depend on sysmon to preempt "long-running goroutines". That + // is, any set of goroutines sharing the same time slice. + // + // If there is no sysmon, we must avoid runnext entirely or + // risk starvation. 
+ next = false + } + if randomizeScheduler && next && randn(2) == 0 { next = false } @@ -6653,7 +6605,7 @@ func runqputslow(pp *p, gp *g, h, t uint32) bool { if randomizeScheduler { for i := uint32(1); i <= n; i++ { - j := fastrandn(i + 1) + j := cheaprandn(i + 1) batch[i], batch[j] = batch[j], batch[i] } } @@ -6694,7 +6646,7 @@ func runqputbatch(pp *p, q *gQueue, qsize int) { return (pp.runqtail + o) % uint32(len(pp.runq)) } for i := uint32(1); i < n; i++ { - j := fastrandn(i + 1) + j := cheaprandn(i + 1) pp.runq[off(i)], pp.runq[off(j)] = pp.runq[off(j)], pp.runq[off(i)] } } diff --git a/src/runtime/profbuf.go b/src/runtime/profbuf.go index 5772a8020c..d3afbcd8c7 100644 --- a/src/runtime/profbuf.go +++ b/src/runtime/profbuf.go @@ -1,4 +1,4 @@ -// Copyright 2017 The Go Authors. All rights reserved. +// Copyright 2017 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/runtime/race.go b/src/runtime/race.go index f9cbc1f54b..ca4f051979 100644 --- a/src/runtime/race.go +++ b/src/runtime/race.go @@ -179,7 +179,7 @@ func raceSymbolizeCode(ctx *symbolizeCodeContext) { // Ignore wrappers, unless we're at the outermost frame of u. // A non-inlined wrapper frame always means we have a physical // frame consisting entirely of wrappers, in which case we'll - // take a outermost wrapper over nothing. + // take an outermost wrapper over nothing. continue } diff --git a/src/runtime/race/README b/src/runtime/race/README index 75484f71eb..47c51ca9c1 100644 --- a/src/runtime/race/README +++ b/src/runtime/race/README @@ -13,5 +13,5 @@ internal/amd64v1/race_windows.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d internal/amd64v3/race_linux.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5. race_darwin_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5. 
race_linux_arm64.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5. -race_linux_ppc64le.syso built with LLVM 41cb504b7c4b18ac15830107431a0c1eec73a6b2 and Go 851ecea4cc99ab276109493477b2c7e30c253ea8. +race_linux_ppc64le.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5. race_linux_s390x.syso built with LLVM 51bfeff0e4b0757ff773da6882f4d538996c9b04 and Go e7d582b55dda36e76ce4d0ce770139ca0915b7c5. diff --git a/src/runtime/race/race_linux_ppc64le.syso b/src/runtime/race/race_linux_ppc64le.syso index 1939f29ac0..49824a9d18 100644 Binary files a/src/runtime/race/race_linux_ppc64le.syso and b/src/runtime/race/race_linux_ppc64le.syso differ diff --git a/src/runtime/race_arm64.s b/src/runtime/race_arm64.s index c818345852..ae0030cf10 100644 --- a/src/runtime/race_arm64.s +++ b/src/runtime/race_arm64.s @@ -419,6 +419,10 @@ TEXT racecall<>(SB), NOSPLIT|NOFRAME, $0-0 MOVD (g_sched+gobuf_sp)(R11), R12 MOVD R12, RSP call: + // Decrement SP past where the frame pointer is saved in the Go arm64 + // ABI (one word below the stack pointer) so the race detector library + // code doesn't clobber it + SUB $16, RSP BL R9 MOVD R19, RSP JMP (R20) diff --git a/src/runtime/rand.go b/src/runtime/rand.go new file mode 100644 index 0000000000..62577dda91 --- /dev/null +++ b/src/runtime/rand.go @@ -0,0 +1,253 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Random number generation + +package runtime + +import ( + "internal/chacha8rand" + "internal/goarch" + "runtime/internal/math" + "unsafe" + _ "unsafe" // for go:linkname +) + +// OS-specific startup can set startupRand if the OS passes +// random data to the process at startup time. +// For example Linux passes 16 bytes in the auxv vector. 
+var startupRand []byte + +// globalRand holds the global random state. +// It is only used at startup and for creating new m's. +// Otherwise the per-m random state should be used +// by calling goodrand. +var globalRand struct { + lock mutex + seed [32]byte + state chacha8rand.State + init bool +} + +var readRandomFailed bool + +// randinit initializes the global random state. +// It must be called before any use of grand. +func randinit() { + lock(&globalRand.lock) + if globalRand.init { + fatal("randinit twice") + } + + seed := &globalRand.seed + if startupRand != nil { + for i, c := range startupRand { + seed[i%len(seed)] ^= c + } + clear(startupRand) + startupRand = nil + } else { + if readRandom(seed[:]) != len(seed) { + // readRandom should never fail, but if it does we'd rather + // not make Go binaries completely unusable, so make up + // some random data based on the current time. + readRandomFailed = true + readTimeRandom(seed[:]) + } + } + globalRand.state.Init(*seed) + clear(seed[:]) + globalRand.init = true + unlock(&globalRand.lock) +} + +// readTimeRandom stretches any entropy in the current time +// into entropy the length of r and XORs it into r. +// This is a fallback for when readRandom does not read +// the full requested amount. +// Whatever entropy r already contained is preserved. +func readTimeRandom(r []byte) { + // Inspired by wyrand. + // An earlier version of this code used getg().m.procid as well, + // but note that this is called so early in startup that procid + // is not initialized yet. + v := uint64(nanotime()) + for len(r) > 0 { + v ^= 0xa0761d6478bd642f + v *= 0xe7037ed1a0b428db + size := 8 + if len(r) < 8 { + size = len(r) + } + for i := 0; i < size; i++ { + r[i] ^= byte(v >> (8 * i)) + } + r = r[size:] + v = v>>32 | v<<32 + } +} + +// bootstrapRand returns a random uint64 from the global random generator. 
+func bootstrapRand() uint64 { + lock(&globalRand.lock) + if !globalRand.init { + fatal("randinit missed") + } + for { + if x, ok := globalRand.state.Next(); ok { + unlock(&globalRand.lock) + return x + } + globalRand.state.Refill() + } +} + +// bootstrapRandReseed reseeds the bootstrap random number generator, +// clearing from memory any trace of previously returned random numbers. +func bootstrapRandReseed() { + lock(&globalRand.lock) + if !globalRand.init { + fatal("randinit missed") + } + globalRand.state.Reseed() + unlock(&globalRand.lock) +} + +// rand32 is uint32(rand()), called from compiler-generated code. +// +//go:nosplit +func rand32() uint32 { + return uint32(rand()) +} + +// rand returns a random uint64 from the per-m chacha8 state. +// Do not change signature: used via linkname from other packages. +// +//go:nosplit +//go:linkname rand +func rand() uint64 { + // Note: We avoid acquirem here so that in the fast path + // there is just a getg, an inlined c.Next, and a return. + // The performance difference on a 16-core AMD is + // 3.7ns/call this way versus 4.3ns/call with acquirem (+16%). + mp := getg().m + c := &mp.chacha8 + for { + // Note: c.Next is marked nosplit, + // so we don't need to use mp.locks + // on the fast path, which is that the + // first attempt succeeds. + x, ok := c.Next() + if ok { + return x + } + mp.locks++ // hold m even though c.Refill may do stack split checks + c.Refill() + mp.locks-- + } +} + +// mrandinit initializes the random state of an m. +func mrandinit(mp *m) { + var seed [4]uint64 + for i := range seed { + seed[i] = bootstrapRand() + } + bootstrapRandReseed() // erase key we just extracted + mp.chacha8.Init64(seed) + mp.cheaprand = rand() +} + +// randn is like rand() % n but faster. +// Do not change signature: used via linkname from other packages. 
+// +//go:nosplit +//go:linkname randn +func randn(n uint32) uint32 { + // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + return uint32((uint64(uint32(rand())) * uint64(n)) >> 32) +} + +// cheaprand is a non-cryptographic-quality 32-bit random generator +// suitable for calling at very high frequency (such as during scheduling decisions) +// and at sensitive moments in the runtime (such as during stack unwinding). +// it is "cheap" in the sense of both expense and quality. +// +// cheaprand must not be exported to other packages: +// the rule is that other packages using runtime-provided +// randomness must always use rand. +// +//go:nosplit +func cheaprand() uint32 { + mp := getg().m + // Implement wyrand: https://github.com/wangyi-fudan/wyhash + // Only the platform that math.Mul64 can be lowered + // by the compiler should be in this list. + if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64| + goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le| + goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 { + mp.cheaprand += 0xa0761d6478bd642f + hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db) + return uint32(hi ^ lo) + } + + // Implement xorshift64+: 2 32-bit xorshift sequences added together. + // Shift triplet [17,7,16] was calculated as indicated in Marsaglia's + // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf + // This generator passes the SmallCrush suite, part of TestU01 framework: + // http://simul.iro.umontreal.ca/testu01/tu01.html + t := (*[2]uint32)(unsafe.Pointer(&mp.cheaprand)) + s1, s0 := t[0], t[1] + s1 ^= s1 << 17 + s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16 + t[0], t[1] = s0, s1 + return s0 + s1 +} + +// cheaprand64 is a non-cryptographic-quality 63-bit random generator +// suitable for calling at very high frequency (such as during sampling decisions). +// it is "cheap" in the sense of both expense and quality. 
+// +// cheaprand64 must not be exported to other packages: +// the rule is that other packages using runtime-provided +// randomness must always use rand. +// +//go:nosplit +func cheaprand64() int64 { + return int64(cheaprand())<<31 ^ int64(cheaprand()) +} + +// cheaprandn is like cheaprand() % n but faster. +// +// cheaprandn must not be exported to other packages: +// the rule is that other packages using runtime-provided +// randomness must always use randn. +// +//go:nosplit +func cheaprandn(n uint32) uint32 { + // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ + return uint32((uint64(cheaprand()) * uint64(n)) >> 32) +} + +// Too much legacy code has go:linkname references +// to runtime.fastrand and friends, so keep these around for now. +// Code should migrate to math/rand/v2.Uint64, +// which is just as fast, but that's only available in Go 1.22+. +// It would be reasonable to remove these in Go 1.24. +// Do not call these from package runtime. + +//go:linkname legacy_fastrand runtime.fastrand +func legacy_fastrand() uint32 { + return uint32(rand()) +} + +//go:linkname legacy_fastrandn runtime.fastrandn +func legacy_fastrandn(n uint32) uint32 { + return randn(n) +} + +//go:linkname legacy_fastrand64 runtime.fastrand64 +func legacy_fastrand64() uint64 { + return rand() +} diff --git a/src/runtime/rand_test.go b/src/runtime/rand_test.go index 92d07ebada..baecb6984d 100644 --- a/src/runtime/rand_test.go +++ b/src/runtime/rand_test.go @@ -8,8 +8,20 @@ import ( . 
"runtime" "strconv" "testing" + _ "unsafe" // for go:linkname ) +func TestReadRandom(t *testing.T) { + if *ReadRandomFailed { + switch GOOS { + default: + t.Fatalf("readRandom failed at startup") + case "plan9": + // ok + } + } +} + func BenchmarkFastrand(b *testing.B) { b.RunParallel(func(pb *testing.PB) { for pb.Next() { @@ -51,3 +63,35 @@ func BenchmarkFastrandn(b *testing.B) { }) } } + +//go:linkname fastrand runtime.fastrand +func fastrand() uint32 + +//go:linkname fastrandn runtime.fastrandn +func fastrandn(uint32) uint32 + +//go:linkname fastrand64 runtime.fastrand64 +func fastrand64() uint64 + +func TestLegacyFastrand(t *testing.T) { + // Testing mainly that the calls work at all, + // but check that all three don't return the same number (1 in 2^64 chance) + { + x, y, z := fastrand(), fastrand(), fastrand() + if x == y && y == z { + t.Fatalf("fastrand three times = %#x, %#x, %#x, want different numbers", x, y, z) + } + } + { + x, y, z := fastrandn(1e9), fastrandn(1e9), fastrandn(1e9) + if x == y && y == z { + t.Fatalf("fastrandn three times = %#x, %#x, %#x, want different numbers", x, y, z) + } + } + { + x, y, z := fastrand64(), fastrand64(), fastrand64() + if x == y && y == z { + t.Fatalf("fastrand64 three times = %#x, %#x, %#x, want different numbers", x, y, z) + } + } +} diff --git a/src/runtime/runtime-gdb_test.go b/src/runtime/runtime-gdb_test.go index 1ae6ff041a..5defe2f615 100644 --- a/src/runtime/runtime-gdb_test.go +++ b/src/runtime/runtime-gdb_test.go @@ -54,9 +54,6 @@ func checkGdbEnvironment(t *testing.T) { case "plan9": t.Skip("there is no gdb on Plan 9") } - if final := os.Getenv("GOROOT_FINAL"); final != "" && testenv.GOROOT(t) != final { - t.Skip("gdb test can fail with GOROOT_FINAL pending") - } } func checkGdbVersion(t *testing.T) { @@ -297,24 +294,6 @@ func testGdbPython(t *testing.T, cgo bool) { } got = bytes.ReplaceAll(got, []byte("\r\n"), []byte("\n")) // normalize line endings - firstLine, _, _ := bytes.Cut(got, []byte("\n")) - if 
string(firstLine) != "Loading Go Runtime support." { - // This can happen when using all.bash with - // GOROOT_FINAL set, because the tests are run before - // the final installation of the files. - cmd := exec.Command(testenv.GoToolPath(t), "env", "GOROOT") - cmd.Env = []string{} - out, err := cmd.CombinedOutput() - if err != nil && bytes.Contains(out, []byte("cannot find GOROOT")) { - t.Skipf("skipping because GOROOT=%s does not exist", testenv.GOROOT(t)) - } - - _, file, _, _ := runtime.Caller(1) - - t.Logf("package testing source file: %s", file) - t.Fatalf("failed to load Go runtime support: %s\n%s", firstLine, got) - } - // Extract named BEGIN...END blocks from output partRe := regexp.MustCompile(`(?ms)^BEGIN ([^\n]*)\n(.*?)\nEND`) blocks := map[string]string{} diff --git a/src/runtime/runtime-gdb_unix_test.go b/src/runtime/runtime-gdb_unix_test.go index 5413306f77..8b602d13d9 100644 --- a/src/runtime/runtime-gdb_unix_test.go +++ b/src/runtime/runtime-gdb_unix_test.go @@ -20,6 +20,43 @@ import ( "testing" ) +func canGenerateCore(t *testing.T) bool { + // Ensure there is enough RLIMIT_CORE available to generate a full core. + var lim syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + t.Fatalf("error getting rlimit: %v", err) + } + // Minimum RLIMIT_CORE max to allow. This is a conservative estimate. + // Most systems allow infinity. + const minRlimitCore = 100 << 20 // 100 MB + if lim.Max < minRlimitCore { + t.Skipf("RLIMIT_CORE max too low: %#+v", lim) + } + + // Make sure core pattern will send core to the current directory. 
+ b, err := os.ReadFile("/proc/sys/kernel/core_pattern") + if err != nil { + t.Fatalf("error reading core_pattern: %v", err) + } + if string(b) != "core\n" { + t.Skipf("Unexpected core pattern %q", string(b)) + } + + coreUsesPID := false + b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid") + if err == nil { + switch string(bytes.TrimSpace(b)) { + case "0": + case "1": + coreUsesPID = true + default: + t.Skipf("unexpected core_uses_pid value %q", string(b)) + } + } + return coreUsesPID +} + const coreSignalSource = ` package main @@ -81,45 +118,12 @@ func TestGdbCoreSignalBacktrace(t *testing.T) { t.Parallel() checkGdbVersion(t) - // Ensure there is enough RLIMIT_CORE available to generate a full core. - var lim syscall.Rlimit - err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim) - if err != nil { - t.Fatalf("error getting rlimit: %v", err) - } - // Minimum RLIMIT_CORE max to allow. This is a conservative estimate. - // Most systems allow infinity. - const minRlimitCore = 100 << 20 // 100 MB - if lim.Max < minRlimitCore { - t.Skipf("RLIMIT_CORE max too low: %#+v", lim) - } - - // Make sure core pattern will send core to the current directory. - b, err := os.ReadFile("/proc/sys/kernel/core_pattern") - if err != nil { - t.Fatalf("error reading core_pattern: %v", err) - } - if string(b) != "core\n" { - t.Skipf("Unexpected core pattern %q", string(b)) - } - - coreUsesPID := false - b, err = os.ReadFile("/proc/sys/kernel/core_uses_pid") - if err == nil { - switch string(bytes.TrimSpace(b)) { - case "0": - case "1": - coreUsesPID = true - default: - t.Skipf("unexpected core_uses_pid value %q", string(b)) - } - } - - dir := t.TempDir() + coreUsesPID := canGenerateCore(t) // Build the source code. 
+ dir := t.TempDir() src := filepath.Join(dir, "main.go") - err = os.WriteFile(src, []byte(coreSignalSource), 0644) + err := os.WriteFile(src, []byte(coreSignalSource), 0644) if err != nil { t.Fatalf("failed to create file: %v", err) } @@ -230,3 +234,146 @@ func TestGdbCoreSignalBacktrace(t *testing.T) { t.Fatalf("could not find runtime symbol in backtrace after signal handler:\n%s", rest) } } + +const coreCrashThreadSource = ` +package main + +/* +#cgo CFLAGS: -g -O0 +#include +#include +void trigger_crash() +{ + int* ptr = NULL; + *ptr = 1024; +} +*/ +import "C" +import ( + "flag" + "fmt" + "os" + "runtime/debug" + "syscall" +) + +func enableCore() { + debug.SetTraceback("crash") + + var lim syscall.Rlimit + err := syscall.Getrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + panic(fmt.Sprintf("error getting rlimit: %v", err)) + } + lim.Cur = lim.Max + fmt.Fprintf(os.Stderr, "Setting RLIMIT_CORE = %+#v\n", lim) + err = syscall.Setrlimit(syscall.RLIMIT_CORE, &lim) + if err != nil { + panic(fmt.Sprintf("error setting rlimit: %v", err)) + } +} + +func main() { + flag.Parse() + + enableCore() + + C.trigger_crash() +} +` + +// TestGdbCoreCrashThreadBacktrace tests that runtime could let the fault thread to crash process +// and make fault thread as number one thread while gdb in a core file +func TestGdbCoreCrashThreadBacktrace(t *testing.T) { + if runtime.GOOS != "linux" { + // N.B. This test isn't fundamentally Linux-only, but it needs + // to know how to enable/find core files on each OS. + t.Skip("Test only supported on Linux") + } + if runtime.GOARCH != "386" && runtime.GOARCH != "amd64" { + // TODO(go.dev/issue/25218): Other architectures use sigreturn + // via VDSO, which we somehow don't handle correctly. 
+ t.Skip("Backtrace through signal handler only works on 386 and amd64") + } + + testenv.SkipFlaky(t, 65138) + + testenv.MustHaveCGO(t) + checkGdbEnvironment(t) + t.Parallel() + checkGdbVersion(t) + + coreUsesPID := canGenerateCore(t) + + // Build the source code. + dir := t.TempDir() + src := filepath.Join(dir, "main.go") + err := os.WriteFile(src, []byte(coreCrashThreadSource), 0644) + if err != nil { + t.Fatalf("failed to create file: %v", err) + } + cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "a.exe", "main.go") + cmd.Dir = dir + out, err := testenv.CleanCmdEnv(cmd).CombinedOutput() + if err != nil { + t.Fatalf("building source %v\n%s", err, out) + } + + // Start the test binary. + cmd = testenv.Command(t, "./a.exe") + cmd.Dir = dir + var output bytes.Buffer + cmd.Stdout = &output // for test logging + cmd.Stderr = &output + + if err := cmd.Start(); err != nil { + t.Fatalf("error starting test binary: %v", err) + } + + pid := cmd.Process.Pid + + err = cmd.Wait() + t.Logf("child output:\n%s", output.String()) + if err == nil { + t.Fatalf("Wait succeeded, want SIGABRT") + } + ee, ok := err.(*exec.ExitError) + if !ok { + t.Fatalf("Wait err got %T %v, want exec.ExitError", ee, ee) + } + ws, ok := ee.Sys().(syscall.WaitStatus) + if !ok { + t.Fatalf("Sys got %T %v, want syscall.WaitStatus", ee.Sys(), ee.Sys()) + } + if ws.Signal() != syscall.SIGABRT { + t.Fatalf("Signal got %d want SIGABRT", ws.Signal()) + } + if !ws.CoreDump() { + t.Fatalf("CoreDump got %v want true", ws.CoreDump()) + } + + coreFile := "core" + if coreUsesPID { + coreFile += fmt.Sprintf(".%d", pid) + } + + // Execute gdb commands. + args := []string{"-nx", "-batch", + "-iex", "add-auto-load-safe-path " + filepath.Join(testenv.GOROOT(t), "src", "runtime"), + "-ex", "backtrace", + filepath.Join(dir, "a.exe"), + filepath.Join(dir, coreFile), + } + cmd = testenv.Command(t, "gdb", args...) 
+ + got, err := cmd.CombinedOutput() + t.Logf("gdb output:\n%s", got) + if err != nil { + t.Fatalf("gdb exited with error: %v", err) + } + + re := regexp.MustCompile(`#.* trigger_crash`) + if found := re.Find(got) != nil; !found { + t.Fatalf("could not find trigger_crash in backtrace") + } +} diff --git a/src/runtime/runtime-lldb_test.go b/src/runtime/runtime-lldb_test.go index 19a6cc6f8d..e00d6cf202 100644 --- a/src/runtime/runtime-lldb_test.go +++ b/src/runtime/runtime-lldb_test.go @@ -135,9 +135,6 @@ intvar = 42 func TestLldbPython(t *testing.T) { testenv.MustHaveGoBuild(t) - if final := os.Getenv("GOROOT_FINAL"); final != "" && runtime.GOROOT() != final { - t.Skip("gdb test can fail with GOROOT_FINAL pending") - } testenv.SkipFlaky(t, 31188) checkLldbPython(t) diff --git a/src/runtime/runtime.go b/src/runtime/runtime.go index 0829a84e43..05a2098fcd 100644 --- a/src/runtime/runtime.go +++ b/src/runtime/runtime.go @@ -167,12 +167,17 @@ func (g *godebugInc) IncNonDefault() { if newInc == nil { return } - // If other goroutines are racing here, no big deal. One will win, - // and all the inc functions will be using the same underlying - // *godebug.Setting. inc = new(func()) *inc = (*newInc)(g.name) - g.inc.Store(inc) + if raceenabled { + racereleasemerge(unsafe.Pointer(&g.inc)) + } + if !g.inc.CompareAndSwap(nil, inc) { + inc = g.inc.Load() + } + } + if raceenabled { + raceacquire(unsafe.Pointer(&g.inc)) } (*inc)() } @@ -212,10 +217,75 @@ func syscall_runtimeUnsetenv(key string) { } // writeErrStr writes a string to descriptor 2. +// If SetCrashOutput(f) was called, it also writes to f. // //go:nosplit func writeErrStr(s string) { - write(2, unsafe.Pointer(unsafe.StringData(s)), int32(len(s))) + writeErrData(unsafe.StringData(s), int32(len(s))) +} + +// writeErrData is the common parts of writeErr{,Str}. +// +//go:nosplit +func writeErrData(data *byte, n int32) { + write(2, unsafe.Pointer(data), n) + + // If crashing, print a copy to the SetCrashOutput fd. 
+ gp := getg() + if gp != nil && gp.m.dying > 0 || + gp == nil && panicking.Load() > 0 { + if fd := crashFD.Load(); fd != ^uintptr(0) { + write(fd, unsafe.Pointer(data), n) + } + } +} + +// crashFD is an optional file descriptor to use for fatal panics, as +// set by debug.SetCrashOutput (see #42888). If it is a valid fd (not +// all ones), writeErr and related functions write to it in addition +// to standard error. +// +// Initialized to -1 in schedinit. +var crashFD atomic.Uintptr + +//go:linkname setCrashFD +func setCrashFD(fd uintptr) uintptr { + // Don't change the crash FD if a crash is already in progress. + // + // Unlike the case below, this is not required for correctness, but it + // is generally nicer to have all of the crash output go to the same + // place rather than getting split across two different FDs. + if panicking.Load() > 0 { + return ^uintptr(0) + } + + old := crashFD.Swap(fd) + + // If we are panicking, don't return the old FD to runtime/debug for + // closing. writeErrData may have already read the old FD from crashFD + // before the swap and closing it would cause the write to be lost [1]. + // The old FD will never be closed, but we are about to crash anyway. + // + // On the writeErrData thread, panicking.Add(1) happens-before + // crashFD.Load() [2]. + // + // On this thread, swapping old FD for new in crashFD happens-before + // panicking.Load() > 0. + // + // Therefore, if panicking.Load() == 0 here (old FD will be closed), it + // is impossible for the writeErrData thread to observe + // crashFD.Load() == old FD. + // + // [1] Or, if really unlucky, another concurrent open could reuse the + // FD, sending the write into an unrelated file. + // + // [2] If gp != nil, it occurs when incrementing gp.m.dying in + // startpanic_m. If gp == nil, we read panicking.Load() > 0, so an Add + // must have happened-before. 
+ if panicking.Load() > 0 { + return ^uintptr(0) + } + return old } // auxv is populated on relevant platforms but defined here for all platforms diff --git a/src/runtime/runtime1.go b/src/runtime/runtime1.go index 489dcdd79c..afe1bdd298 100644 --- a/src/runtime/runtime1.go +++ b/src/runtime/runtime1.go @@ -307,26 +307,28 @@ type dbgVar struct { // existing int var for that value, which may // already have an initial value. var debug struct { - cgocheck int32 - clobberfree int32 - dontfreezetheworld int32 - efence int32 - gccheckmark int32 - gcpacertrace int32 - gcshrinkstackoff int32 - gcstoptheworld int32 - gctrace int32 - invalidptr int32 - madvdontneed int32 // for Linux; issue 28466 - scavtrace int32 - scheddetail int32 - schedtrace int32 - tracebackancestors int32 - asyncpreemptoff int32 - harddecommit int32 - adaptivestackstart int32 - tracefpunwindoff int32 - traceadvanceperiod int32 + cgocheck int32 + clobberfree int32 + disablethp int32 + dontfreezetheworld int32 + efence int32 + gccheckmark int32 + gcpacertrace int32 + gcshrinkstackoff int32 + gcstoptheworld int32 + gctrace int32 + invalidptr int32 + madvdontneed int32 // for Linux; issue 28466 + runtimeContentionStacks atomic.Int32 + scavtrace int32 + scheddetail int32 + schedtrace int32 + tracebackancestors int32 + asyncpreemptoff int32 + harddecommit int32 + adaptivestackstart int32 + tracefpunwindoff int32 + traceadvanceperiod int32 // debug.malloc is used as a combined debug check // in the malloc function and should be set @@ -343,6 +345,7 @@ var dbgvars = []*dbgVar{ {name: "allocfreetrace", value: &debug.allocfreetrace}, {name: "clobberfree", value: &debug.clobberfree}, {name: "cgocheck", value: &debug.cgocheck}, + {name: "disablethp", value: &debug.disablethp}, {name: "dontfreezetheworld", value: &debug.dontfreezetheworld}, {name: "efence", value: &debug.efence}, {name: "gccheckmark", value: &debug.gccheckmark}, @@ -352,6 +355,7 @@ var dbgvars = []*dbgVar{ {name: "gctrace", value: 
&debug.gctrace}, {name: "invalidptr", value: &debug.invalidptr}, {name: "madvdontneed", value: &debug.madvdontneed}, + {name: "runtimecontentionstacks", atomic: &debug.runtimeContentionStacks}, {name: "sbrk", value: &debug.sbrk}, {name: "scavtrace", value: &debug.scavtrace}, {name: "scheddetail", value: &debug.scheddetail}, @@ -619,7 +623,6 @@ func reflect_resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { //go:linkname reflect_resolveTextOff reflect.resolveTextOff func reflect_resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer { return toRType((*_type)(rtype)).textOff(textOff(off)) - } // reflectlite_resolveNameOff resolves a name offset from a base pointer. diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go index e64c3c5695..985c1ffab4 100644 --- a/src/runtime/runtime2.go +++ b/src/runtime/runtime2.go @@ -6,6 +6,7 @@ package runtime import ( "internal/abi" + "internal/chacha8rand" "internal/goarch" "runtime/internal/atomic" "runtime/internal/sys" @@ -482,6 +483,7 @@ type g struct { // inMarkAssist indicates whether the goroutine is in mark assist. // Used by the execution tracer. inMarkAssist bool + coroexit bool // argument to coroswitch_m raceignore int8 // ignore race detection events nocgocallback bool // whether disable callback from C @@ -506,6 +508,8 @@ type g struct { timer *timer // cached timer for time.Sleep selectDone atomic.Uint32 // are we participating in a select and did someone win the race? 
+ coroarg *coro // argument during coroutine transfers + // goroutineProfiled indicates the status of this goroutine's stack for the // current in-progress goroutine profile goroutineProfiled goroutineProfileStateHolder @@ -577,7 +581,6 @@ type m struct { isExtraInC bool // m is an extra m that is not executing Go code isExtraInSig bool // m is an extra m in a signal handler freeWait atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait) - fastrand uint64 needextram bool traceback uint8 ncgocall uint64 // number of cgo calls in total @@ -593,6 +596,8 @@ type m struct { lockedInt uint32 // tracking for internal lockOSThread nextwaitm muintptr // next m waiting for lock + mLockProfile mLockProfile // fields relating to runtime.lock contention + // wait* are used to carry arguments from gopark into park_m, because // there's no stack to put them on. That is their sole purpose. waitunlockf func(*g, unsafe.Pointer) bool @@ -630,6 +635,9 @@ type m struct { mOS + chacha8 chacha8rand.State + cheaprand uint64 + // Up to 10 locks held by this m, maintained by the lock ranking code. locksHeldLen int locksHeld [10]heldLockInfo @@ -700,16 +708,6 @@ type p struct { palloc persistentAlloc // per-P to avoid mutex - // The when field of the first entry on the timer heap. - // This is 0 if the timer heap is empty. - timer0When atomic.Int64 - - // The earliest known nextwhen field of a timer with - // timerModifiedEarlier status. Because the timer may have been - // modified again, there need not be any timer with this value. - // This is 0 if there are no timerModifiedEarlier timers. - timerModifiedEarliest atomic.Int64 - // Per-P GC state gcAssistTime int64 // Nanoseconds in assistAlloc gcFractionalMarkTime int64 // Nanoseconds in fractional mark worker (atomic) @@ -743,23 +741,8 @@ type p struct { // writing any stats. Its value is even when not, odd when it is. statsSeq atomic.Uint32 - // Lock for timers. 
We normally access the timers while running - // on this P, but the scheduler can also do it from a different P. - timersLock mutex - - // Actions to take at some time. This is used to implement the - // standard library's time package. - // Must hold timersLock to access. - timers []*timer - - // Number of timers in P's heap. - numTimers atomic.Uint32 - - // Number of timerDeleted timers in P's heap. - deletedTimers atomic.Uint32 - - // Race context used while executing timer functions. - timerRaceCtx uintptr + // Timer heap. + timers timers // maxStackScanDelta accumulates the amount of stack space held by // live goroutines (i.e. those eligible for stack scanning). @@ -855,7 +838,7 @@ type schedt struct { sysmonwait atomic.Bool sysmonnote note - // safepointFn should be called on each P at the next GC + // safePointFn should be called on each P at the next GC // safepoint if p.runSafePointFn is set. safePointFn func(*p) safePointWait int32 @@ -900,6 +883,12 @@ type schedt struct { // stwTotalTimeOther covers the others. stwTotalTimeGC timeHistogram stwTotalTimeOther timeHistogram + + // totalRuntimeLockWaitTime (plus the value of lockWaitTime on each M in + // allm) is the sum of time goroutines have spent in _Grunnable and with an + // M, but waiting for locks within the runtime. This field stores the value + // for Ms that have exited. + totalRuntimeLockWaitTime atomic.Int64 } // Values for the flags field of a sigTabT. @@ -976,17 +965,7 @@ type funcinl struct { startLine int32 } -// layout of Itab known to compilers -// allocated in non-garbage-collected memory -// Needs to be in sync with -// ../cmd/compile/internal/reflectdata/reflect.go:/^func.WritePluginTable. -type itab struct { - inter *interfacetype - _type *_type - hash uint32 // copy of _type.hash. Used for type switches. - _ [4]byte - fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter. -} +type itab = abi.ITab // Lock-free stack node. // Also known to export_test.go. 
@@ -1001,27 +980,6 @@ type forcegcstate struct { idle atomic.Bool } -// extendRandom extends the random numbers in r[:n] to the whole slice r. -// Treats n<0 as n==0. -func extendRandom(r []byte, n int) { - if n < 0 { - n = 0 - } - for n < len(r) { - // Extend random bits using hash function & time seed - w := n - if w > 16 { - w = 16 - } - h := memhash(unsafe.Pointer(&r[n-w]), uintptr(nanotime()), uintptr(w)) - for i := 0; i < goarch.PtrSize && n < len(r); i++ { - r[n] = byte(h) - n++ - h >>= 8 - } - } -} - // A _defer holds an entry on the list of deferred calls. // If you add a field here, add code to clear it in deferProcStack. // This struct must match the code in cmd/compile/internal/ssagen/ssa.go:deferstruct @@ -1134,6 +1092,8 @@ const ( waitReasonFlushProcCaches // "flushing proc caches" waitReasonTraceGoroutineStatus // "trace goroutine status" waitReasonTraceProcStatus // "trace proc status" + waitReasonPageTraceFlush // "page trace flush" + waitReasonCoroutine // "coroutine" ) var waitReasonStrings = [...]string{ @@ -1172,6 +1132,8 @@ var waitReasonStrings = [...]string{ waitReasonFlushProcCaches: "flushing proc caches", waitReasonTraceGoroutineStatus: "trace goroutine status", waitReasonTraceProcStatus: "trace proc status", + waitReasonPageTraceFlush: "page trace flush", + waitReasonCoroutine: "coroutine", } func (w waitReason) String() string { @@ -1230,7 +1192,9 @@ var ( processorVersionInfo uint32 isIntel bool - goarm uint8 // set by cmd/link on arm systems + // set by cmd/link on arm systems + goarm uint8 + goarmsoftfp uint8 ) // Set by the linker so the runtime can determine the buildmode. 
diff --git a/src/runtime/rwmutex.go b/src/runtime/rwmutex.go index ede3d13599..bf3b9a1cae 100644 --- a/src/runtime/rwmutex.go +++ b/src/runtime/rwmutex.go @@ -25,6 +25,43 @@ type rwmutex struct { readerCount atomic.Int32 // number of pending readers readerWait atomic.Int32 // number of departing readers + + readRank lockRank // semantic lock rank for read locking +} + +// Lock ranking an rwmutex has two aspects: +// +// Semantic ranking: this rwmutex represents some higher level lock that +// protects some resource (e.g., allocmLock protects creation of new Ms). The +// read and write locks of that resource need to be represented in the lock +// rank. +// +// Internal ranking: as an implementation detail, rwmutex uses two mutexes: +// rLock and wLock. These have lock order requirements: wLock must be locked +// before rLock. This also needs to be represented in the lock rank. +// +// Semantic ranking is represented by acquiring readRank during read lock and +// writeRank during write lock. +// +// wLock is held for the duration of a write lock, so it uses writeRank +// directly, both for semantic and internal ranking. rLock is only held +// temporarily inside the rlock/lock methods, so it uses readRankInternal to +// represent internal ranking. Semantic ranking is represented by a separate +// acquire of readRank for the duration of a read lock. +// +// The lock ranking must document this ordering: +// - readRankInternal is a leaf lock. +// - readRank is taken before readRankInternal. +// - writeRank is taken before readRankInternal. +// - readRank is placed in the lock order wherever a read lock of this rwmutex +// belongs. +// - writeRank is placed in the lock order wherever a write lock of this +// rwmutex belongs. 
+func (rw *rwmutex) init(readRank, readRankInternal, writeRank lockRank) { + rw.readRank = readRank + + lockInit(&rw.rLock, readRankInternal) + lockInit(&rw.wLock, writeRank) } const rwmutexMaxReaders = 1 << 30 @@ -36,10 +73,14 @@ func (rw *rwmutex) rlock() { // deadlock (issue #20903). Alternatively, we could drop the P // while sleeping. acquirem() + + acquireLockRank(rw.readRank) + lockWithRankMayAcquire(&rw.rLock, getLockRank(&rw.rLock)) + if rw.readerCount.Add(1) < 0 { // A writer is pending. Park on the reader queue. systemstack(func() { - lockWithRank(&rw.rLock, lockRankRwmutexR) + lock(&rw.rLock) if rw.readerPass > 0 { // Writer finished. rw.readerPass -= 1 @@ -67,7 +108,7 @@ func (rw *rwmutex) runlock() { // A writer is pending. if rw.readerWait.Add(-1) == 0 { // The last reader unblocks the writer. - lockWithRank(&rw.rLock, lockRankRwmutexR) + lock(&rw.rLock) w := rw.writer.ptr() if w != nil { notewakeup(&w.park) @@ -75,18 +116,19 @@ func (rw *rwmutex) runlock() { unlock(&rw.rLock) } } + releaseLockRank(rw.readRank) releasem(getg().m) } // lock locks rw for writing. func (rw *rwmutex) lock() { // Resolve competition with other writers and stick to our P. - lockWithRank(&rw.wLock, lockRankRwmutexW) + lock(&rw.wLock) m := getg().m // Announce that there is a pending writer. r := rw.readerCount.Add(-rwmutexMaxReaders) + rwmutexMaxReaders // Wait for any active readers to complete. - lockWithRank(&rw.rLock, lockRankRwmutexR) + lock(&rw.rLock) if r != 0 && rw.readerWait.Add(r) != 0 { // Wait for reader to wake us up. systemstack(func() { @@ -108,7 +150,7 @@ func (rw *rwmutex) unlock() { throw("unlock of unlocked rwmutex") } // Unblock blocked readers. 
- lockWithRank(&rw.rLock, lockRankRwmutexR) + lock(&rw.rLock) for rw.readers.ptr() != nil { reader := rw.readers.ptr() rw.readers = reader.schedlink diff --git a/src/runtime/rwmutex_test.go b/src/runtime/rwmutex_test.go index ddb16aead4..bdeb9c4901 100644 --- a/src/runtime/rwmutex_test.go +++ b/src/runtime/rwmutex_test.go @@ -29,6 +29,7 @@ func parallelReader(m *RWMutex, clocked chan bool, cunlock *atomic.Bool, cdone c func doTestParallelReaders(numReaders int) { GOMAXPROCS(numReaders + 1) var m RWMutex + m.Init() clocked := make(chan bool, numReaders) var cunlock atomic.Bool cdone := make(chan bool) @@ -100,6 +101,7 @@ func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) { // Number of active readers + 10000 * number of active writers. var activity int32 var rwm RWMutex + rwm.Init() cdone := make(chan bool) go writer(&rwm, num_iterations, &activity, cdone) var i int @@ -141,6 +143,7 @@ func BenchmarkRWMutexUncontended(b *testing.B) { } b.RunParallel(func(pb *testing.PB) { var rwm PaddedRWMutex + rwm.Init() for pb.Next() { rwm.RLock() rwm.RLock() @@ -154,6 +157,7 @@ func BenchmarkRWMutexUncontended(b *testing.B) { func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) { var rwm RWMutex + rwm.Init() b.RunParallel(func(pb *testing.PB) { foo := 0 for pb.Next() { diff --git a/src/runtime/select.go b/src/runtime/select.go index 34c06375c2..b3a3085cb0 100644 --- a/src/runtime/select.go +++ b/src/runtime/select.go @@ -173,7 +173,7 @@ func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, blo continue } - j := fastrandn(uint32(norder + 1)) + j := cheaprandn(uint32(norder + 1)) pollorder[norder] = pollorder[j] pollorder[j] = uint16(i) norder++ diff --git a/src/runtime/sema.go b/src/runtime/sema.go index 3b6874ca11..c87fc7658e 100644 --- a/src/runtime/sema.go +++ b/src/runtime/sema.go @@ -338,7 +338,7 @@ func (root *semaRoot) queue(addr *uint32, s *sudog, lifo bool) { // // s.ticket compared with zero in couple of places, therefore set 
lowest bit. // It will not affect treap's quality noticeably. - s.ticket = fastrand() | 1 + s.ticket = cheaprand() | 1 s.parent = last *pt = s diff --git a/src/runtime/signal_aix_ppc64.go b/src/runtime/signal_aix_ppc64.go index c6cb91a0a2..8ae0f749ed 100644 --- a/src/runtime/signal_aix_ppc64.go +++ b/src/runtime/signal_aix_ppc64.go @@ -1,4 +1,4 @@ -/// Copyright 2018 The Go Authors. All rights reserved. +// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. diff --git a/src/runtime/signal_unix.go b/src/runtime/signal_unix.go index cd9fd5d796..84391d58ed 100644 --- a/src/runtime/signal_unix.go +++ b/src/runtime/signal_unix.go @@ -597,7 +597,7 @@ func adjustSignalStack(sig uint32, mp *m, gsigStack *gsignalStack) bool { // crashing is the number of m's we have waited for when implementing // GOTRACEBACK=crash when a signal is received. -var crashing int32 +var crashing atomic.Int32 // testSigtrap and testSigusr1 are used by the runtime tests. If // non-nil, it is called on SIGTRAP/SIGUSR1. If it returns true, the @@ -730,7 +730,7 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { mp.throwing = throwTypeRuntime mp.caughtsig.set(gp) - if crashing == 0 { + if crashing.Load() == 0 { startpanic_m() } @@ -740,11 +740,11 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { if level > 0 { goroutineheader(gp) tracebacktrap(c.sigpc(), c.sigsp(), c.siglr(), gp) - if crashing > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning { + if crashing.Load() > 0 && gp != mp.curg && mp.curg != nil && readgstatus(mp.curg)&^_Gscan == _Grunning { // tracebackothers on original m skipped this one; trace it now. 
goroutineheader(mp.curg) traceback(^uintptr(0), ^uintptr(0), 0, mp.curg) - } else if crashing == 0 { + } else if crashing.Load() == 0 { tracebackothers(gp) print("\n") } @@ -752,20 +752,35 @@ func sighandler(sig uint32, info *siginfo, ctxt unsafe.Pointer, gp *g) { } if docrash { - crashing++ - if crashing < mcount()-int32(extraMLength.Load()) { + isCrashThread := false + if crashing.CompareAndSwap(0, 1) { + isCrashThread = true + } else { + crashing.Add(1) + } + if crashing.Load() < mcount()-int32(extraMLength.Load()) { // There are other m's that need to dump their stacks. // Relay SIGQUIT to the next m by sending it to the current process. // All m's that have already received SIGQUIT have signal masks blocking // receipt of any signals, so the SIGQUIT will go to an m that hasn't seen it yet. - // When the last m receives the SIGQUIT, it will fall through to the call to - // crash below. Just in case the relaying gets botched, each m involved in + // The first m will wait until all ms received the SIGQUIT, then crash/exit. + // Just in case the relaying gets botched, each m involved in // the relay sleeps for 5 seconds and then does the crash/exit itself. - // In expected operation, the last m has received the SIGQUIT and run - // crash/exit and the process is gone, all long before any of the - // 5-second sleeps have finished. + // The faulting m is crashing first so it is the faulting thread in the core dump (see issue #63277): + // in expected operation, the first m will wait until the last m has received the SIGQUIT, + // and then run crash/exit and the process is gone. + // However, if it spends more than 5 seconds to send SIGQUIT to all ms, + // any of ms may crash/exit the process after waiting for 5 seconds. 
print("\n-----\n\n") raiseproc(_SIGQUIT) + } + if isCrashThread { + i := 0 + for (crashing.Load() < mcount()-int32(extraMLength.Load())) && i < 10 { + i++ + usleep(500 * 1000) + } + } else { usleep(5 * 1000 * 1000) } printDebugLog() diff --git a/src/runtime/sizeof_test.go b/src/runtime/sizeof_test.go index ccc0864ca9..aa8caaadda 100644 --- a/src/runtime/sizeof_test.go +++ b/src/runtime/sizeof_test.go @@ -17,9 +17,9 @@ import ( func TestSizeof(t *testing.T) { const _64bit = unsafe.Sizeof(uintptr(0)) == 8 - g32bit := uintptr(252) + g32bit := uintptr(256) if goexperiment.ExecTracer2 { - g32bit = uintptr(256) + g32bit = uintptr(260) } var tests = []struct { @@ -27,7 +27,7 @@ func TestSizeof(t *testing.T) { _32bit uintptr // size on 32bit platforms _64bit uintptr // size on 64bit platforms }{ - {runtime.G{}, g32bit, 408}, // g, but exported for testing + {runtime.G{}, g32bit, 424}, // g, but exported for testing {runtime.Sudog{}, 56, 88}, // sudog, but exported for testing } diff --git a/src/runtime/slice.go b/src/runtime/slice.go index eb628bb169..4fbe056b78 100644 --- a/src/runtime/slice.go +++ b/src/runtime/slice.go @@ -53,7 +53,7 @@ func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsaf } var to unsafe.Pointer - if et.PtrBytes == 0 { + if !et.Pointers() { to = mallocgc(tomem, nil, false) if copymem < tomem { memclrNoHeapPointers(add(to, copymem), tomem-copymem) @@ -183,7 +183,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice // For 1 we don't need any division/multiplication. // For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant. // For powers of 2, use a variable shift. 
- noscan := et.PtrBytes == 0 + noscan := !et.Pointers() switch { case et.Size_ == 1: lenmem = uintptr(oldLen) @@ -238,7 +238,7 @@ func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice } var p unsafe.Pointer - if et.PtrBytes == 0 { + if !et.Pointers() { p = mallocgc(capmem, nil, false) // The append() that calls growslice is going to overwrite from oldLen to newLen. // Only clear the part that will not be overwritten. @@ -308,7 +308,7 @@ func reflect_growslice(et *_type, old slice, num int) slice { // the memory will be overwritten by an append() that called growslice. // Since the caller of reflect_growslice is not append(), // zero out this region before returning the slice to the reflect package. - if et.PtrBytes == 0 { + if !et.Pointers() { oldcapmem := uintptr(old.cap) * et.Size_ newlenmem := uintptr(new.len) * et.Size_ memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem) @@ -366,5 +366,6 @@ func bytealg_MakeNoZero(len int) []byte { if uintptr(len) > maxAlloc { panicmakeslicelen() } - return unsafe.Slice((*byte)(mallocgc(uintptr(len), nil, false)), len) + cap := roundupsize(uintptr(len), true) + return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len] } diff --git a/src/runtime/stkframe.go b/src/runtime/stkframe.go index a2f40c92d5..becb729e59 100644 --- a/src/runtime/stkframe.go +++ b/src/runtime/stkframe.go @@ -234,7 +234,7 @@ func (frame *stkframe) getStackMap(debug bool) (locals, args bitvector, objs []s } // stack objects. - if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") && + if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") && unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect { // For reflect.makeFuncStub and reflect.methodValueCall, // we need to fake the stack object record. 
diff --git a/src/runtime/stubs.go b/src/runtime/stubs.go index cf856e135f..34984d86ff 100644 --- a/src/runtime/stubs.go +++ b/src/runtime/stubs.go @@ -6,8 +6,6 @@ package runtime import ( "internal/abi" - "internal/goarch" - "runtime/internal/math" "unsafe" ) @@ -120,94 +118,6 @@ func reflect_memmove(to, from unsafe.Pointer, n uintptr) { // exported value for testing const hashLoad = float32(loadFactorNum) / float32(loadFactorDen) -//go:nosplit -func fastrand() uint32 { - mp := getg().m - // Implement wyrand: https://github.com/wangyi-fudan/wyhash - // Only the platform that math.Mul64 can be lowered - // by the compiler should be in this list. - if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64| - goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le| - goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 { - mp.fastrand += 0xa0761d6478bd642f - hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db) - return uint32(hi ^ lo) - } - - // Implement xorshift64+: 2 32-bit xorshift sequences added together. - // Shift triplet [17,7,16] was calculated as indicated in Marsaglia's - // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf - // This generator passes the SmallCrush suite, part of TestU01 framework: - // http://simul.iro.umontreal.ca/testu01/tu01.html - t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand)) - s1, s0 := t[0], t[1] - s1 ^= s1 << 17 - s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16 - t[0], t[1] = s0, s1 - return s0 + s1 -} - -//go:nosplit -func fastrandn(n uint32) uint32 { - // This is similar to fastrand() % n, but faster. - // See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ - return uint32(uint64(fastrand()) * uint64(n) >> 32) -} - -func fastrand64() uint64 { - mp := getg().m - // Implement wyrand: https://github.com/wangyi-fudan/wyhash - // Only the platform that math.Mul64 can be lowered - // by the compiler should be in this list. 
- if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64| - goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le| - goarch.IsS390x|goarch.IsRiscv64 == 1 { - mp.fastrand += 0xa0761d6478bd642f - hi, lo := math.Mul64(mp.fastrand, mp.fastrand^0xe7037ed1a0b428db) - return hi ^ lo - } - - // Implement xorshift64+: 2 32-bit xorshift sequences added together. - // Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf - // This generator passes the SmallCrush suite, part of TestU01 framework: - // http://simul.iro.umontreal.ca/testu01/tu01.html - t := (*[2]uint32)(unsafe.Pointer(&mp.fastrand)) - s1, s0 := t[0], t[1] - s1 ^= s1 << 17 - s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16 - r := uint64(s0 + s1) - - s0, s1 = s1, s0 - s1 ^= s1 << 17 - s1 = s1 ^ s0 ^ s1>>7 ^ s0>>16 - r += uint64(s0+s1) << 32 - - t[0], t[1] = s0, s1 - return r -} - -func fastrandu() uint { - if goarch.PtrSize == 4 { - return uint(fastrand()) - } - return uint(fastrand64()) -} - -//go:linkname rand_fastrand64 math/rand.fastrand64 -func rand_fastrand64() uint64 { return fastrand64() } - -//go:linkname rand2_fastrand64 math/rand/v2.fastrand64 -func rand2_fastrand64() uint64 { return fastrand64() } - -//go:linkname sync_fastrandn sync.fastrandn -func sync_fastrandn(n uint32) uint32 { return fastrandn(n) } - -//go:linkname net_fastrandu net.fastrandu -func net_fastrandu() uint { return fastrandu() } - -//go:linkname os_fastrand os.fastrand -func os_fastrand() uint32 { return fastrand() } - // in internal/bytealg/equal_*.s // //go:noescape diff --git a/src/runtime/stubs_loong64.go b/src/runtime/stubs_loong64.go index 556983cad1..4576089b0b 100644 --- a/src/runtime/stubs_loong64.go +++ b/src/runtime/stubs_loong64.go @@ -10,6 +10,13 @@ package runtime func load_g() func save_g() +// Used by reflectcall and the reflect package. +// +// Spills/loads arguments in registers to/from an internal/abi.RegArgs +// respectively. Does not follow the Go ABI. 
+func spillArgs() +func unspillArgs() + // getfp returns the frame pointer register of its caller or 0 if not implemented. // TODO: Make this a compiler intrinsic func getfp() uintptr { return 0 } diff --git a/src/runtime/symtab.go b/src/runtime/symtab.go index 87b687a196..96a2d29079 100644 --- a/src/runtime/symtab.go +++ b/src/runtime/symtab.go @@ -23,7 +23,7 @@ type Frames struct { frameStore [2]Frame } -// Frame is the information returned by Frames for each call frame. +// Frame is the information returned by [Frames] for each call frame. type Frame struct { // PC is the program counter for the location in this frame. // For a frame that calls another frame, this will be the @@ -79,15 +79,15 @@ func CallersFrames(callers []uintptr) *Frames { return f } -// Next returns a Frame representing the next call frame in the slice +// Next returns a [Frame] representing the next call frame in the slice // of PC values. If it has already returned all call frames, Next -// returns a zero Frame. +// returns a zero [Frame]. // // The more result indicates whether the next call to Next will return -// a valid Frame. It does not necessarily indicate whether this call +// a valid [Frame]. It does not necessarily indicate whether this call // returned one. // -// See the Frames example for idiomatic usage. +// See the [Frames] example for idiomatic usage. func (ci *Frames) Next() (frame Frame, more bool) { for len(ci.frames) < 2 { // Find the next frame. @@ -497,9 +497,6 @@ type textsect struct { baseaddr uintptr // relocated section address } -const minfunc = 16 // minimum function size -const pcbucketsize = 256 * minfunc // size of bucket in the pc->func lookup table - // findfuncbucket is an array of these structures. // Each bucket represents 4096 bytes of the text segment. // Each subbucket represents 256 bytes of the text segment. 
@@ -781,8 +778,8 @@ func findfunc(pc uintptr) funcInfo { } x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal? - b := x / pcbucketsize - i := x % pcbucketsize / (pcbucketsize / nsub) + b := x / abi.FuncTabBucketSize + i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub) ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{}))) idx := ffb.idx + uint32(ffb.subbuckets[i]) @@ -931,7 +928,7 @@ func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uint cache.inUse++ if cache.inUse == 1 { e := &cache.entries[ck] - ci := fastrandn(uint32(len(cache.entries[ck]))) + ci := cheaprandn(uint32(len(cache.entries[ck]))) e[ci] = e[0] e[0] = pcvalueCacheEnt{ targetpc: targetpc, diff --git a/src/runtime/sys_windows_amd64.s b/src/runtime/sys_windows_amd64.s index c1b78e3976..56a2dc0bcf 100644 --- a/src/runtime/sys_windows_amd64.s +++ b/src/runtime/sys_windows_amd64.s @@ -33,14 +33,12 @@ TEXT runtime·asmstdcall(SB),NOSPLIT,$16 SUBQ $(const_maxArgs*8), SP // room for args - // Fast version, do not store args on the stack nor - // load them into registers. - CMPL CX, $0 - JE docall - // Fast version, do not store args on the stack. - CMPL CX, $4 - JLE loadregs + CMPL CX, $0; JE _0args + CMPL CX, $1; JE _1args + CMPL CX, $2; JE _2args + CMPL CX, $3; JE _3args + CMPL CX, $4; JE _4args // Check we have enough room for args. CMPL CX, $const_maxArgs @@ -53,22 +51,25 @@ TEXT runtime·asmstdcall(SB),NOSPLIT,$16 REP; MOVSQ MOVQ SP, SI -loadregs: // Load first 4 args into correspondent registers. - MOVQ 0(SI), CX - MOVQ 8(SI), DX - MOVQ 16(SI), R8 - MOVQ 24(SI), R9 // Floating point arguments are passed in the XMM // registers. Set them here in case any of the arguments // are floating point values. 
For details see // https://learn.microsoft.com/en-us/cpp/build/x64-calling-convention?view=msvc-170 - MOVQ CX, X0 - MOVQ DX, X1 - MOVQ R8, X2 +_4args: + MOVQ 24(SI), R9 MOVQ R9, X3 +_3args: + MOVQ 16(SI), R8 + MOVQ R8, X2 +_2args: + MOVQ 8(SI), DX + MOVQ DX, X1 +_1args: + MOVQ 0(SI), CX + MOVQ CX, X0 +_0args: -docall: // Call stdcall function. CALL AX diff --git a/src/runtime/syscall_aix.go b/src/runtime/syscall_aix.go index e87d4d6d7a..7738fca602 100644 --- a/src/runtime/syscall_aix.go +++ b/src/runtime/syscall_aix.go @@ -164,7 +164,6 @@ func syscall_exit(code uintptr) { func syscall_fcntl1(fd, cmd, arg uintptr) (val, err uintptr) { val, err = syscall3(&libc_fcntl, fd, cmd, arg) return - } //go:linkname syscall_forkx syscall.forkx diff --git a/src/runtime/syscall_windows.go b/src/runtime/syscall_windows.go index ba88e93d7d..7abaea11c8 100644 --- a/src/runtime/syscall_windows.go +++ b/src/runtime/syscall_windows.go @@ -415,63 +415,36 @@ const _LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 //go:linkname syscall_loadsystemlibrary syscall.loadsystemlibrary //go:nosplit -//go:cgo_unsafe_args func syscall_loadsystemlibrary(filename *uint16) (handle, err uintptr) { - lockOSThread() - c := &getg().m.syscall - c.fn = getLoadLibraryEx() - c.n = 3 - args := struct { - lpFileName *uint16 - hFile uintptr // always 0 - flags uint32 - }{filename, 0, _LOAD_LIBRARY_SEARCH_SYSTEM32} - c.args = uintptr(noescape(unsafe.Pointer(&args))) - - cgocall(asmstdcallAddr, unsafe.Pointer(c)) + fn := getLoadLibraryEx() + handle, _, err = syscall_SyscallN(fn, uintptr(unsafe.Pointer(filename)), 0, _LOAD_LIBRARY_SEARCH_SYSTEM32) KeepAlive(filename) - handle = c.r1 - if handle == 0 { - err = c.err + if handle != 0 { + err = 0 } - unlockOSThread() // not defer'd after the lockOSThread above to save stack frame size. 
return } //go:linkname syscall_loadlibrary syscall.loadlibrary //go:nosplit -//go:cgo_unsafe_args func syscall_loadlibrary(filename *uint16) (handle, err uintptr) { - lockOSThread() - defer unlockOSThread() - c := &getg().m.syscall - c.fn = getLoadLibrary() - c.n = 1 - c.args = uintptr(noescape(unsafe.Pointer(&filename))) - cgocall(asmstdcallAddr, unsafe.Pointer(c)) + fn := getLoadLibrary() + handle, _, err = syscall_SyscallN(fn, uintptr(unsafe.Pointer(filename))) KeepAlive(filename) - handle = c.r1 - if handle == 0 { - err = c.err + if handle != 0 { + err = 0 } return } //go:linkname syscall_getprocaddress syscall.getprocaddress //go:nosplit -//go:cgo_unsafe_args func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uintptr) { - lockOSThread() - defer unlockOSThread() - c := &getg().m.syscall - c.fn = getGetProcAddress() - c.n = 2 - c.args = uintptr(noescape(unsafe.Pointer(&handle))) - cgocall(asmstdcallAddr, unsafe.Pointer(c)) + fn := getGetProcAddress() + outhandle, _, err = syscall_SyscallN(fn, handle, uintptr(unsafe.Pointer(procname))) KeepAlive(procname) - outhandle = c.r1 - if outhandle == 0 { - err = c.err + if outhandle != 0 { + err = 0 } return } @@ -479,37 +452,43 @@ func syscall_getprocaddress(handle uintptr, procname *byte) (outhandle, err uint //go:linkname syscall_Syscall syscall.Syscall //go:nosplit func syscall_Syscall(fn, nargs, a1, a2, a3 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3) + args := [...]uintptr{a1, a2, a3} + return syscall_SyscallN(fn, args[:nargs]...) } //go:linkname syscall_Syscall6 syscall.Syscall6 //go:nosplit func syscall_Syscall6(fn, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6) + args := [...]uintptr{a1, a2, a3, a4, a5, a6} + return syscall_SyscallN(fn, args[:nargs]...) 
} //go:linkname syscall_Syscall9 syscall.Syscall9 //go:nosplit func syscall_Syscall9(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9) + args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9} + return syscall_SyscallN(fn, args[:nargs]...) } //go:linkname syscall_Syscall12 syscall.Syscall12 //go:nosplit func syscall_Syscall12(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12) + args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12} + return syscall_SyscallN(fn, args[:nargs]...) } //go:linkname syscall_Syscall15 syscall.Syscall15 //go:nosplit func syscall_Syscall15(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15) + args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15} + return syscall_SyscallN(fn, args[:nargs]...) } //go:linkname syscall_Syscall18 syscall.Syscall18 //go:nosplit func syscall_Syscall18(fn, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2, err uintptr) { - return syscall_SyscallN(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18) + args := [...]uintptr{a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18} + return syscall_SyscallN(fn, args[:nargs]...) } // maxArgs should be divisible by 2, as Windows stack @@ -521,26 +500,22 @@ const maxArgs = 42 //go:linkname syscall_SyscallN syscall.SyscallN //go:nosplit -func syscall_SyscallN(trap uintptr, args ...uintptr) (r1, r2, err uintptr) { - nargs := len(args) - - // asmstdcall expects it can access the first 4 arguments - // to load them into registers. 
- var tmp [4]uintptr - switch { - case nargs < 4: - copy(tmp[:], args) - args = tmp[:] - case nargs > maxArgs: +func syscall_SyscallN(fn uintptr, args ...uintptr) (r1, r2, err uintptr) { + if len(args) > maxArgs { panic("runtime: SyscallN has too many arguments") } + // The cgocall parameters are stored in m instead of in + // the stack because the stack can move during if fn + // calls back into Go. lockOSThread() defer unlockOSThread() c := &getg().m.syscall - c.fn = trap - c.n = uintptr(nargs) - c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) + c.fn = fn + c.n = uintptr(len(args)) + if c.n != 0 { + c.args = uintptr(noescape(unsafe.Pointer(&args[0]))) + } cgocall(asmstdcallAddr, unsafe.Pointer(c)) return c.r1, c.r2, c.err } diff --git a/src/runtime/test_amd64.s b/src/runtime/test_amd64.s index 80fa8c9948..cc4bc6296d 100644 --- a/src/runtime/test_amd64.s +++ b/src/runtime/test_amd64.s @@ -1,3 +1,7 @@ +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + // Create a large frame to force stack growth. See #62326. TEXT ·testSPWrite(SB),0,$16384-0 // Write to SP diff --git a/src/runtime/time.go b/src/runtime/time.go index 8ed1e45fc9..1899589795 100644 --- a/src/runtime/time.go +++ b/src/runtime/time.go @@ -13,153 +13,172 @@ import ( "unsafe" ) +// A timer is a potentially repeating trigger for calling t.f(t.arg, t.seq). +// Timers are allocated by client code, often as part of other data structures. +// Each P has a heap of pointers to timers that it manages. +// +// A timer is expected to be used by only one client goroutine at a time, +// but there will be concurrent access by the P managing that timer. +// The fundamental state about the timer is managed in the atomic state field, +// including a lock bit to manage access to the other fields. +// The lock bit supports a manual cas-based spin lock that handles +// contention by yielding the OS thread. 
The expectation is that critical +// sections are very short and contention on the lock bit is low. +// // Package time knows the layout of this structure. // If this struct changes, adjust ../time/sleep.go:/runtimeTimer. type timer struct { - // If this timer is on a heap, which P's heap it is on. - // puintptr rather than *p to match uintptr in the versions - // of this struct defined in other packages. - pp puintptr + ts *timers // Timer wakes up at when, and then at when+period, ... (period > 0 only) // each time calling f(arg, now) in the timer goroutine, so f must be // a well-behaved function and not block. // // when must be positive on an active timer. + // Timers in heaps are ordered by when. when int64 period int64 f func(any, uintptr) arg any seq uintptr - // What to set the when field to in timerModifiedXX status. - nextwhen int64 + // nextWhen is the next value for when, + // set if state&timerNextWhen is true. + // In that case, the actual update of when = nextWhen + // must be delayed until the heap can be fixed at the same time. + nextWhen int64 - // The status field holds one of the values below. - status atomic.Uint32 + // The state field holds state bits, defined below. + state atomic.Uint32 } -// Code outside this file has to be careful in using a timer value. -// -// The pp, status, and nextwhen fields may only be used by code in this file. -// -// Code that creates a new timer value can set the when, period, f, -// arg, and seq fields. -// A new timer value may be passed to addtimer (called by time.startTimer). -// After doing that no fields may be touched. -// -// An active timer (one that has been passed to addtimer) may be -// passed to deltimer (time.stopTimer), after which it is no longer an -// active timer. It is an inactive timer. -// In an inactive timer the period, f, arg, and seq fields may be modified, -// but not the when field. -// It's OK to just drop an inactive timer and let the GC collect it. 
-// It's not OK to pass an inactive timer to addtimer. -// Only newly allocated timer values may be passed to addtimer. -// -// An active timer may be passed to modtimer. No fields may be touched. -// It remains an active timer. -// -// An inactive timer may be passed to resettimer to turn into an -// active timer with an updated when field. -// It's OK to pass a newly allocated timer value to resettimer. -// -// Timer operations are addtimer, deltimer, modtimer, resettimer, -// cleantimers, adjusttimers, and runtimer. -// -// We don't permit calling addtimer/deltimer/modtimer/resettimer simultaneously, -// but adjusttimers and runtimer can be called at the same time as any of those. -// -// Active timers live in heaps attached to P, in the timers field. -// Inactive timers live there too temporarily, until they are removed. -// -// addtimer: -// timerNoStatus -> timerWaiting -// anything else -> panic: invalid value -// deltimer: -// timerWaiting -> timerModifying -> timerDeleted -// timerModifiedEarlier -> timerModifying -> timerDeleted -// timerModifiedLater -> timerModifying -> timerDeleted -// timerNoStatus -> do nothing -// timerDeleted -> do nothing -// timerRemoving -> do nothing -// timerRemoved -> do nothing -// timerRunning -> wait until status changes -// timerMoving -> wait until status changes -// timerModifying -> wait until status changes -// modtimer: -// timerWaiting -> timerModifying -> timerModifiedXX -// timerModifiedXX -> timerModifying -> timerModifiedYY -// timerNoStatus -> timerModifying -> timerWaiting -// timerRemoved -> timerModifying -> timerWaiting -// timerDeleted -> timerModifying -> timerModifiedXX -// timerRunning -> wait until status changes -// timerMoving -> wait until status changes -// timerRemoving -> wait until status changes -// timerModifying -> wait until status changes -// cleantimers (looks in P's timer heap): -// timerDeleted -> timerRemoving -> timerRemoved -// timerModifiedXX -> timerMoving -> timerWaiting -// 
adjusttimers (looks in P's timer heap):
-//	timerDeleted    -> timerRemoving -> timerRemoved
-//	timerModifiedXX -> timerMoving -> timerWaiting
-// runtimer (looks in P's timer heap):
-//	timerNoStatus   -> panic: uninitialized timer
-//	timerWaiting    -> timerWaiting or
-//	timerWaiting    -> timerRunning -> timerNoStatus or
-//	timerWaiting    -> timerRunning -> timerWaiting
-//	timerModifying  -> wait until status changes
-//	timerModifiedXX -> timerMoving -> timerWaiting
-//	timerDeleted    -> timerRemoving -> timerRemoved
-//	timerRunning    -> panic: concurrent runtimer calls
-//	timerRemoved    -> panic: inconsistent timer heap
-//	timerRemoving   -> panic: inconsistent timer heap
-//	timerMoving     -> panic: inconsistent timer heap
+// A timers is a per-P set of timers.
+type timers struct {
+	// lock protects timers; timers are per-P, but the scheduler can
+	// access the timers of another P, so we have to lock.
+	lock mutex
 
-// Values for the timer status field.
+	// heap is the set of timers, ordered by t.when.
+	// Must hold lock to access.
+	heap []*timer
+
+	// len is an atomic copy of len(heap).
+	len atomic.Uint32
+
+	// zombies is the number of deleted timers left in heap.
+	zombies atomic.Uint32
+
+	// raceCtx is the race context used while executing timer functions.
+	raceCtx uintptr
+
+	// timer0When is an atomic copy of heap[0].when.
+	// If len(heap) == 0, timer0When is 0.
+	timer0When atomic.Int64
+
+	// timerModifiedEarliest holds the earliest known heap[i].nextWhen field
+	// for the heap entries with a new nextWhen pending
+	// (that is, with the timerNextWhen bit set in t.state).
+	// Because timers can be modified multiple times,
+	// timerModifiedEarliest can be set to a nextWhen that has since
+	// been replaced with a later time.
+	// If this is 0, it means there are no timerNextWhen timers in the heap.
+	timerModifiedEarliest atomic.Int64
+}
+
+// Timer state field.
+// Note that state 0 must be "unlocked, not in heap" and usable, +// at least for time.Timer.Stop. See go.dev/issue/21874. const ( - // Timer has no status set yet. - timerNoStatus = iota + // timerLocked is set when the timer is locked, + // meaning other goroutines cannot read or write mutable fields. + // Goroutines can still read the state word atomically to see + // what the state was before it was locked. + // The lock is implemented as a cas on the state field with osyield on contention; + // the expectation is very short critical sections with little to no contention. + timerLocked = 1 << iota - // Waiting for timer to fire. - // The timer is in some P's heap. - timerWaiting + // timerHeaped is set when the timer is stored in some P's heap. + timerHeaped - // Running the timer function. - // A timer will only have this status briefly. - timerRunning - - // The timer is deleted and should be removed. - // It should not be run, but it is still in some P's heap. - timerDeleted - - // The timer is being removed. - // The timer will only have this status briefly. - timerRemoving - - // The timer has been stopped. - // It is not in any P's heap. - timerRemoved - - // The timer is being modified. - // The timer will only have this status briefly. - timerModifying - - // The timer has been modified to an earlier time. - // The new when value is in the nextwhen field. - // The timer is in some P's heap, possibly in the wrong place. - timerModifiedEarlier - - // The timer has been modified to the same or a later time. - // The new when value is in the nextwhen field. - // The timer is in some P's heap, possibly in the wrong place. - timerModifiedLater - - // The timer has been modified and is being moved. - // The timer will only have this status briefly. - timerMoving + // timerNextWhen is set when a pending change to the timer's when + // field has been stored in t.nextwhen. 
The change to t.when waits + // until the heap in which the timer appears can also be updated. + // Only set when timerHeaped is also set. + timerNextWhen ) +// lock locks the timer, allowing reading or writing any of the timer fields. +// It returns the current m and the status prior to the lock. +// The caller must call unlock with the same m and an updated status. +func (t *timer) lock() (state uint32, mp *m) { + acquireLockRank(lockRankTimer) + for { + state := t.state.Load() + if state&timerLocked != 0 { + osyield() + continue + } + // Prevent preemption while the timer is locked. + // This could lead to a self-deadlock. See #38070. + mp := acquirem() + if t.state.CompareAndSwap(state, state|timerLocked) { + return state, mp + } + releasem(mp) + } +} + +// unlock unlocks the timer. +// If mp == nil, the caller is responsible for calling +// releasem(mp) with the mp returned by t.lock. +func (t *timer) unlock(state uint32, mp *m) { + releaseLockRank(lockRankTimer) + if t.state.Load()&timerLocked == 0 { + badTimer() + } + if state&timerLocked != 0 { + badTimer() + } + t.state.Store(state) + if mp != nil { + releasem(mp) + } +} + +// updateWhen updates t.when as directed by state, returning the new state +// and a bool indicating whether the state (and t.when) changed. +// If ts != nil, then the caller must have locked ts, +// t must be ts.heap[0], and updateWhen takes care of +// moving t within the timers heap when t.when is changed. +func (t *timer) updateWhen(state uint32, ts *timers) (newState uint32, updated bool) { + if state&timerNextWhen == 0 { + return state, false + } + state &^= timerNextWhen + if t.nextWhen == 0 { + if ts != nil { + if t != ts.heap[0] { + badTimer() + } + ts.zombies.Add(-1) + ts.deleteMin() + } + state &^= timerHeaped + } else { + // Now we can change the when field. + t.when = t.nextWhen + // Move t to the right position. 
+ if ts != nil { + if t != ts.heap[0] { + badTimer() + } + ts.siftDown(0) + ts.updateTimer0When() + } + } + return state, true +} + // maxWhen is the maximum value for timer's when field. const maxWhen = 1<<63 - 1 @@ -188,9 +207,9 @@ func timeSleep(ns int64) { } t.f = goroutineReady t.arg = gp - t.nextwhen = nanotime() + ns - if t.nextwhen < 0 { // check for overflow. - t.nextwhen = maxWhen + t.nextWhen = nanotime() + ns + if t.nextWhen < 0 { // check for overflow. + t.nextWhen = maxWhen } gopark(resetForSleep, unsafe.Pointer(t), waitReasonSleep, traceBlockSleep, 1) } @@ -201,7 +220,7 @@ func timeSleep(ns int64) { // timer function, goroutineReady, before the goroutine has been parked. func resetForSleep(gp *g, ut unsafe.Pointer) bool { t := (*timer)(ut) - resettimer(t, t.nextwhen) + t.reset(t.nextWhen) return true } @@ -212,7 +231,10 @@ func startTimer(t *timer) { if raceenabled { racerelease(unsafe.Pointer(t)) } - addtimer(t) + if t.state.Load() != 0 { + throw("startTimer called with initialized timer") + } + t.reset(t.when) } // stopTimer stops a timer. @@ -220,7 +242,7 @@ func startTimer(t *timer) { // //go:linkname stopTimer time.stopTimer func stopTimer(t *timer) bool { - return deltimer(t) + return t.stop() } // resetTimer resets an inactive timer, adding it to the heap. @@ -232,14 +254,14 @@ func resetTimer(t *timer, when int64) bool { if raceenabled { racerelease(unsafe.Pointer(t)) } - return resettimer(t, when) + return t.reset(when) } // modTimer modifies an existing timer. // //go:linkname modTimer time.modTimer -func modTimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) { - modtimer(t, when, period, f, arg, seq) +func modTimer(t *timer, when, period int64) { + t.modify(when, period, t.f, t.arg, t.seq) } // Go runtime. 
@@ -249,194 +271,78 @@ func goroutineReady(arg any, seq uintptr) { goready(arg.(*g), 0) } -// Note: this changes some unsynchronized operations to synchronized operations -// addtimer adds a timer to the current P. -// This should only be called with a newly created timer. -// That avoids the risk of changing the when field of a timer in some P's heap, -// which could cause the heap to become unsorted. -func addtimer(t *timer) { - // when must be positive. A negative value will cause runtimer to - // overflow during its delta calculation and never expire other runtime - // timers. Zero will cause checkTimers to fail to notice the timer. - if t.when <= 0 { - throw("timer when must be positive") - } - if t.period < 0 { - throw("timer period must be non-negative") - } - if t.status.Load() != timerNoStatus { - throw("addtimer called with initialized timer") - } - t.status.Store(timerWaiting) - - when := t.when - - // Disable preemption while using pp to avoid changing another P's heap. - mp := acquirem() - - pp := getg().m.p.ptr() - lock(&pp.timersLock) - cleantimers(pp) - doaddtimer(pp, t) - unlock(&pp.timersLock) - - wakeNetPoller(when) - - releasem(mp) -} - -// doaddtimer adds t to the current P's heap. -// The caller must have locked the timers for pp. -func doaddtimer(pp *p, t *timer) { +// add adds t to the timers. +// The caller must have set t.ts = t, unlocked t, +// and then locked ts.lock. +func (ts *timers) add(t *timer) { + assertLockHeld(&ts.lock) // Timers rely on the network poller, so make sure the poller // has started. 
if netpollInited.Load() == 0 { netpollGenericInit() } - if t.pp != 0 { - throw("doaddtimer: P already set in timer") + if t.ts != ts { + throw("ts not set in timer") } - t.pp.set(pp) - i := len(pp.timers) - pp.timers = append(pp.timers, t) - siftupTimer(pp.timers, i) - if t == pp.timers[0] { - pp.timer0When.Store(t.when) + ts.heap = append(ts.heap, t) + ts.siftUp(len(ts.heap) - 1) + if t == ts.heap[0] { + ts.timer0When.Store(t.when) } - pp.numTimers.Add(1) + ts.len.Add(1) } -// deltimer deletes the timer t. It may be on some other P, so we can't -// actually remove it from the timers heap. We can only mark it as deleted. +// stop deletes the timer t. It may be on some other P, so we can't +// actually remove it from the timers heap. We can only mark it as stopped. // It will be removed in due course by the P whose heap it is on. -// Reports whether the timer was removed before it was run. -func deltimer(t *timer) bool { - for { - switch s := t.status.Load(); s { - case timerWaiting, timerModifiedLater: - // Prevent preemption while the timer is in timerModifying. - // This could lead to a self-deadlock. See #38070. - mp := acquirem() - if t.status.CompareAndSwap(s, timerModifying) { - // Must fetch t.pp before changing status, - // as cleantimers in another goroutine - // can clear t.pp of a timerDeleted timer. - tpp := t.pp.ptr() - if !t.status.CompareAndSwap(timerModifying, timerDeleted) { - badTimer() - } - releasem(mp) - tpp.deletedTimers.Add(1) - // Timer was not yet run. - return true - } else { - releasem(mp) - } - case timerModifiedEarlier: - // Prevent preemption while the timer is in timerModifying. - // This could lead to a self-deadlock. See #38070. - mp := acquirem() - if t.status.CompareAndSwap(s, timerModifying) { - // Must fetch t.pp before setting status - // to timerDeleted. - tpp := t.pp.ptr() - if !t.status.CompareAndSwap(timerModifying, timerDeleted) { - badTimer() - } - releasem(mp) - tpp.deletedTimers.Add(1) - // Timer was not yet run. 
- return true - } else { - releasem(mp) - } - case timerDeleted, timerRemoving, timerRemoved: - // Timer was already run. - return false - case timerRunning, timerMoving: - // The timer is being run or moved, by a different P. - // Wait for it to complete. - osyield() - case timerNoStatus: - // Removing timer that was never added or - // has already been run. Also see issue 21874. - return false - case timerModifying: - // Simultaneous calls to deltimer and modtimer. - // Wait for the other call to complete. - osyield() - default: - badTimer() - } +// Reports whether the timer was stopped before it was run. +func (t *timer) stop() bool { + state, mp := t.lock() + if state&timerHeaped != 0 && (state&timerNextWhen == 0 || t.nextWhen != 0) { + // Timer pending: stop it. + t.ts.zombies.Add(1) + t.nextWhen = 0 + state |= timerNextWhen + t.unlock(state, mp) + return true } + + // Timer already run or deleted. + t.unlock(state, mp) + return false } -// dodeltimer removes timer i from the current P's heap. -// We are locked on the P when this is called. -// It returns the smallest changed index in pp.timers. -// The caller must have locked the timers for pp. -func dodeltimer(pp *p, i int) int { - if t := pp.timers[i]; t.pp.ptr() != pp { - throw("dodeltimer: wrong P") - } else { - t.pp = 0 +// deleteMin removes timer 0 from ts. +// ts must be locked. +func (ts *timers) deleteMin() { + assertLockHeld(&ts.lock) + t := ts.heap[0] + if t.ts != ts { + throw("wrong timers") } - last := len(pp.timers) - 1 - if i != last { - pp.timers[i] = pp.timers[last] + t.ts = nil + last := len(ts.heap) - 1 + if last > 0 { + ts.heap[0] = ts.heap[last] } - pp.timers[last] = nil - pp.timers = pp.timers[:last] - smallestChanged := i - if i != last { - // Moving to i may have moved the last timer to a new parent, - // so sift up to preserve the heap guarantee. 
- smallestChanged = siftupTimer(pp.timers, i) - siftdownTimer(pp.timers, i) + ts.heap[last] = nil + ts.heap = ts.heap[:last] + if last > 0 { + ts.siftDown(0) } - if i == 0 { - updateTimer0When(pp) - } - n := pp.numTimers.Add(-1) + ts.updateTimer0When() + n := ts.len.Add(-1) if n == 0 { // If there are no timers, then clearly none are modified. - pp.timerModifiedEarliest.Store(0) - } - return smallestChanged -} - -// dodeltimer0 removes timer 0 from the current P's heap. -// We are locked on the P when this is called. -// It reports whether it saw no problems due to races. -// The caller must have locked the timers for pp. -func dodeltimer0(pp *p) { - if t := pp.timers[0]; t.pp.ptr() != pp { - throw("dodeltimer0: wrong P") - } else { - t.pp = 0 - } - last := len(pp.timers) - 1 - if last > 0 { - pp.timers[0] = pp.timers[last] - } - pp.timers[last] = nil - pp.timers = pp.timers[:last] - if last > 0 { - siftdownTimer(pp.timers, 0) - } - updateTimer0When(pp) - n := pp.numTimers.Add(-1) - if n == 0 { - // If there are no timers, then clearly none are modified. - pp.timerModifiedEarliest.Store(0) + ts.timerModifiedEarliest.Store(0) } } -// modtimer modifies an existing timer. +// modify modifies an existing timer. // This is called by the netpoll code or time.Ticker.Reset or time.Timer.Reset. // Reports whether the timer was modified before it was run. 
-func modtimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq uintptr) bool { +func (t *timer) modify(when, period int64, f func(any, uintptr), arg any, seq uintptr) bool { if when <= 0 { throw("timer when must be positive") } @@ -444,125 +350,79 @@ func modtimer(t *timer, when, period int64, f func(any, uintptr), arg any, seq u throw("timer period must be non-negative") } - status := uint32(timerNoStatus) - wasRemoved := false - var pending bool - var mp *m -loop: - for { - switch status = t.status.Load(); status { - case timerWaiting, timerModifiedEarlier, timerModifiedLater: - // Prevent preemption while the timer is in timerModifying. - // This could lead to a self-deadlock. See #38070. - mp = acquirem() - if t.status.CompareAndSwap(status, timerModifying) { - pending = true // timer not yet run - break loop - } - releasem(mp) - case timerNoStatus, timerRemoved: - // Prevent preemption while the timer is in timerModifying. - // This could lead to a self-deadlock. See #38070. - mp = acquirem() - - // Timer was already run and t is no longer in a heap. - // Act like addtimer. - if t.status.CompareAndSwap(status, timerModifying) { - wasRemoved = true - pending = false // timer already run or stopped - break loop - } - releasem(mp) - case timerDeleted: - // Prevent preemption while the timer is in timerModifying. - // This could lead to a self-deadlock. See #38070. - mp = acquirem() - if t.status.CompareAndSwap(status, timerModifying) { - t.pp.ptr().deletedTimers.Add(-1) - pending = false // timer already stopped - break loop - } - releasem(mp) - case timerRunning, timerRemoving, timerMoving: - // The timer is being run or moved, by a different P. - // Wait for it to complete. - osyield() - case timerModifying: - // Multiple simultaneous calls to modtimer. - // Wait for the other call to complete. 
- osyield() - default: - badTimer() - } - } - + state, mp := t.lock() t.period = period t.f = f t.arg = arg t.seq = seq - if wasRemoved { + if state&timerHeaped == 0 { + // Set up t for insertion but unlock first, + // to avoid lock inversion with timers lock. + // Since t is not in a heap yet, nothing will + // find and modify it until after the ts.add. + state |= timerHeaped t.when = when - pp := getg().m.p.ptr() - lock(&pp.timersLock) - doaddtimer(pp, t) - unlock(&pp.timersLock) - if !t.status.CompareAndSwap(timerModifying, timerWaiting) { - badTimer() - } + + ts := &getg().m.p.ptr().timers + t.ts = ts + // pass mp=nil to t.unlock to avoid preemption + // between t.unlock and lock of timersLock. + // releasem done manually below + t.unlock(state, nil) + + lock(&ts.lock) + ts.add(t) + unlock(&ts.lock) releasem(mp) + wakeNetPoller(when) - } else { - // The timer is in some other P's heap, so we can't change - // the when field. If we did, the other P's heap would - // be out of order. So we put the new when value in the - // nextwhen field, and let the other P set the when field - // when it is prepared to resort the heap. - t.nextwhen = when + return false + } - newStatus := uint32(timerModifiedLater) - if when < t.when { - newStatus = timerModifiedEarlier - } + pending := state&timerNextWhen == 0 || t.nextWhen != 0 // timerHeaped is set (checked above) + if !pending { + t.ts.zombies.Add(-1) + } - tpp := t.pp.ptr() + // The timer is in some other P's heap, so we can't change + // the when field. If we did, the other P's heap would + // be out of order. So we put the new when value in the + // nextwhen field, and let the other P set the when field + // when it is prepared to resort the heap. + t.nextWhen = when + state |= timerNextWhen + earlier := when < t.when + if earlier { + t.ts.updateTimerModifiedEarliest(when) + } - if newStatus == timerModifiedEarlier { - updateTimerModifiedEarliest(tpp, when) - } + t.unlock(state, mp) - // Set the new status of the timer. 
- if !t.status.CompareAndSwap(timerModifying, newStatus) { - badTimer() - } - releasem(mp) - - // If the new status is earlier, wake up the poller. - if newStatus == timerModifiedEarlier { - wakeNetPoller(when) - } + // If the new status is earlier, wake up the poller. + if earlier { + wakeNetPoller(when) } return pending } -// resettimer resets the time when a timer should fire. +// reset resets the time when a timer should fire. // If used for an inactive timer, the timer will become active. -// This should be called instead of addtimer if the timer value has been, -// or may have been, used previously. -// Reports whether the timer was modified before it was run. -func resettimer(t *timer, when int64) bool { - return modtimer(t, when, t.period, t.f, t.arg, t.seq) +// Reports whether the timer was active and was stopped. +func (t *timer) reset(when int64) bool { + return t.modify(when, t.period, t.f, t.arg, t.seq) } -// cleantimers cleans up the head of the timer queue. This speeds up +// cleanHead cleans up the head of the timer queue. This speeds up // programs that create and delete timers; leaving them in the heap -// slows down addtimer. Reports whether no timer problems were found. -// The caller must have locked the timers for pp. -func cleantimers(pp *p) { +// slows down heap operations. +// The caller must have locked ts. 
+func (ts *timers) cleanHead() {
+	assertLockHeld(&ts.lock)
 	gp := getg()
 	for {
-		if len(pp.timers) == 0 {
+		if len(ts.heap) == 0 {
 			return
 		}
 
@@ -574,277 +434,269 @@ func cleantimers(pp *p) {
 			return
 		}
 
-		t := pp.timers[0]
-		if t.pp.ptr() != pp {
-			throw("cleantimers: bad p")
+		t := ts.heap[0]
+		if t.ts != ts {
+			throw("bad ts")
 		}
-		switch s := t.status.Load(); s {
-		case timerDeleted:
-			if !t.status.CompareAndSwap(s, timerRemoving) {
-				continue
-			}
-			dodeltimer0(pp)
-			if !t.status.CompareAndSwap(timerRemoving, timerRemoved) {
-				badTimer()
-			}
-			pp.deletedTimers.Add(-1)
-		case timerModifiedEarlier, timerModifiedLater:
-			if !t.status.CompareAndSwap(s, timerMoving) {
-				continue
-			}
-			// Now we can change the when field.
-			t.when = t.nextwhen
-			// Move t to the right position.
-			dodeltimer0(pp)
-			doaddtimer(pp, t)
-			if !t.status.CompareAndSwap(timerMoving, timerWaiting) {
-				badTimer()
-			}
-		default:
-			// Head of timers does not need adjustment.
+
+		if t.state.Load()&timerNextWhen == 0 {
+			// Fast path: head of timers does not need adjustment.
 			return
 		}
+
+		state, mp := t.lock()
+		state, updated := t.updateWhen(state, ts)
+		t.unlock(state, mp)
+		if !updated {
+			// Head of timers does not need adjustment.
+			return
+		}
+	}
+}
+
+// take moves any timers from src into ts
+// and then clears the timer state from src,
+// because src is being destroyed.
+// The caller must not have locked either timers.
+// For now this is only called when the world is stopped.
+func (ts *timers) take(src *timers) {
+	assertWorldStopped()
+	if len(src.heap) > 0 {
+		// The world is stopped, but we acquire timersLock to
+		// protect against sysmon calling timeSleepUntil.
+		// This is the only case where we hold more than one ts.lock,
+		// so there are no deadlock concerns.
+ lock(&src.lock) + lock(&ts.lock) + ts.move(src.heap) + src.heap = nil + src.len.Store(0) + src.zombies.Store(0) + src.timer0When.Store(0) + unlock(&ts.lock) + unlock(&src.lock) } } // moveTimers moves a slice of timers to pp. The slice has been taken // from a different P. // This is currently called when the world is stopped, but the caller -// is expected to have locked the timers for pp. -func moveTimers(pp *p, timers []*timer) { +// is expected to have locked ts. +func (ts *timers) move(timers []*timer) { + assertLockHeld(&ts.lock) for _, t := range timers { - loop: - for { - switch s := t.status.Load(); s { - case timerWaiting: - if !t.status.CompareAndSwap(s, timerMoving) { - continue - } - t.pp = 0 - doaddtimer(pp, t) - if !t.status.CompareAndSwap(timerMoving, timerWaiting) { - badTimer() - } - break loop - case timerModifiedEarlier, timerModifiedLater: - if !t.status.CompareAndSwap(s, timerMoving) { - continue - } - t.when = t.nextwhen - t.pp = 0 - doaddtimer(pp, t) - if !t.status.CompareAndSwap(timerMoving, timerWaiting) { - badTimer() - } - break loop - case timerDeleted: - if !t.status.CompareAndSwap(s, timerRemoved) { - continue - } - t.pp = 0 - // We no longer need this timer in the heap. - break loop - case timerModifying: - // Loop until the modification is complete. - osyield() - case timerNoStatus, timerRemoved: - // We should not see these status values in a timers heap. - badTimer() - case timerRunning, timerRemoving, timerMoving: - // Some other P thinks it owns this timer, - // which should not happen. - badTimer() - default: - badTimer() - } + state, mp := t.lock() + t.ts = nil + state, _ = t.updateWhen(state, nil) + // Unlock before add, to avoid append (allocation) + // while holding lock. This would be correct even if the world wasn't + // stopped (but it is), and it makes staticlockranking happy. 
+ if state&timerHeaped != 0 { + t.ts = ts + } + t.unlock(state, mp) + if state&timerHeaped != 0 { + ts.add(t) } } } -// adjusttimers looks through the timers in the current P's heap for +// adjust looks through the timers in ts.heap for // any timers that have been modified to run earlier, and puts them in // the correct place in the heap. While looking for those timers, // it also moves timers that have been modified to run later, -// and removes deleted timers. The caller must have locked the timers for pp. -func adjusttimers(pp *p, now int64) { - // If we haven't yet reached the time of the first timerModifiedEarlier +// and removes deleted timers. The caller must have locked ts. +func (ts *timers) adjust(now int64, force bool) { + assertLockHeld(&ts.lock) + // If we haven't yet reached the time of the earliest timerModified // timer, don't do anything. This speeds up programs that adjust // a lot of timers back and forth if the timers rarely expire. // We'll postpone looking through all the adjusted timers until // one would actually expire. - first := pp.timerModifiedEarliest.Load() - if first == 0 || first > now { - if verifyTimers { - verifyTimerHeap(pp) - } - return - } - - // We are going to clear all timerModifiedEarlier timers. - pp.timerModifiedEarliest.Store(0) - - var moved []*timer - for i := 0; i < len(pp.timers); i++ { - t := pp.timers[i] - if t.pp.ptr() != pp { - throw("adjusttimers: bad p") - } - switch s := t.status.Load(); s { - case timerDeleted: - if t.status.CompareAndSwap(s, timerRemoving) { - changed := dodeltimer(pp, i) - if !t.status.CompareAndSwap(timerRemoving, timerRemoved) { - badTimer() - } - pp.deletedTimers.Add(-1) - // Go back to the earliest changed heap entry. - // "- 1" because the loop will add 1. 
- i = changed - 1 + if !force { + first := ts.timerModifiedEarliest.Load() + if first == 0 || first > now { + if verifyTimers { + ts.verify() } - case timerModifiedEarlier, timerModifiedLater: - if t.status.CompareAndSwap(s, timerMoving) { - // Now we can change the when field. - t.when = t.nextwhen - // Take t off the heap, and hold onto it. - // We don't add it back yet because the - // heap manipulation could cause our - // loop to skip some other timer. - changed := dodeltimer(pp, i) - moved = append(moved, t) - // Go back to the earliest changed heap entry. - // "- 1" because the loop will add 1. - i = changed - 1 - } - case timerNoStatus, timerRunning, timerRemoving, timerRemoved, timerMoving: - badTimer() - case timerWaiting: - // OK, nothing to do. - case timerModifying: - // Check again after modification is complete. - osyield() - i-- - default: - badTimer() + return } } - if len(moved) > 0 { - addAdjustedTimers(pp, moved) + // We are going to clear all timerModified timers. + ts.timerModifiedEarliest.Store(0) + + changed := false + for i := 0; i < len(ts.heap); i++ { + t := ts.heap[i] + if t.ts != ts { + throw("bad ts") + } + + state, mp := t.lock() + if state&timerHeaped == 0 { + badTimer() + } + state, updated := t.updateWhen(state, nil) + if updated { + changed = true + if state&timerHeaped == 0 { + n := len(ts.heap) + ts.heap[i] = ts.heap[n-1] + ts.heap[n-1] = nil + ts.heap = ts.heap[:n-1] + t.ts = nil + ts.zombies.Add(-1) + i-- + } + } + t.unlock(state, mp) + } + + if changed { + ts.initHeap() + ts.updateTimer0When() } if verifyTimers { - verifyTimerHeap(pp) + ts.verify() } } -// addAdjustedTimers adds any timers we adjusted in adjusttimers -// back to the timer heap. 
-func addAdjustedTimers(pp *p, moved []*timer) { - for _, t := range moved { - doaddtimer(pp, t) - if !t.status.CompareAndSwap(timerMoving, timerWaiting) { - badTimer() - } - } -} - -// nobarrierWakeTime looks at P's timers and returns the time when we +// wakeTime looks at ts's timers and returns the time when we // should wake up the netpoller. It returns 0 if there are no timers. -// This function is invoked when dropping a P, and must run without +// This function is invoked when dropping a P, so it must run without // any write barriers. // //go:nowritebarrierrec -func nobarrierWakeTime(pp *p) int64 { - next := pp.timer0When.Load() - nextAdj := pp.timerModifiedEarliest.Load() +func (ts *timers) wakeTime() int64 { + next := ts.timer0When.Load() + nextAdj := ts.timerModifiedEarliest.Load() if next == 0 || (nextAdj != 0 && nextAdj < next) { next = nextAdj } return next } -// runtimer examines the first timer in timers. If it is ready based on now, +// check runs any timers in ts that are ready. +// If now is not 0 it is the current time. +// It returns the passed time or the current time if now was passed as 0. +// and the time when the next timer should run or 0 if there is no next timer, +// and reports whether it ran any timers. +// If the time when the next timer should run is not 0, +// it is always larger than the returned time. +// We pass now in and out to avoid extra calls of nanotime. +// +//go:yeswritebarrierrec +func (ts *timers) check(now int64) (rnow, pollUntil int64, ran bool) { + // If it's not yet time for the first timer, or the first adjusted + // timer, then there is nothing to do. + next := ts.timer0When.Load() + nextAdj := ts.timerModifiedEarliest.Load() + if next == 0 || (nextAdj != 0 && nextAdj < next) { + next = nextAdj + } + + if next == 0 { + // No timers to run or adjust. + return now, 0, false + } + + if now == 0 { + now = nanotime() + } + + // If this is the local P, and there are a lot of deleted timers, + // clear them out. 
We only do this for the local P to reduce + // lock contention on timersLock. + force := ts == &getg().m.p.ptr().timers && int(ts.zombies.Load()) > int(ts.len.Load())/4 + + if now < next && !force { + // Next timer is not ready to run, and we don't need to clear deleted timers. + return now, next, false + } + + lock(&ts.lock) + if len(ts.heap) > 0 { + ts.adjust(now, force) + for len(ts.heap) > 0 { + // Note that runtimer may temporarily unlock ts. + if tw := ts.run(now); tw != 0 { + if tw > 0 { + pollUntil = tw + } + break + } + ran = true + } + } + + unlock(&ts.lock) + + return now, pollUntil, ran +} + +// run examines the first timer in ts. If it is ready based on now, // it runs the timer and removes or updates it. // Returns 0 if it ran a timer, -1 if there are no more timers, or the time // when the first timer should run. -// The caller must have locked the timers for pp. -// If a timer is run, this will temporarily unlock the timers. +// The caller must have locked ts. +// If a timer is run, this will temporarily unlock ts. // //go:systemstack -func runtimer(pp *p, now int64) int64 { - for { - t := pp.timers[0] - if t.pp.ptr() != pp { - throw("runtimer: bad p") - } - switch s := t.status.Load(); s { - case timerWaiting: - if t.when > now { - // Not ready to run. - return t.when - } - - if !t.status.CompareAndSwap(s, timerRunning) { - continue - } - // Note that runOneTimer may temporarily unlock - // pp.timersLock. 
- runOneTimer(pp, t, now) - return 0 - - case timerDeleted: - if !t.status.CompareAndSwap(s, timerRemoving) { - continue - } - dodeltimer0(pp) - if !t.status.CompareAndSwap(timerRemoving, timerRemoved) { - badTimer() - } - pp.deletedTimers.Add(-1) - if len(pp.timers) == 0 { - return -1 - } - - case timerModifiedEarlier, timerModifiedLater: - if !t.status.CompareAndSwap(s, timerMoving) { - continue - } - t.when = t.nextwhen - dodeltimer0(pp) - doaddtimer(pp, t) - if !t.status.CompareAndSwap(timerMoving, timerWaiting) { - badTimer() - } - - case timerModifying: - // Wait for modification to complete. - osyield() - - case timerNoStatus, timerRemoved: - // Should not see a new or inactive timer on the heap. - badTimer() - case timerRunning, timerRemoving, timerMoving: - // These should only be set when timers are locked, - // and we didn't do it. - badTimer() - default: - badTimer() - } +func (ts *timers) run(now int64) int64 { + assertLockHeld(&ts.lock) +Redo: + if len(ts.heap) == 0 { + return -1 } + t := ts.heap[0] + if t.ts != ts { + throw("bad ts") + } + + if t.state.Load()&timerNextWhen == 0 && t.when > now { + // Fast path: not ready to run. + // The access of t.when is protected by the caller holding + // ts.lock, even though t itself is unlocked. + return t.when + } + + state, mp := t.lock() + state, updated := t.updateWhen(state, ts) + if updated { + t.unlock(state, mp) + goto Redo + } + + if state&timerHeaped == 0 { + badTimer() + } + + if t.when > now { + // Not ready to run. + t.unlock(state, mp) + return t.when + } + + ts.unlockAndRun(t, now, state, mp) + assertLockHeld(&ts.lock) // t is unlocked now, but not ts + return 0 } -// runOneTimer runs a single timer. -// The caller must have locked the timers for pp. +// unlockAndRun unlocks and runs a single timer. +// The caller must have locked ts. // This will temporarily unlock the timers while running the timer function. 
// //go:systemstack -func runOneTimer(pp *p, t *timer, now int64) { +func (ts *timers) unlockAndRun(t *timer, now int64, state uint32, mp *m) { + assertLockHeld(&ts.lock) if raceenabled { - ppcur := getg().m.p.ptr() - if ppcur.timerRaceCtx == 0 { - ppcur.timerRaceCtx = racegostart(abi.FuncPCABIInternal(runtimer) + sys.PCQuantum) + tsLocal := &getg().m.p.ptr().timers + if tsLocal.raceCtx == 0 { + tsLocal.raceCtx = racegostart(abi.FuncPCABIInternal((*timers).run) + sys.PCQuantum) } - raceacquirectx(ppcur.timerRaceCtx, unsafe.Pointer(t)) + raceacquirectx(tsLocal.raceCtx, unsafe.Pointer(t)) } f := t.f @@ -854,37 +707,28 @@ func runOneTimer(pp *p, t *timer, now int64) { if t.period > 0 { // Leave in heap but adjust next time to fire. delta := t.when - now - t.when += t.period * (1 + -delta/t.period) - if t.when < 0 { // check for overflow. - t.when = maxWhen + t.nextWhen = t.when + t.period*(1+-delta/t.period) + if t.nextWhen < 0 { // check for overflow. + t.nextWhen = maxWhen } - siftdownTimer(pp.timers, 0) - if !t.status.CompareAndSwap(timerRunning, timerWaiting) { - badTimer() - } - updateTimer0When(pp) } else { - // Remove from heap. - dodeltimer0(pp) - if !t.status.CompareAndSwap(timerRunning, timerNoStatus) { - badTimer() - } + t.nextWhen = 0 } + state, _ = t.updateWhen(state|timerNextWhen, ts) + t.unlock(state, mp) if raceenabled { // Temporarily use the current P's racectx for g0. gp := getg() if gp.racectx != 0 { - throw("runOneTimer: unexpected racectx") + throw("unexpected racectx") } - gp.racectx = gp.m.p.ptr().timerRaceCtx + gp.racectx = gp.m.p.ptr().timers.raceCtx } - unlock(&pp.timersLock) - + unlock(&ts.lock) f(arg, seq) - - lock(&pp.timersLock) + lock(&ts.lock) if raceenabled { gp := getg() @@ -892,96 +736,52 @@ func runOneTimer(pp *p, t *timer, now int64) { } } -// clearDeletedTimers removes all deleted timers from the P's timer heap. 
-// This is used to avoid clogging up the heap if the program -// starts a lot of long-running timers and then stops them. -// For example, this can happen via context.WithTimeout. +// updateTimerPMask clears pp's timer mask if it has no timers on its heap. // -// This is the only function that walks through the entire timer heap, -// other than moveTimers which only runs when the world is stopped. +// Ideally, the timer mask would be kept immediately consistent on any timer +// operations. Unfortunately, updating a shared global data structure in the +// timer hot path adds too much overhead in applications frequently switching +// between no timers and some timers. // -// The caller must have locked the timers for pp. -func clearDeletedTimers(pp *p) { - // We are going to clear all timerModifiedEarlier timers. - // Do this now in case new ones show up while we are looping. - pp.timerModifiedEarliest.Store(0) - - cdel := int32(0) - to := 0 - changedHeap := false - timers := pp.timers -nextTimer: - for _, t := range timers { - for { - switch s := t.status.Load(); s { - case timerWaiting: - if changedHeap { - timers[to] = t - siftupTimer(timers, to) - } - to++ - continue nextTimer - case timerModifiedEarlier, timerModifiedLater: - if t.status.CompareAndSwap(s, timerMoving) { - t.when = t.nextwhen - timers[to] = t - siftupTimer(timers, to) - to++ - changedHeap = true - if !t.status.CompareAndSwap(timerMoving, timerWaiting) { - badTimer() - } - continue nextTimer - } - case timerDeleted: - if t.status.CompareAndSwap(s, timerRemoving) { - t.pp = 0 - cdel++ - if !t.status.CompareAndSwap(timerRemoving, timerRemoved) { - badTimer() - } - changedHeap = true - continue nextTimer - } - case timerModifying: - // Loop until modification complete. - osyield() - case timerNoStatus, timerRemoved: - // We should not see these status values in a timer heap. 
- badTimer() - case timerRunning, timerRemoving, timerMoving: - // Some other P thinks it owns this timer, - // which should not happen. - badTimer() - default: - badTimer() - } - } +// As a compromise, the timer mask is updated only on pidleget / pidleput. A +// running P (returned by pidleget) may add a timer at any time, so its mask +// must be set. An idle P (passed to pidleput) cannot add new timers while +// idle, so if it has no timers at that time, its mask may be cleared. +// +// Thus, we get the following effects on timer-stealing in findrunnable: +// +// - Idle Ps with no timers when they go idle are never checked in findrunnable +// (for work- or timer-stealing; this is the ideal case). +// - Running Ps must always be checked. +// - Idle Ps whose timers are stolen must continue to be checked until they run +// again, even after timer expiration. +// +// When the P starts running again, the mask should be set, as a timer may be +// added at any time. +// +// TODO(prattmic): Additional targeted updates may improve the above cases. +// e.g., updating the mask when stealing a timer. +func updateTimerPMask(pp *p) { + if pp.timers.len.Load() > 0 { + return } - // Set remaining slots in timers slice to nil, - // so that the timer values can be garbage collected. - for i := to; i < len(timers); i++ { - timers[i] = nil - } - - pp.deletedTimers.Add(-cdel) - pp.numTimers.Add(-cdel) - - timers = timers[:to] - pp.timers = timers - updateTimer0When(pp) - - if verifyTimers { - verifyTimerHeap(pp) + // Looks like there are no timers, however another P may transiently + // decrement numTimers when handling a timerModified timer in + // checkTimers. We must take timersLock to serialize with these changes. + lock(&pp.timers.lock) + if pp.timers.len.Load() == 0 { + timerpMask.clear(pp.id) } + unlock(&pp.timers.lock) } -// verifyTimerHeap verifies that the timer heap is in a valid state. +// verifyTimerHeap verifies that the timers is in a valid state. 
// This is only for debugging, and is only called if verifyTimers is true. -// The caller must have locked the timers. -func verifyTimerHeap(pp *p) { - for i, t := range pp.timers { +// The caller must have locked ts. +func (ts *timers) verify() { + assertLockHeld(&ts.lock) + for i, t := range ts.heap { if i == 0 { // First timer has no parent. continue @@ -989,38 +789,38 @@ func verifyTimerHeap(pp *p) { // The heap is 4-ary. See siftupTimer and siftdownTimer. p := (i - 1) / 4 - if t.when < pp.timers[p].when { - print("bad timer heap at ", i, ": ", p, ": ", pp.timers[p].when, ", ", i, ": ", t.when, "\n") + if t.when < ts.heap[p].when { + print("bad timer heap at ", i, ": ", p, ": ", ts.heap[p].when, ", ", i, ": ", t.when, "\n") throw("bad timer heap") } } - if numTimers := int(pp.numTimers.Load()); len(pp.timers) != numTimers { - println("timer heap len", len(pp.timers), "!= numTimers", numTimers) + if n := int(ts.len.Load()); len(ts.heap) != n { + println("timer heap len", len(ts.heap), "!= atomic len", n) throw("bad timer heap len") } } -// updateTimer0When sets the P's timer0When field. -// The caller must have locked the timers for pp. -func updateTimer0When(pp *p) { - if len(pp.timers) == 0 { - pp.timer0When.Store(0) +// updateTimer0When sets ts.timer0When to ts.heap[0].when. +// The caller must have locked ts. +func (ts *timers) updateTimer0When() { + assertLockHeld(&ts.lock) + if len(ts.heap) == 0 { + ts.timer0When.Store(0) } else { - pp.timer0When.Store(pp.timers[0].when) + ts.timer0When.Store(ts.heap[0].when) } } -// updateTimerModifiedEarliest updates the recorded nextwhen field of the -// earlier timerModifiedEarier value. -// The timers for pp will not be locked. -func updateTimerModifiedEarliest(pp *p, nextwhen int64) { +// updateTimerModifiedEarliest updates ts.timerModifiedEarliest to be <= nextwhen. +// The timers for ts need not be locked. 
+func (ts *timers) updateTimerModifiedEarliest(nextwhen int64) { for { - old := pp.timerModifiedEarliest.Load() + old := ts.timerModifiedEarliest.Load() if old != 0 && old < nextwhen { return } - if pp.timerModifiedEarliest.CompareAndSwap(old, nextwhen) { + if ts.timerModifiedEarliest.CompareAndSwap(old, nextwhen) { return } } @@ -1041,12 +841,12 @@ func timeSleepUntil() int64 { continue } - w := pp.timer0When.Load() + w := pp.timers.timer0When.Load() if w != 0 && w < next { next = w } - w = pp.timerModifiedEarliest.Load() + w = pp.timers.timerModifiedEarliest.Load() if w != 0 && w < next { next = w } @@ -1064,10 +864,10 @@ func timeSleepUntil() int64 { // "panic holding locks" message. Instead, we panic while not // holding a lock. -// siftupTimer puts the timer at position i in the right place +// siftUp puts the timer at position i in the right place // in the heap by moving it up toward the top of the heap. -// It returns the smallest changed index. -func siftupTimer(t []*timer, i int) int { +func (ts *timers) siftUp(i int) { + t := ts.heap if i >= len(t) { badTimer() } @@ -1087,12 +887,12 @@ func siftupTimer(t []*timer, i int) int { if tmp != t[i] { t[i] = tmp } - return i } -// siftdownTimer puts the timer at position i in the right place +// siftDown puts the timer at position i in the right place // in the heap by moving it down toward the bottom of the heap. -func siftdownTimer(t []*timer, i int) { +func (ts *timers) siftDown(i int) { + t := ts.heap n := len(t) if i >= n { badTimer() @@ -1135,6 +935,19 @@ func siftdownTimer(t []*timer, i int) { } } +// initHeap reestablishes the heap order in the slice ts.heap. +// It takes O(n) time for n=len(ts.heap), not the O(n log n) of n repeated add operations. +func (ts *timers) initHeap() { + // Last possible element that needs sifting down is parent of last element; + // last element is len(t)-1; parent of last element is (len(t)-1-1)/4. 
+ if len(ts.heap) <= 1 { + return + } + for i := (len(ts.heap) - 1 - 1) / 4; i >= 0; i-- { + ts.siftDown(i) + } +} + // badTimer is called if the timer data structures have been corrupted, // presumably due to racy use by the program. We panic here rather than // panicking due to invalid slice access while holding locks. diff --git a/src/runtime/trace.go b/src/runtime/trace.go index b4ad9a638c..948a8da0ca 100644 --- a/src/runtime/trace.go +++ b/src/runtime/trace.go @@ -324,9 +324,9 @@ func traceRelease(tl traceLocker) { } // StartTrace enables tracing for the current process. -// While tracing, the data will be buffered and available via ReadTrace. +// While tracing, the data will be buffered and available via [ReadTrace]. // StartTrace returns an error if tracing is already enabled. -// Most clients should use the runtime/trace package or the testing package's +// Most clients should use the [runtime/trace] package or the [testing] package's // -test.trace flag instead of calling StartTrace directly. func StartTrace() error { // Stop the world so that we can take a consistent snapshot @@ -578,6 +578,9 @@ func StopTrace() { }) } +// traceAdvance is called from panic, it does nothing for the legacy tracer. +func traceAdvance(stopTrace bool) {} + // ReadTrace returns the next chunk of binary tracing data, blocking until data // is available. If tracing is turned off and all the data accumulated while it // was on has been returned, ReadTrace returns nil. The caller must copy the diff --git a/src/runtime/trace2.go b/src/runtime/trace2.go index 1a58015989..673205dda8 100644 --- a/src/runtime/trace2.go +++ b/src/runtime/trace2.go @@ -71,7 +71,8 @@ var trace struct { stringTab [2]traceStringTable // maps strings to unique ids // cpuLogRead accepts CPU profile samples from the signal handler where - // they're generated. It uses a three-word header to hold the IDs of the P, G, + // they're generated. There are two profBufs here: one for gen%2, one for + // 1-gen%2. 
These profBufs use a three-word header to hold the IDs of the P, G, // and M (respectively) that were active at the time of the sample. Because // profBuf uses a record with all zeros in its header to indicate overflow, // we make sure to make the P field always non-zero: The ID of a real P will @@ -82,9 +83,9 @@ var trace struct { // when sampling g0. // // Initialization and teardown of these fields is protected by traceAdvanceSema. - cpuLogRead *profBuf - signalLock atomic.Uint32 // protects use of the following member, only usable in signal handlers - cpuLogWrite atomic.Pointer[profBuf] // copy of cpuLogRead for use in signal handlers, set without signalLock + cpuLogRead [2]*profBuf + signalLock atomic.Uint32 // protects use of the following member, only usable in signal handlers + cpuLogWrite [2]atomic.Pointer[profBuf] // copy of cpuLogRead for use in signal handlers, set without signalLock cpuSleep *wakeableSleep cpuLogDone <-chan struct{} cpuBuf [2]*traceBuf @@ -123,9 +124,9 @@ var ( ) // StartTrace enables tracing for the current process. -// While tracing, the data will be buffered and available via ReadTrace. +// While tracing, the data will be buffered and available via [ReadTrace]. // StartTrace returns an error if tracing is already enabled. -// Most clients should use the runtime/trace package or the testing package's +// Most clients should use the [runtime/trace] package or the [testing] package's // -test.trace flag instead of calling StartTrace directly. func StartTrace() error { if traceEnabled() || traceShuttingDown() { @@ -334,7 +335,7 @@ func traceAdvance(stopTrace bool) { if !s.dead { ug.goid = s.g.goid if s.g.m != nil { - ug.mid = s.g.m.id + ug.mid = int64(s.g.m.procid) } ug.status = readgstatus(s.g) &^ _Gscan ug.waitreason = s.g.waitreason @@ -443,6 +444,7 @@ func traceAdvance(stopTrace bool) { // held, we can be certain that when there are no writers there are // also no stale generation values left. 
Therefore, it's safe to flush // any buffers that remain in that generation's slot. + const debugDeadlock = false systemstack(func() { // Track iterations for some rudimentary deadlock detection. i := 0 @@ -479,16 +481,18 @@ func traceAdvance(stopTrace bool) { osyield() } - // Try to detect a deadlock. We probably shouldn't loop here - // this many times. - if i > 100000 && !detectedDeadlock { - detectedDeadlock = true - println("runtime: failing to flush") - for mp := mToFlush; mp != nil; mp = mp.trace.link { - print("runtime: m=", mp.id, "\n") + if debugDeadlock { + // Try to detect a deadlock. We probably shouldn't loop here + // this many times. + if i > 100000 && !detectedDeadlock { + detectedDeadlock = true + println("runtime: failing to flush") + for mp := mToFlush; mp != nil; mp = mp.trace.link { + print("runtime: m=", mp.id, "\n") + } } + i++ } - i++ } }) @@ -512,6 +516,9 @@ func traceAdvance(stopTrace bool) { } statusWriter.flush().end() + // Read everything out of the last gen's CPU profile buffer. + traceReadCPU(gen) + systemstack(func() { // Flush CPU samples, stacks, and strings for the last generation. This is safe, // because we're now certain no M is writing to the last generation. @@ -761,6 +768,8 @@ func readTrace0() (buf []byte, park bool) { // can continue to advance. if trace.flushedGen.Load() == gen { if trace.shutdown.Load() { + unlock(&trace.lock) + // Wake up anyone waiting for us to be done with this generation. // // Do this after reading trace.shutdown, because the thread we're @@ -775,13 +784,13 @@ func readTrace0() (buf []byte, park bool) { // We're shutting down, and the last generation is fully // read. We're done. - unlock(&trace.lock) return nil, false } // The previous gen has had all of its buffers flushed, and // there's nothing else for us to read. Advance the generation // we're reading from and try again. 
trace.readerGen.Store(trace.gen.Load()) + unlock(&trace.lock) // Wake up anyone waiting for us to be done with this generation. // @@ -792,6 +801,9 @@ func readTrace0() (buf []byte, park bool) { racerelease(unsafe.Pointer(&trace.doneSema[gen%2])) } semrelease(&trace.doneSema[gen%2]) + + // Reacquire the lock and go back to the top of the loop. + lock(&trace.lock) continue } // Wait for new data. @@ -923,7 +935,13 @@ func newWakeableSleep() *wakeableSleep { func (s *wakeableSleep) sleep(ns int64) { resetTimer(s.timer, nanotime()+ns) lock(&s.lock) + if raceenabled { + raceacquire(unsafe.Pointer(&s.lock)) + } wakeup := s.wakeup + if raceenabled { + racerelease(unsafe.Pointer(&s.lock)) + } unlock(&s.lock) <-wakeup stopTimer(s.timer) @@ -936,6 +954,9 @@ func (s *wakeableSleep) wake() { // Grab the wakeup channel, which may be nil if we're // racing with close. lock(&s.lock) + if raceenabled { + raceacquire(unsafe.Pointer(&s.lock)) + } if s.wakeup != nil { // Non-blocking send. // @@ -947,6 +968,9 @@ func (s *wakeableSleep) wake() { default: } } + if raceenabled { + racerelease(unsafe.Pointer(&s.lock)) + } unlock(&s.lock) } @@ -960,11 +984,18 @@ func (s *wakeableSleep) wake() { func (s *wakeableSleep) close() { // Set wakeup to nil so that a late timer ends up being a no-op. lock(&s.lock) + if raceenabled { + raceacquire(unsafe.Pointer(&s.lock)) + } wakeup := s.wakeup s.wakeup = nil // Close the channel. close(wakeup) + + if raceenabled { + racerelease(unsafe.Pointer(&s.lock)) + } unlock(&s.lock) return } diff --git a/src/runtime/trace2cpu.go b/src/runtime/trace2cpu.go index a33c0b6b6d..b3b0fb046d 100644 --- a/src/runtime/trace2cpu.go +++ b/src/runtime/trace2cpu.go @@ -16,8 +16,9 @@ func traceInitReadCPU() { throw("traceInitReadCPU called with trace enabled") } // Create new profBuf for CPU samples that will be emitted as events. 
- profBuf := newProfBuf(3, profBufWordCount, profBufTagCount) // after the timestamp, header is [pp.id, gp.goid, mp.procid] - trace.cpuLogRead = profBuf + // Format: after the timestamp, header is [pp.id, gp.goid, mp.procid]. + trace.cpuLogRead[0] = newProfBuf(3, profBufWordCount, profBufTagCount) + trace.cpuLogRead[1] = newProfBuf(3, profBufWordCount, profBufTagCount) // We must not acquire trace.signalLock outside of a signal handler: a // profiling signal may arrive at any time and try to acquire it, leading to // deadlock. Because we can't use that lock to protect updates to @@ -25,7 +26,8 @@ func traceInitReadCPU() { // writes of the pointer must be atomic. (And although this field is never // the sole pointer to the profBuf value, it's best to allow a write barrier // here.) - trace.cpuLogWrite.Store(profBuf) + trace.cpuLogWrite[0].Store(trace.cpuLogRead[0]) + trace.cpuLogWrite[1].Store(trace.cpuLogRead[1]) } // traceStartReadCPU creates a goroutine to start reading CPU profile @@ -52,7 +54,15 @@ func traceStartReadCPU() { // we would still want to do a goroutine-level sleep in between // reads to avoid frequent wakeups. trace.cpuSleep.sleep(100_000_000) - if !traceReadCPU(trace.cpuLogRead) { + + tl := traceAcquire() + if !tl.ok() { + // Tracing disabled. + break + } + keepGoing := traceReadCPU(tl.gen) + traceRelease(tl) + if !keepGoing { break } } @@ -76,8 +86,10 @@ func traceStopReadCPU() { // // Wake the goroutine so it can observe that their the buffer is // closed an exit. - trace.cpuLogWrite.Store(nil) - trace.cpuLogRead.close() + trace.cpuLogWrite[0].Store(nil) + trace.cpuLogWrite[1].Store(nil) + trace.cpuLogRead[0].close() + trace.cpuLogRead[1].close() trace.cpuSleep.wake() // Wait until the logger goroutine exits. @@ -85,20 +97,28 @@ func traceStopReadCPU() { // Clear state for the next trace. 
trace.cpuLogDone = nil - trace.cpuLogRead = nil + trace.cpuLogRead[0] = nil + trace.cpuLogRead[1] = nil trace.cpuSleep.close() } -// traceReadCPU attempts to read from the provided profBuf and write +// traceReadCPU attempts to read from the provided profBuf[gen%2] and write // into the trace. Returns true if there might be more to read or false // if the profBuf is closed or the caller should otherwise stop reading. // +// The caller is responsible for ensuring that gen does not change. Either +// the caller must be in a traceAcquire/traceRelease block, or must be calling +// with traceAdvanceSema held. +// // No more than one goroutine may be in traceReadCPU for the same // profBuf at a time. -func traceReadCPU(pb *profBuf) bool { +// +// Must not run on the system stack because profBuf.read performs race +// operations. +func traceReadCPU(gen uintptr) bool { var pcBuf [traceStackSize]uintptr - data, tags, eof := pb.read(profBufNonBlocking) + data, tags, eof := trace.cpuLogRead[gen%2].read(profBufNonBlocking) for len(data) > 0 { if len(data) < 4 || data[0] > uint64(len(data)) { break // truncated profile @@ -147,12 +167,7 @@ func traceReadCPU(pb *profBuf) bool { } // Write out a trace event. - tl := traceAcquire() - if !tl.ok() { - // Tracing disabled, exit without continuing. - return false - } - w := unsafeTraceWriter(tl.gen, trace.cpuBuf[tl.gen%2]) + w := unsafeTraceWriter(gen, trace.cpuBuf[gen%2]) // Ensure we have a place to write to. var flushed bool @@ -163,7 +178,7 @@ func traceReadCPU(pb *profBuf) bool { } // Add the stack to the table. - stackID := trace.stackTab[tl.gen%2].put(pcBuf[:nstk]) + stackID := trace.stackTab[gen%2].put(pcBuf[:nstk]) // Write out the CPU sample. 
w.byte(byte(traceEvCPUSample)) @@ -173,8 +188,7 @@ func traceReadCPU(pb *profBuf) bool { w.varint(goid) w.varint(stackID) - trace.cpuBuf[tl.gen%2] = w.traceBuf - traceRelease(tl) + trace.cpuBuf[gen%2] = w.traceBuf } return !eof } @@ -187,6 +201,7 @@ func traceReadCPU(pb *profBuf) bool { // //go:systemstack func traceCPUFlush(gen uintptr) { + // Flush any remaining trace buffers containing CPU samples. if buf := trace.cpuBuf[gen%2]; buf != nil { lock(&trace.lock) traceBufFlush(buf, gen) @@ -197,13 +212,38 @@ func traceCPUFlush(gen uintptr) { // traceCPUSample writes a CPU profile sample stack to the execution tracer's // profiling buffer. It is called from a signal handler, so is limited in what -// it can do. +// it can do. mp must be the thread that is currently stopped in a signal. func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) { if !traceEnabled() { // Tracing is usually turned off; don't spend time acquiring the signal // lock unless it's active. return } + if mp == nil { + // Drop samples that don't have an identifiable thread. We can't render + // this in any useful way anyway. + return + } + + // We're going to conditionally write to one of two buffers based on the + // generation. To make sure we write to the correct one, we need to make + // sure this thread's trace seqlock is held. If it already is, then we're + // in the tracer and we can just take advantage of that. If it isn't, then + // we need to acquire it and read the generation. + locked := false + if mp.trace.seqlock.Load()%2 == 0 { + mp.trace.seqlock.Add(1) + locked = true + } + gen := trace.gen.Load() + if gen == 0 { + // Tracing is disabled, as it turns out. Release the seqlock if necessary + // and exit. 
+ if locked { + mp.trace.seqlock.Add(1) + } + return + } now := traceClockNow() // The "header" here is the ID of the M that was running the profiled code, @@ -221,9 +261,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) { if gp != nil { hdr[1] = gp.goid } - if mp != nil { - hdr[2] = uint64(mp.procid) - } + hdr[2] = uint64(mp.procid) // Allow only one writer at a time for !trace.signalLock.CompareAndSwap(0, 1) { @@ -231,7 +269,7 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) { osyield() } - if log := trace.cpuLogWrite.Load(); log != nil { + if log := trace.cpuLogWrite[gen%2].Load(); log != nil { // Note: we don't pass a tag pointer here (how should profiling tags // interact with the execution tracer?), but if we did we'd need to be // careful about write barriers. See the long comment in profBuf.write. @@ -239,4 +277,9 @@ func traceCPUSample(gp *g, mp *m, pp *p, stk []uintptr) { } trace.signalLock.Store(0) + + // Release the seqlock if we acquired it earlier. + if locked { + mp.trace.seqlock.Add(1) + } } diff --git a/src/runtime/trace2map.go b/src/runtime/trace2map.go index 4a5a7ecba4..195ec0bbe7 100644 --- a/src/runtime/trace2map.go +++ b/src/runtime/trace2map.go @@ -141,5 +141,11 @@ func (tab *traceMap) reset() { assertLockHeld(&tab.lock) tab.mem.drop() tab.seq.Store(0) - tab.tab = [1 << 13]atomic.UnsafePointer{} + // Clear table without write barriers. The table consists entirely + // of notinheap pointers, so this is fine. + // + // Write barriers may theoretically call into the tracer and acquire + // the lock again, and this lock ordering is expressed in the static + // lock ranking checker. 
+ memclrNoHeapPointers(unsafe.Pointer(&tab.tab), unsafe.Sizeof(tab.tab)) } diff --git a/src/runtime/trace2runtime.go b/src/runtime/trace2runtime.go index b6837d0360..512e53907e 100644 --- a/src/runtime/trace2runtime.go +++ b/src/runtime/trace2runtime.go @@ -133,7 +133,7 @@ const ( var traceGoStopReasonStrings = [...]string{ traceGoStopGeneric: "unspecified", - traceGoStopGoSched: "runtime.GoSched", + traceGoStopGoSched: "runtime.Gosched", traceGoStopPreempted: "preempted", } @@ -192,7 +192,12 @@ func traceAcquireEnabled() traceLocker { // Prevent preemption. mp := acquirem() - // Acquire the trace seqlock. + // Acquire the trace seqlock. This prevents traceAdvance from moving forward + // until all Ms are observed to be outside of their seqlock critical section. + // + // Note: The seqlock is mutated here and also in traceCPUSample. If you update + // usage of the seqlock here, make sure to also look at what traceCPUSample is + // doing. seq := mp.trace.seqlock.Add(1) if debugTraceReentrancy && seq%2 != 1 { throw("bad use of trace.seqlock or tracer is reentrant") @@ -493,10 +498,10 @@ func (tl traceLocker) GoSysExit(lostP bool) { // ProcSteal indicates that our current M stole a P from another M. // -// forMe indicates that the caller is stealing pp to wire it up to itself. +// inSyscall indicates that we're stealing the P from a syscall context. // // The caller must have ownership of pp. -func (tl traceLocker) ProcSteal(pp *p, forMe bool) { +func (tl traceLocker) ProcSteal(pp *p, inSyscall bool) { // Grab the M ID we stole from. mStolenFrom := pp.trace.mSyscallID pp.trace.mSyscallID = -1 @@ -506,17 +511,20 @@ func (tl traceLocker) ProcSteal(pp *p, forMe bool) { // the P just to get its attention (e.g. STW or sysmon retake) or we're trying to steal a P for // ourselves specifically to keep running. The two contexts look different, but can be summarized // fairly succinctly. In the former, we're a regular running goroutine and proc, if we have either. 
- // In the latter, we're a goroutine in a syscall, + // In the latter, we're a goroutine in a syscall. goStatus := traceGoRunning procStatus := traceProcRunning - if forMe { + if inSyscall { goStatus = traceGoSyscall procStatus = traceProcSyscallAbandoned } w := tl.eventWriter(goStatus, procStatus) - // Emit the status of the P we're stealing. We may have *just* done this, but we may not have, - // even if forMe is true, depending on whether we wired the P to ourselves already. + // Emit the status of the P we're stealing. We may have *just* done this when creating the event + // writer but it's not guaranteed, even if inSyscall is true. Although it might seem like from a + // syscall context we're always stealing a P for ourselves, we may have not wired it up yet (so + // it wouldn't be visible to eventWriter) or we may not even intend to wire it up to ourselves + // at all (e.g. entersyscall_gcwait). if !pp.trace.statusWasTraced(tl.gen) && pp.trace.acquireStatus(tl.gen) { // Careful: don't use the event writer. We never want status or in-progress events // to trigger more in-progress events. diff --git a/src/runtime/trace2stack.go b/src/runtime/trace2stack.go index ebfe7c57f0..af6638fa8f 100644 --- a/src/runtime/trace2stack.go +++ b/src/runtime/trace2stack.go @@ -97,7 +97,8 @@ func (t *traceStackTable) put(pcs []uintptr) uint64 { } // dump writes all previously cached stacks to trace buffers, -// releases all memory and resets state. +// releases all memory and resets state. It must only be called once the caller +// can guarantee that there are no more writers to the table. // // This must run on the system stack because it flushes buffers and thus // may acquire trace.lock. @@ -107,7 +108,15 @@ func (t *traceStackTable) dump(gen uintptr) { w := unsafeTraceWriter(gen, nil) // Iterate over the table. - lock(&t.tab.lock) + // + // Do not acquire t.tab.lock. There's a conceptual lock cycle between acquiring this lock + // here and allocation-related locks. 
Specifically, this lock may be acquired when an event + // is emitted in allocation paths. Simultaneously, we might allocate here with the lock held, + // creating a cycle. In practice, this cycle is never exercised. Because the table is only + // dumped once there are no more writers, it's not possible for the cycle to occur. However + // the lockrank mode is not sophisticated enough to identify this, and if it's not possible + // for that cycle to happen, then it's also not possible for this to race with writers to + // the table. for i := range t.tab.tab { stk := t.tab.bucket(i) for ; stk != nil; stk = stk.next() { @@ -144,6 +153,9 @@ func (t *traceStackTable) dump(gen uintptr) { } } } + // Still, hold the lock over reset. The callee expects it, even though it's + // not strictly necessary. + lock(&t.tab.lock) t.tab.reset() unlock(&t.tab.lock) diff --git a/src/runtime/trace2status.go b/src/runtime/trace2status.go index 0f64452c3e..5016e08656 100644 --- a/src/runtime/trace2status.go +++ b/src/runtime/trace2status.go @@ -82,8 +82,17 @@ func (w traceWriter) writeProcStatusForP(pp *p, inSTW bool) traceWriter { // in _Pgcstop, but we model it as running in the tracer. status = traceProcRunning } - case _Prunning, _Psyscall: + case _Prunning: status = traceProcRunning + // There's a short window wherein the goroutine may have entered _Gsyscall + // but it still owns the P (it's not in _Psyscall yet). The goroutine entering + // _Gsyscall is the tracer's signal that the P its bound to is also in a syscall, + // so we need to emit a status that matches. See #64318. 
+ if w.mp.p.ptr() == pp && w.mp.curg != nil && readgstatus(w.mp.curg)&^_Gscan == _Gsyscall { + status = traceProcSyscall + } + case _Psyscall: + status = traceProcSyscall default: throw("attempt to trace invalid or unsupported P status") } @@ -138,6 +147,7 @@ func goStatusToTraceGoStatus(status uint32, wr waitReason) traceGoStatus { wr == waitReasonGCMarkTermination || wr == waitReasonGarbageCollection || wr == waitReasonTraceProcStatus || + wr == waitReasonPageTraceFlush || wr == waitReasonGCWorkerActive { tgs = traceGoRunning } diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go index 66a1cc85ee..61027ea89a 100644 --- a/src/runtime/traceback.go +++ b/src/runtime/traceback.go @@ -650,25 +650,7 @@ func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int { // printArgs prints function arguments in traceback. func printArgs(f funcInfo, argp unsafe.Pointer, pc uintptr) { - // The "instruction" of argument printing is encoded in _FUNCDATA_ArgInfo. - // See cmd/compile/internal/ssagen.emitArgInfo for the description of the - // encoding. - // These constants need to be in sync with the compiler. 
- const ( - _endSeq = 0xff - _startAgg = 0xfe - _endAgg = 0xfd - _dotdotdot = 0xfc - _offsetTooLarge = 0xfb - ) - - const ( - limit = 10 // print no more than 10 args/components - maxDepth = 5 // no more than 5 layers of nesting - maxLen = (maxDepth*3+2)*limit + 1 // max length of _FUNCDATA_ArgInfo (see the compiler side for reasoning) - ) - - p := (*[maxLen]uint8)(funcdata(f, abi.FUNCDATA_ArgInfo)) + p := (*[abi.TraceArgsMaxLen]uint8)(funcdata(f, abi.FUNCDATA_ArgInfo)) if p == nil { return } @@ -721,19 +703,19 @@ printloop: o := p[pi] pi++ switch o { - case _endSeq: + case abi.TraceArgsEndSeq: break printloop - case _startAgg: + case abi.TraceArgsStartAgg: printcomma() print("{") start = true continue - case _endAgg: + case abi.TraceArgsEndAgg: print("}") - case _dotdotdot: + case abi.TraceArgsDotdotdot: printcomma() print("...") - case _offsetTooLarge: + case abi.TraceArgsOffsetTooLarge: printcomma() print("_") default: @@ -1011,12 +993,24 @@ func traceback2(u *unwinder, showRuntime bool, skip, max int) (n, lastN int) { } print(")\n") print("\t", file, ":", line) - if !iu.isInlined(uf) { - if u.frame.pc > f.entry() { - print(" +", hex(u.frame.pc-f.entry())) - } - if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 { - print(" fp=", hex(u.frame.fp), " sp=", hex(u.frame.sp), " pc=", hex(u.frame.pc)) + // The contract between Callers and CallersFrames uses + // return addresses, which are +1 relative to the CALL + // instruction. Follow that convention. + pc := uf.pc + 1 + if !iu.isInlined(uf) && pc > f.entry() { + // Func-relative PCs make no sense for inlined + // frames because there is no actual entry. + print(" +", hex(pc-f.entry())) + } + if gp.m != nil && gp.m.throwing >= throwTypeRuntime && gp == gp.m.curg || level >= 2 { + if !iu.isInlined(uf) { + // The stack information makes no sense for inline frames. 
+ print(" fp=", hex(u.frame.fp), " sp=", hex(u.frame.sp), " pc=", hex(pc)) + } else { + // The PC for an inlined frame is a special marker NOP, + // but crash monitoring tools may still parse the PCs + // and feed them to CallersFrames. + print(" pc=", hex(pc)) } } print("\n") @@ -1151,10 +1145,32 @@ func showfuncinfo(sf srcFunc, firstFrame bool, calleeID abi.FuncID) bool { // isExportedRuntime reports whether name is an exported runtime function. // It is only for runtime functions, so ASCII A-Z is fine. -// TODO: this handles exported functions but not exported methods. func isExportedRuntime(name string) bool { - const n = len("runtime.") - return len(name) > n && name[:n] == "runtime." && 'A' <= name[n] && name[n] <= 'Z' + // Check and remove package qualifier. + n := len("runtime.") + if len(name) <= n || name[:n] != "runtime." { + return false + } + name = name[n:] + rcvr := "" + + // Extract receiver type, if any. + // For example, runtime.(*Func).Entry + i := len(name) - 1 + for i >= 0 && name[i] != '.' { + i-- + } + if i >= 0 { + rcvr = name[:i] + name = name[i+1:] + // Remove parentheses and star for pointer receivers. + if len(rcvr) >= 3 && rcvr[0] == '(' && rcvr[1] == '*' && rcvr[len(rcvr)-1] == ')' { + rcvr = rcvr[2 : len(rcvr)-1] + } + } + + // Exported functions and exported methods on exported types. 
+ return len(name) > 0 && 'A' <= name[0] && name[0] <= 'Z' && (len(rcvr) == 0 || 'A' <= rcvr[0] && rcvr[0] <= 'Z') } // elideWrapperCalling reports whether a wrapper function that called @@ -1322,7 +1338,7 @@ func isSystemGoroutine(gp *g, fixed bool) bool { if !f.valid() { return false } - if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_handleAsyncEvent { + if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_corostart || f.funcID == abi.FuncID_handleAsyncEvent { return false } if f.funcID == abi.FuncID_runfinq { diff --git a/src/runtime/traceback_system_test.go b/src/runtime/traceback_system_test.go new file mode 100644 index 0000000000..223d78a808 --- /dev/null +++ b/src/runtime/traceback_system_test.go @@ -0,0 +1,238 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package runtime_test + +// This test of GOTRACEBACK=system has its own file, +// to minimize line-number perturbation. + +import ( + "bytes" + "fmt" + "internal/testenv" + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "runtime/debug" + "strconv" + "strings" + "testing" +) + +// This is the entrypoint of the child process used by +// TestTracebackSystem. It prints a crash report to stdout. +func crash() { + // Ensure that we get pc=0x%x values in the traceback. + debug.SetTraceback("system") + writeSentinel(os.Stdout) + debug.SetCrashOutput(os.Stdout) + + go func() { + // This call is typically inlined. + child() + }() + select {} +} + +func child() { + grandchild() +} + +func grandchild() { + // Write runtime.Caller's view of the stack to stderr, for debugging. + var pcs [16]uintptr + n := runtime.Callers(1, pcs[:]) + io.WriteString(os.Stderr, formatStack(pcs[:n])) + + // Cause the crash report to be written to stdout. 
+ panic("oops") +} + +// TestTracebackSystem tests that the syntax of crash reports produced +// by GOTRACEBACK=system (see traceback2) contains a complete, +// parseable list of program counters for the running goroutine that +// can be parsed and fed to runtime.CallersFrames to obtain accurate +// information about the logical call stack, even in the presence of +// inlining. +// +// The test is a distillation of the crash monitor in +// golang.org/x/telemetry/crashmonitor. +func TestTracebackSystem(t *testing.T) { + testenv.MustHaveExec(t) + if runtime.GOOS == "android" { + t.Skip("Can't read source code for this file on Android") + } + + // Fork+exec the crashing process. + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + cmd := testenv.Command(t, exe) + cmd.Env = append(cmd.Environ(), entrypointVar+"=crash") + cmd.Stdout = new(strings.Builder) + // cmd.Stderr = os.Stderr // uncomment to debug, e.g. to see runtime.Caller's view + cmd.Run() // expected to crash + crash := cmd.Stdout.(*strings.Builder).String() + + // If the only line is the sentinel, it wasn't a crash. + if strings.Count(crash, "\n") < 2 { + t.Fatalf("child process did not produce a crash report") + } + + // Parse the PCs out of the child's crash report. + pcs, err := parseStackPCs(crash) + if err != nil { + t.Fatal(err) + } + + // Unwind the stack using this executable's symbol table. 
+ got := formatStack(pcs) + want := `redacted.go:0: runtime.gopanic +traceback_system_test.go:51: runtime_test.grandchild: panic("oops") +traceback_system_test.go:41: runtime_test.child: grandchild() +traceback_system_test.go:35: runtime_test.crash.func1: child() +redacted.go:0: runtime.goexit` + if strings.TrimSpace(got) != strings.TrimSpace(want) { + t.Errorf("got:\n%swant:\n%s", got, want) + } +} + +// parseStackPCs parses the parent process's program counters for the +// first running goroutine out of a GOTRACEBACK=system traceback, +// adjusting them so that they are valid for the child process's text +// segment. +// +// This function returns only program counter values, ensuring that +// there is no possibility of strings from the crash report (which may +// contain PII) leaking into the telemetry system. +// +// (Copied from golang.org/x/telemetry/crashmonitor.parseStackPCs.) +func parseStackPCs(crash string) ([]uintptr, error) { + // getPC parses the PC out of a line of the form: + // \tFILE:LINE +0xRELPC sp=... fp=... pc=... + getPC := func(line string) (uint64, error) { + _, pcstr, ok := strings.Cut(line, " pc=") // e.g. pc=0x%x + if !ok { + return 0, fmt.Errorf("no pc= for stack frame: %s", line) + } + return strconv.ParseUint(pcstr, 0, 64) // 0 => allow 0x prefix + } + + var ( + pcs []uintptr + parentSentinel uint64 + childSentinel = sentinel() + on = false // are we in the first running goroutine? + lines = strings.Split(crash, "\n") + ) + for i := 0; i < len(lines); i++ { + line := lines[i] + + // Read sentinel value. 
+ if parentSentinel == 0 && strings.HasPrefix(line, "sentinel ") { + _, err := fmt.Sscanf(line, "sentinel %x", &parentSentinel) + if err != nil { + return nil, fmt.Errorf("can't read sentinel line") + } + continue + } + + // Search for "goroutine GID [STATUS]" + if !on { + if strings.HasPrefix(line, "goroutine ") && + strings.Contains(line, " [running]:") { + on = true + + if parentSentinel == 0 { + return nil, fmt.Errorf("no sentinel value in crash report") + } + } + continue + } + + // A blank line marks end of a goroutine stack. + if line == "" { + break + } + + // Skip the final "created by SYMBOL in goroutine GID" part. + if strings.HasPrefix(line, "created by ") { + break + } + + // Expect a pair of lines: + // SYMBOL(ARGS) + // \tFILE:LINE +0xRELPC sp=0x%x fp=0x%x pc=0x%x + // Note: SYMBOL may contain parens "pkg.(*T).method" + // The RELPC is sometimes missing. + + // Skip the symbol(args) line. + i++ + if i == len(lines) { + break + } + line = lines[i] + + // Parse the PC, and correct for the parent and child's + // different mappings of the text section. + pc, err := getPC(line) + if err != nil { + // Inlined frame, perhaps; skip it. + continue + } + pcs = append(pcs, uintptr(pc-parentSentinel+childSentinel)) + } + return pcs, nil +} + +// The sentinel function returns its address. The difference between +// this value as observed by calls in two different processes of the +// same executable tells us the relative offset of their text segments. +// +// It would be nice if SetCrashOutput took care of this as it's fiddly +// and likely to confuse every user at first. +func sentinel() uint64 { + return uint64(reflect.ValueOf(sentinel).Pointer()) +} + +func writeSentinel(out io.Writer) { + fmt.Fprintf(out, "sentinel %x\n", sentinel()) +} + +// formatStack formats a stack of PC values using the symbol table, +// redacting information that cannot be relied upon in the test. 
+func formatStack(pcs []uintptr) string { + // When debugging, show file/line/content of files other than this one. + const debug = false + + var buf strings.Builder + i := 0 + frames := runtime.CallersFrames(pcs) + for { + fr, more := frames.Next() + if debug { + fmt.Fprintf(&buf, "pc=%x ", pcs[i]) + i++ + } + if base := filepath.Base(fr.File); base == "traceback_system_test.go" || debug { + content, err := os.ReadFile(fr.File) + if err != nil { + panic(err) + } + lines := bytes.Split(content, []byte("\n")) + fmt.Fprintf(&buf, "%s:%d: %s: %s\n", base, fr.Line, fr.Function, lines[fr.Line-1]) + } else { + // For robustness, don't show file/line for functions from other files. + fmt.Fprintf(&buf, "redacted.go:0: %s\n", fr.Function) + } + + if !more { + break + } + } + return buf.String() +} diff --git a/src/runtime/traceback_test.go b/src/runtime/traceback_test.go index 204b4f5316..8cbccac673 100644 --- a/src/runtime/traceback_test.go +++ b/src/runtime/traceback_test.go @@ -419,6 +419,17 @@ func TestTracebackArgs(t *testing.T) { "testTracebackArgs11b(0xffffffff?, 0xffffffff?, 0x3?, 0x4)", "testTracebackArgs11b(0x1, 0x2, 0x3, 0x4)"), }, + // Make sure spilled slice data pointers are spilled to the right location + // to ensure we see it listed without a ?. + // See issue 64414. + { + func() int { + poisonStack() + return testTracebackArgsSlice(testTracebackArgsSliceBackingStore[:]) + }, + // Note: capacity of the slice might be junk, as it is not used. + fmt.Sprintf("testTracebackArgsSlice({%p, 0x2, ", &testTracebackArgsSliceBackingStore[0]), + }, } for _, test := range tests { n := test.fn() @@ -450,7 +461,6 @@ func testTracebackArgs2(a bool, b struct { return b.a + b.b + b.c + b.x[0] + b.x[1] + int(d[0]) + int(d[1]) + int(d[2]) } return n - } //go:noinline @@ -667,6 +677,19 @@ func testTracebackArgs11b(a, b, c, d int32) int { return runtime.Stack(testTracebackArgsBuf[:], false) } +// norace to avoid race instrumentation changing spill locations. 
+// nosplit to avoid preemption or morestack spilling registers. +// +//go:norace +//go:nosplit +//go:noinline +func testTracebackArgsSlice(a []int) int { + n := runtime.Stack(testTracebackArgsBuf[:], false) + return a[1] + n +} + +var testTracebackArgsSliceBackingStore [2]int + // Poison the arg area with deterministic values. // //go:noinline diff --git a/src/runtime/vdso_test.go b/src/runtime/vdso_test.go index 126fd8d199..d025ba50c2 100644 --- a/src/runtime/vdso_test.go +++ b/src/runtime/vdso_test.go @@ -12,6 +12,7 @@ import ( "os" "os/exec" "path/filepath" + "syscall" "testing" "time" ) @@ -56,6 +57,16 @@ func TestUsingVDSO(t *testing.T) { t.Logf("%s", out) } if err != nil { + if err := err.(*exec.ExitError); err != nil && err.Sys().(syscall.WaitStatus).Signaled() { + if !bytes.Contains(out, []byte("+++ killed by")) { + // strace itself occasionally crashes. + // Here, it exited with a signal, but + // the strace log didn't report any + // signal from the child process. + t.Log(err) + testenv.SkipFlaky(t, 63734) + } + } t.Fatal(err) } diff --git a/src/runtime/write_err.go b/src/runtime/write_err.go index 81ae872e9c..11ca6bbb94 100644 --- a/src/runtime/write_err.go +++ b/src/runtime/write_err.go @@ -6,8 +6,9 @@ package runtime -import "unsafe" - +//go:nosplit func writeErr(b []byte) { - write(2, unsafe.Pointer(&b[0]), int32(len(b))) + if len(b) > 0 { + writeErrData(&b[0], int32(len(b))) + } } diff --git a/src/runtime/write_err_android.go b/src/runtime/write_err_android.go index a876900c95..dd950774cb 100644 --- a/src/runtime/write_err_android.go +++ b/src/runtime/write_err_android.go @@ -34,6 +34,10 @@ const ( var logger loggerType func writeErr(b []byte) { + if len(b) == 0 { + return + } + if logger == unknown { // Use logd if /dev/socket/logdw is available. if v := uintptr(access(&writeLogd[0], 0x02 /* W_OK */)); v == 0 { @@ -45,8 +49,9 @@ func writeErr(b []byte) { } } - // Write to stderr for command-line programs. 
- write(2, unsafe.Pointer(&b[0]), int32(len(b))) + // Write to stderr for command-line programs, + // and optionally to SetCrashOutput file. + writeErrData(&b[0], int32(len(b))) // Log format: "
    \x00\x00" // diff --git a/src/slices/slices.go b/src/slices/slices.go index f92a25da6a..326584064c 100644 --- a/src/slices/slices.go +++ b/src/slices/slices.go @@ -130,14 +130,13 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { // Insert panics if i is out of range. // This function is O(len(s) + len(v)). func Insert[S ~[]E, E any](s S, i int, v ...E) S { - n := len(s) + _ = s[i:] // bounds check + m := len(v) if m == 0 { - // Panic if i is not in the range [0:n] inclusive. - // See issue 63913. - _ = s[:n:n][i:] return s } + n := len(s) if i == n { return append(s, v...) } @@ -217,7 +216,11 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S { // make a single call deleting them all together than to delete one at a time. // Delete zeroes the elements s[len(s)-(j-i):len(s)]. func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j] // bounds check + _ = s[i:j:len(s)] // bounds check + + if i == j { + return s + } oldlen := len(s) s = append(s[:i], s[j:]...) @@ -255,7 +258,11 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { return Insert(s, i, v...) } if j == len(s) { - return append(s[:i], v...) + s2 := append(s[:i], v...) + if len(s2) < len(s) { + clear(s[len(s2):len(s)]) // zero/nil out the obsolete elements, for GC + } + return s2 } tot := len(s[:i]) + len(v) + len(s[j:]) @@ -335,6 +342,7 @@ func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { // Clone returns a copy of the slice. // The elements are copied using assignment, so this is a shallow clone. +// The result may have additional unused capacity. func Clone[S ~[]E, E any](s S) S { // The s[:0:0] preserves nil in case it matters. return append(s[:0:0], s...) 
@@ -401,65 +409,21 @@ func Clip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] } -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 -// -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - // TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. -// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. +// This algorithm has the desirable property that it moves each element at most twice. // The follow-cycles algorithm can be 1-write but it is not very cache friendly. -// rotateLeft rotates b left by n spaces. +// rotateLeft rotates s left by r spaces. // s_final[i] = s_orig[i+r], wrapping around. func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } + Reverse(s[:r]) + Reverse(s[r:]) + Reverse(s) } func rotateRight[E any](s []E, r int) { rotateLeft(s, len(s)-r) } -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - // overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. 
func overlaps[E any](a, b []E) bool { if len(a) == 0 || len(b) == 0 { diff --git a/src/slices/slices_test.go b/src/slices/slices_test.go index b86638172a..4b5f0355df 100644 --- a/src/slices/slices_test.go +++ b/src/slices/slices_test.go @@ -538,6 +538,7 @@ func TestInsertOverlap(t *testing.T) { func TestInsertPanics(t *testing.T) { a := [3]int{} + b := [1]int{} for _, test := range []struct { name string s []int @@ -549,8 +550,14 @@ func TestInsertPanics(t *testing.T) { {"with out-of-bounds index and > cap", a[:1:1], 2, nil}, {"with out-of-bounds index and = cap", a[:1:2], 2, nil}, {"with out-of-bounds index and < cap", a[:1:3], 2, nil}, + + // There are values. + {"with negative index", a[:1:1], -1, b[:]}, + {"with out-of-bounds index and > cap", a[:1:1], 2, b[:]}, + {"with out-of-bounds index and = cap", a[:1:2], 2, b[:]}, + {"with out-of-bounds index and < cap", a[:1:3], 2, b[:]}, } { - if !panics(func() { Insert(test.s, test.i, test.v...) }) { + if !panics(func() { _ = Insert(test.s, test.i, test.v...) }) { t.Errorf("Insert %s: got no panic, want panic", test.name) } } @@ -672,10 +679,12 @@ func TestDeletePanics(t *testing.T) { {"with negative second index", []int{42}, 1, -1}, {"with out-of-bounds first index", []int{42}, 2, 3}, {"with out-of-bounds second index", []int{42}, 0, 2}, + {"with out-of-bounds both indexes", []int{42}, 2, 2}, {"with invalid i>j", []int{42}, 1, 0}, {"s[i:j] is valid and j > len(s)", s, 0, 4}, + {"s[i:j] is valid and i == j > len(s)", s, 3, 3}, } { - if !panics(func() { Delete(test.s, test.i, test.j) }) { + if !panics(func() { _ = Delete(test.s, test.i, test.j) }) { t.Errorf("Delete %s: got no panic, want panic", test.name) } } @@ -897,10 +906,10 @@ func TestGrow(t *testing.T) { } // Test number of allocations. 
- if n := testing.AllocsPerRun(100, func() { Grow(s2, cap(s2)-len(s2)) }); n != 0 { + if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)) }); n != 0 { t.Errorf("Grow should not allocate when given sufficient capacity; allocated %v times", n) } - if n := testing.AllocsPerRun(100, func() { Grow(s2, cap(s2)-len(s2)+1) }); n != 1 { + if n := testing.AllocsPerRun(100, func() { _ = Grow(s2, cap(s2)-len(s2)+1) }); n != 1 { errorf := t.Errorf if race.Enabled || testenv.OptimizationOff() { errorf = t.Logf // this allocates multiple times in race detector mode @@ -912,7 +921,7 @@ func TestGrow(t *testing.T) { var gotPanic bool func() { defer func() { gotPanic = recover() != nil }() - Grow(s1, -1) + _ = Grow(s1, -1) }() if !gotPanic { t.Errorf("Grow(-1) did not panic; expected a panic") @@ -1028,7 +1037,7 @@ func TestReplacePanics(t *testing.T) { {"s[i:j] is valid and j > len(s)", s, nil, 0, 4}, } { ss, vv := Clone(test.s), Clone(test.v) - if !panics(func() { Replace(ss, test.i, test.j, vv...) }) { + if !panics(func() { _ = Replace(ss, test.i, test.j, vv...) }) { t.Errorf("Replace %s: should have panicked", test.name) } } @@ -1111,6 +1120,19 @@ func TestReplaceOverlap(t *testing.T) { } } +func TestReplaceEndClearTail(t *testing.T) { + s := []int{11, 22, 33} + v := []int{99} + // case when j == len(s) + i, j := 1, 3 + s = Replace(s, i, j, v...) 
+ + x := s[:3][2] + if want := 0; x != want { + t.Errorf("TestReplaceEndClearTail: obsolete element is %d, want %d", x, want) + } +} + func BenchmarkReplace(b *testing.B) { cases := []struct { name string diff --git a/src/slices/sort.go b/src/slices/sort.go index d5e998ce1e..4c20a2943c 100644 --- a/src/slices/sort.go +++ b/src/slices/sort.go @@ -117,10 +117,10 @@ func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { return m } -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. +// BinarySearch searches for target in a sorted slice and returns the earliest +// position where target is found, or the position where target would appear +// in the sort order; it also returns a bool saying whether the target is +// really found in the slice. The slice must be sorted in increasing order. func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) { // Inlining is faster than calling BinarySearchFunc with a lambda. n := len(x) diff --git a/src/sort/search.go b/src/sort/search.go index 874e40813d..ccf76dba24 100644 --- a/src/sort/search.go +++ b/src/sort/search.go @@ -117,7 +117,7 @@ func Find(n int, cmp func(int) int) (i int, found bool) { // Convenience wrappers for common cases. // SearchInts searches for x in a sorted slice of ints and returns the index -// as specified by Search. The return value is the index to insert x if x is +// as specified by [Search]. The return value is the index to insert x if x is // not present (it could be len(a)). // The slice must be sorted in ascending order. func SearchInts(a []int, x int) int { @@ -125,7 +125,7 @@ func SearchInts(a []int, x int) int { } // SearchFloat64s searches for x in a sorted slice of float64s and returns the index -// as specified by Search. 
The return value is the index to insert x if x is not +// as specified by [Search]. The return value is the index to insert x if x is not // present (it could be len(a)). // The slice must be sorted in ascending order. func SearchFloat64s(a []float64, x float64) int { @@ -140,11 +140,11 @@ func SearchStrings(a []string, x string) int { return Search(len(a), func(i int) bool { return a[i] >= x }) } -// Search returns the result of applying SearchInts to the receiver and x. +// Search returns the result of applying [SearchInts] to the receiver and x. func (p IntSlice) Search(x int) int { return SearchInts(p, x) } -// Search returns the result of applying SearchFloat64s to the receiver and x. +// Search returns the result of applying [SearchFloat64s] to the receiver and x. func (p Float64Slice) Search(x float64) int { return SearchFloat64s(p, x) } -// Search returns the result of applying SearchStrings to the receiver and x. +// Search returns the result of applying [SearchStrings] to the receiver and x. func (p StringSlice) Search(x string) int { return SearchStrings(p, x) } diff --git a/src/sort/slice.go b/src/sort/slice.go index 73ba548a47..bc9dd84ed2 100644 --- a/src/sort/slice.go +++ b/src/sort/slice.go @@ -14,12 +14,12 @@ import ( // // The sort is not guaranteed to be stable: equal elements // may be reversed from their original order. -// For a stable sort, use SliceStable. +// For a stable sort, use [SliceStable]. // // The less function must satisfy the same requirements as // the Interface type's Less method. // -// Note: in many situations, the newer slices.SortFunc function is more +// Note: in many situations, the newer [slices.SortFunc] function is more // ergonomic and runs faster. func Slice(x any, less func(i, j int) bool) { rv := reflectlite.ValueOf(x) @@ -36,7 +36,7 @@ func Slice(x any, less func(i, j int) bool) { // The less function must satisfy the same requirements as // the Interface type's Less method. 
// -// Note: in many situations, the newer slices.SortStableFunc function is more +// Note: in many situations, the newer [slices.SortStableFunc] function is more // ergonomic and runs faster. func SliceStable(x any, less func(i, j int) bool) { rv := reflectlite.ValueOf(x) @@ -47,7 +47,7 @@ func SliceStable(x any, less func(i, j int) bool) { // SliceIsSorted reports whether the slice x is sorted according to the provided less function. // It panics if x is not a slice. // -// Note: in many situations, the newer slices.IsSortedFunc function is more +// Note: in many situations, the newer [slices.IsSortedFunc] function is more // ergonomic and runs faster. func SliceIsSorted(x any, less func(i, j int) bool) bool { rv := reflectlite.ValueOf(x) diff --git a/src/sort/sort.go b/src/sort/sort.go index 8ea62a5e6a..6db161f0c0 100644 --- a/src/sort/sort.go +++ b/src/sort/sort.go @@ -40,7 +40,7 @@ type Interface interface { // It makes one call to data.Len to determine n and O(n*log(n)) calls to // data.Less and data.Swap. The sort is not guaranteed to be stable. // -// Note: in many situations, the newer slices.SortFunc function is more +// Note: in many situations, the newer [slices.SortFunc] function is more // ergonomic and runs faster. func Sort(data Interface) { n := data.Len() @@ -100,7 +100,7 @@ func Reverse(data Interface) Interface { // IsSorted reports whether data is sorted. // -// Note: in many situations, the newer slices.IsSortedFunc function is more +// Note: in many situations, the newer [slices.IsSortedFunc] function is more // ergonomic and runs faster. func IsSorted(data Interface) bool { n := data.Len() @@ -161,34 +161,34 @@ func (x StringSlice) Sort() { Sort(x) } // Ints sorts a slice of ints in increasing order. // -// Note: as of Go 1.22, this function simply calls slices.Sort. +// Note: as of Go 1.22, this function simply calls [slices.Sort]. func Ints(x []int) { intsImpl(x) } // Float64s sorts a slice of float64s in increasing order. 
// Not-a-number (NaN) values are ordered before other values. // -// Note: as of Go 1.22, this function simply calls slices.Sort. +// Note: as of Go 1.22, this function simply calls [slices.Sort]. func Float64s(x []float64) { float64sImpl(x) } // Strings sorts a slice of strings in increasing order. // -// Note: as of Go 1.22, this function simply calls slices.Sort. +// Note: as of Go 1.22, this function simply calls [slices.Sort]. func Strings(x []string) { stringsImpl(x) } // IntsAreSorted reports whether the slice x is sorted in increasing order. // -// Note: as of Go 1.22, this function simply calls slices.IsSorted. +// Note: as of Go 1.22, this function simply calls [slices.IsSorted]. func IntsAreSorted(x []int) bool { return intsAreSortedImpl(x) } // Float64sAreSorted reports whether the slice x is sorted in increasing order, // with not-a-number (NaN) values before any other values. // -// Note: as of Go 1.22, this function simply calls slices.IsSorted. +// Note: as of Go 1.22, this function simply calls [slices.IsSorted]. func Float64sAreSorted(x []float64) bool { return float64sAreSortedImpl(x) } // StringsAreSorted reports whether the slice x is sorted in increasing order. // -// Note: as of Go 1.22, this function simply calls slices.IsSorted. +// Note: as of Go 1.22, this function simply calls [slices.IsSorted]. func StringsAreSorted(x []string) bool { return stringsAreSortedImpl(x) } // Notes on stable sorting: diff --git a/src/sort/sort_slices_benchmark_test.go b/src/sort/sort_slices_benchmark_test.go index 37f3b1bc7e..069536df03 100644 --- a/src/sort/sort_slices_benchmark_test.go +++ b/src/sort/sort_slices_benchmark_test.go @@ -5,7 +5,7 @@ package sort_test import ( - "math/rand" + "math/rand/v2" "slices" . "sort" "strconv" @@ -18,10 +18,10 @@ import ( // package). 
func makeRandomInts(n int) []int { - rand.Seed(42) + r := rand.New(rand.NewPCG(42, 0)) ints := make([]int, n) for i := 0; i < n; i++ { - ints[i] = rand.Intn(n) + ints[i] = r.IntN(n) } return ints } @@ -92,14 +92,14 @@ func BenchmarkSlicesIsSorted(b *testing.B) { // makeRandomStrings generates n random strings with alphabetic runes of // varying lengths. func makeRandomStrings(n int) []string { - rand.Seed(42) + r := rand.New(rand.NewPCG(42, 0)) var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") ss := make([]string, n) for i := 0; i < n; i++ { var sb stringspkg.Builder - slen := 2 + rand.Intn(50) + slen := 2 + r.IntN(50) for j := 0; j < slen; j++ { - sb.WriteRune(letters[rand.Intn(len(letters))]) + sb.WriteRune(letters[r.IntN(len(letters))]) } ss[i] = sb.String() } @@ -156,10 +156,10 @@ func (s myStructs) Less(i, j int) bool { return s[i].n < s[j].n } func (s myStructs) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func makeRandomStructs(n int) myStructs { - rand.Seed(42) + r := rand.New(rand.NewPCG(42, 0)) structs := make([]*myStruct, n) for i := 0; i < n; i++ { - structs[i] = &myStruct{n: rand.Intn(n)} + structs[i] = &myStruct{n: r.IntN(n)} } return structs } diff --git a/src/sort/sort_test.go b/src/sort/sort_test.go index ccb89873af..ba757a845a 100644 --- a/src/sort/sort_test.go +++ b/src/sort/sort_test.go @@ -9,7 +9,7 @@ import ( "fmt" "internal/testenv" "math" - "math/rand" + "math/rand/v2" "slices" . 
"sort" "strconv" @@ -110,7 +110,7 @@ func TestSortLarge_Random(t *testing.T) { } data := make([]int, n) for i := 0; i < len(data); i++ { - data[i] = rand.Intn(100) + data[i] = rand.IntN(100) } if IntsAreSorted(data) { t.Fatalf("terrible rand.rand") @@ -198,7 +198,7 @@ func TestNonDeterministicComparison(t *testing.T) { }() td := &nonDeterministicTestingData{ - r: rand.New(rand.NewSource(0)), + r: rand.New(rand.NewPCG(0, 0)), } for i := 0; i < 10; i++ { @@ -442,13 +442,13 @@ func testBentleyMcIlroy(t *testing.T, sort func(Interface), maxswap func(int) in case _Sawtooth: data[i] = i % m case _Rand: - data[i] = rand.Intn(m) + data[i] = rand.IntN(m) case _Stagger: data[i] = (i*m + i) % n case _Plateau: data[i] = min(i, m) case _Shuffle: - if rand.Intn(m) != 0 { + if rand.IntN(m) != 0 { j += 2 data[i] = j } else { @@ -648,7 +648,7 @@ func TestStability(t *testing.T) { // random distribution for i := 0; i < len(data); i++ { - data[i].a = rand.Intn(m) + data[i].a = rand.IntN(m) } if IsSorted(data) { t.Fatalf("terrible rand.rand") @@ -704,7 +704,7 @@ func countOps(t *testing.T, algo func(Interface), name string) { maxswap: 1<<31 - 1, } for i := 0; i < n; i++ { - td.data[i] = rand.Intn(n / 5) + td.data[i] = rand.IntN(n / 5) } algo(&td) t.Logf("%s %8d elements: %11d Swap, %10d Less", name, n, td.nswap, td.ncmp) diff --git a/src/strings/builder.go b/src/strings/builder.go index 189dadb1e7..7c9b686241 100644 --- a/src/strings/builder.go +++ b/src/strings/builder.go @@ -15,7 +15,11 @@ import ( // Do not copy a non-zero Builder. type Builder struct { addr *Builder // of receiver, to detect copies by value - buf []byte + + // External users should never get direct access to this buffer, since + // the slice at some point will be converted to a string using unsafe, also + // data between len(buf) and cap(buf) might be uninitialized. + buf []byte } // noescape hides a pointer from escape analysis. 
It is the identity function diff --git a/src/strings/builder_test.go b/src/strings/builder_test.go index c3c627ee7d..36fd7a77e3 100644 --- a/src/strings/builder_test.go +++ b/src/strings/builder_test.go @@ -385,3 +385,16 @@ func BenchmarkBuildString_ByteBuffer(b *testing.B) { } }) } + +func TestBuilderGrowSizeclasses(t *testing.T) { + s := Repeat("a", 19) + allocs := testing.AllocsPerRun(100, func() { + var b Builder + b.Grow(18) + b.WriteString(s) + _ = b.String() + }) + if allocs > 1 { + t.Fatalf("unexpected amount of allocations: %v, want: 1", allocs) + } +} diff --git a/src/strings/strings.go b/src/strings/strings.go index ce79bccf8c..f3f0723721 100644 --- a/src/strings/strings.go +++ b/src/strings/strings.go @@ -521,7 +521,7 @@ func Map(mapping func(rune) rune, s string) string { if r < utf8.RuneSelf { b.WriteByte(byte(r)) } else { - // r is not a ASCII rune. + // r is not an ASCII rune. b.WriteRune(r) } } diff --git a/src/sync/atomic/value.go b/src/sync/atomic/value.go index a57b08a6b8..0cfc5f9496 100644 --- a/src/sync/atomic/value.go +++ b/src/sync/atomic/value.go @@ -9,8 +9,8 @@ import ( ) // A Value provides an atomic load and store of a consistently typed value. -// The zero value for a Value returns nil from Load. -// Once Store has been called, a Value must not be copied. +// The zero value for a Value returns nil from [Value.Load]. +// Once [Value.Store] has been called, a Value must not be copied. // // A Value must not be copied after first use. type Value struct { @@ -41,7 +41,7 @@ func (v *Value) Load() (val any) { var firstStoreInProgress byte -// Store sets the value of the Value v to val. +// Store sets the value of the [Value] v to val. // All calls to Store for a given Value must use values of the same concrete type. // Store of an inconsistent type panics, as does Store(nil). 
func (v *Value) Store(val any) { @@ -127,7 +127,7 @@ func (v *Value) Swap(new any) (old any) { } } -// CompareAndSwap executes the compare-and-swap operation for the Value. +// CompareAndSwap executes the compare-and-swap operation for the [Value]. // // All calls to CompareAndSwap for a given Value must use values of the same // concrete type. CompareAndSwap of an inconsistent type panics, as does diff --git a/src/sync/cond.go b/src/sync/cond.go index 7ef3188a1e..614c5fe5ea 100644 --- a/src/sync/cond.go +++ b/src/sync/cond.go @@ -13,21 +13,21 @@ import ( // for goroutines waiting for or announcing the occurrence // of an event. // -// Each Cond has an associated Locker L (often a *Mutex or *RWMutex), +// Each Cond has an associated Locker L (often a [*Mutex] or [*RWMutex]), // which must be held when changing the condition and -// when calling the Wait method. +// when calling the [Cond.Wait] method. // // A Cond must not be copied after first use. // // In the terminology of the Go memory model, Cond arranges that -// a call to Broadcast or Signal “synchronizes before” any Wait call +// a call to [Cond.Broadcast] or [Cond.Signal] “synchronizes before” any Wait call // that it unblocks. // // For many simple use cases, users will be better off using channels than a // Cond (Broadcast corresponds to closing a channel, and Signal corresponds to // sending on a channel). // -// For more on replacements for sync.Cond, see [Roberto Clapis's series on +// For more on replacements for [sync.Cond], see [Roberto Clapis's series on // advanced concurrency patterns], as well as [Bryan Mills's talk on concurrency // patterns]. // @@ -51,7 +51,7 @@ func NewCond(l Locker) *Cond { // Wait atomically unlocks c.L and suspends execution // of the calling goroutine. After later resuming execution, // Wait locks c.L before returning. Unlike in other systems, -// Wait cannot return unless awoken by Broadcast or Signal. 
+// Wait cannot return unless awoken by [Cond.Broadcast] or [Cond.Signal]. // // Because c.L is not locked while Wait is waiting, the caller // typically cannot assume that the condition is true when diff --git a/src/sync/map.go b/src/sync/map.go index 00b2446153..7ae97bce1d 100644 --- a/src/sync/map.go +++ b/src/sync/map.go @@ -8,7 +8,7 @@ import ( "sync/atomic" ) -// Map is like a Go map[interface{}]interface{} but is safe for concurrent use +// Map is like a Go map[any]any but is safe for concurrent use // by multiple goroutines without additional locking or coordination. // Loads, stores, and deletes run in amortized constant time. // @@ -20,18 +20,19 @@ import ( // key is only ever written once but read many times, as in caches that only grow, // or (2) when multiple goroutines read, write, and overwrite entries for disjoint // sets of keys. In these two cases, use of a Map may significantly reduce lock -// contention compared to a Go map paired with a separate Mutex or RWMutex. +// contention compared to a Go map paired with a separate [Mutex] or [RWMutex]. // // The zero Map is empty and ready for use. A Map must not be copied after first use. // // In the terminology of the Go memory model, Map arranges that a write operation // “synchronizes before” any read operation that observes the effect of the write, where // read and write operations are defined as follows. -// Load, LoadAndDelete, LoadOrStore, Swap, CompareAndSwap, and CompareAndDelete -// are read operations; Delete, LoadAndDelete, Store, and Swap are write operations; -// LoadOrStore is a write operation when it returns loaded set to false; -// CompareAndSwap is a write operation when it returns swapped set to true; -// and CompareAndDelete is a write operation when it returns deleted set to true. 
+// [Map.Load], [Map.LoadAndDelete], [Map.LoadOrStore], [Map.Swap], [Map.CompareAndSwap], +// and [Map.CompareAndDelete] are read operations; +// [Map.Delete], [Map.LoadAndDelete], [Map.Store], and [Map.Swap] are write operations; +// [Map.LoadOrStore] is a write operation when it returns loaded set to false; +// [Map.CompareAndSwap] is a write operation when it returns swapped set to true; +// and [Map.CompareAndDelete] is a write operation when it returns deleted set to true. type Map struct { mu Mutex @@ -155,6 +156,27 @@ func (m *Map) Store(key, value any) { _, _ = m.Swap(key, value) } +// Clear deletes all the entries, resulting in an empty Map. +func (m *Map) Clear() { + read := m.loadReadOnly() + if len(read.m) == 0 && !read.amended { + // Avoid allocating a new readOnly when the map is already clear. + return + } + + m.mu.Lock() + defer m.mu.Unlock() + + read = m.loadReadOnly() + if len(read.m) > 0 || read.amended { + m.read.Store(&readOnly{}) + } + + clear(m.dirty) + // Don't immediately promote the newly-cleared dirty map on the next operation. + m.misses = 0 +} + // tryCompareAndSwap compare the entry with the given old value and swaps // it with a new value if the entry is equal to the old value, and the entry // has not been expunged. 
diff --git a/src/sync/map_bench_test.go b/src/sync/map_bench_test.go index eebec3bacf..fb9eb25432 100644 --- a/src/sync/map_bench_test.go +++ b/src/sync/map_bench_test.go @@ -533,3 +533,15 @@ func BenchmarkCompareAndDeleteMostlyMisses(b *testing.B) { }, }) } + +func BenchmarkClear(b *testing.B) { + benchMap(b, bench{ + perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) { + for ; pb.Next(); i++ { + k, v := i%256, i%256 + m.Clear() + m.Store(k, v) + } + }, + }) +} diff --git a/src/sync/map_reference_test.go b/src/sync/map_reference_test.go index aa5ebf352f..283da0f3a9 100644 --- a/src/sync/map_reference_test.go +++ b/src/sync/map_reference_test.go @@ -13,7 +13,7 @@ import ( // mapInterface is the interface Map implements. type mapInterface interface { - Load(any) (any, bool) + Load(key any) (value any, ok bool) Store(key, value any) LoadOrStore(key, value any) (actual any, loaded bool) LoadAndDelete(key any) (value any, loaded bool) @@ -22,6 +22,7 @@ type mapInterface interface { CompareAndSwap(key, old, new any) (swapped bool) CompareAndDelete(key, old any) (deleted bool) Range(func(key, value any) (shouldContinue bool)) + Clear() } var ( @@ -144,6 +145,13 @@ func (m *RWMutexMap) Range(f func(key, value any) (shouldContinue bool)) { } } +func (m *RWMutexMap) Clear() { + m.mu.Lock() + defer m.mu.Unlock() + + clear(m.dirty) +} + // DeepCopyMap is an implementation of mapInterface using a Mutex and // atomic.Value. It makes deep copies of the map on every write to avoid // acquiring the Mutex in Load. 
@@ -269,3 +277,10 @@ func (m *DeepCopyMap) dirty() map[any]any { } return dirty } + +func (m *DeepCopyMap) Clear() { + m.mu.Lock() + defer m.mu.Unlock() + + m.clean.Store((map[any]any)(nil)) +} diff --git a/src/sync/map_test.go b/src/sync/map_test.go index 20872f3b72..e1d0380765 100644 --- a/src/sync/map_test.go +++ b/src/sync/map_test.go @@ -26,6 +26,7 @@ const ( opSwap = mapOp("Swap") opCompareAndSwap = mapOp("CompareAndSwap") opCompareAndDelete = mapOp("CompareAndDelete") + opClear = mapOp("Clear") ) var mapOps = [...]mapOp{ @@ -37,6 +38,7 @@ var mapOps = [...]mapOp{ opSwap, opCompareAndSwap, opCompareAndDelete, + opClear, } // mapCall is a quick.Generator for calls on mapInterface. @@ -74,6 +76,9 @@ func (c mapCall) apply(m mapInterface) (any, bool) { } } return nil, false + case opClear: + m.Clear() + return nil, false default: panic("invalid mapOp") } @@ -278,7 +283,7 @@ func TestCompareAndSwap_NonExistingKey(t *testing.T) { m := &sync.Map{} if m.CompareAndSwap(m, nil, 42) { // See https://go.dev/issue/51972#issuecomment-1126408637. - t.Fatalf("CompareAndSwap on an non-existing key succeeded") + t.Fatalf("CompareAndSwap on a non-existing key succeeded") } } @@ -294,3 +299,61 @@ func TestMapRangeNoAllocations(t *testing.T) { // Issue 62404 t.Errorf("AllocsPerRun of m.Range = %v; want 0", allocs) } } + +// TestConcurrentClear tests concurrent behavior of sync.Map properties to ensure no data races. +// Checks for proper synchronization between Clear, Store, Load operations. 
+func TestConcurrentClear(t *testing.T) { + var m sync.Map + + wg := sync.WaitGroup{} + wg.Add(30) // 10 goroutines for writing, 10 goroutines for reading, 10 goroutines for waiting + + // Writing data to the map concurrently + for i := 0; i < 10; i++ { + go func(k, v int) { + defer wg.Done() + m.Store(k, v) + }(i, i*10) + } + + // Reading data from the map concurrently + for i := 0; i < 10; i++ { + go func(k int) { + defer wg.Done() + if value, ok := m.Load(k); ok { + t.Logf("Key: %v, Value: %v\n", k, value) + } else { + t.Logf("Key: %v not found\n", k) + } + }(i) + } + + // Clearing data from the map concurrently + for i := 0; i < 10; i++ { + go func() { + defer wg.Done() + m.Clear() + }() + } + + wg.Wait() + + m.Clear() + + m.Range(func(k, v any) bool { + t.Errorf("after Clear, Map contains (%v, %v); expected to be empty", k, v) + + return true + }) +} + +func TestMapClearNoAllocations(t *testing.T) { + testenv.SkipIfOptimizationOff(t) + var m sync.Map + allocs := testing.AllocsPerRun(10, func() { + m.Clear() + }) + if allocs > 0 { + t.Errorf("AllocsPerRun of m.Clear = %v; want 0", allocs) + } +} diff --git a/src/sync/mutex.go b/src/sync/mutex.go index 2ea024e585..654804882f 100644 --- a/src/sync/mutex.go +++ b/src/sync/mutex.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package sync provides basic synchronization primitives such as mutual -// exclusion locks. Other than the Once and WaitGroup types, most are intended +// exclusion locks. Other than the [Once] and [WaitGroup] types, most are intended // for use by low-level library routines. Higher-level synchronization is // better done via channels and communication. // @@ -26,9 +26,9 @@ func fatal(string) // A Mutex must not be copied after first use. // // In the terminology of the Go memory model, -// the n'th call to Unlock “synchronizes before” the m'th call to Lock +// the n'th call to [Mutex.Unlock] “synchronizes before” the m'th call to [Mutex.Lock] // for any n < m. 
-// A successful call to TryLock is equivalent to a call to Lock. +// A successful call to [Mutex.TryLock] is equivalent to a call to Lock. // A failed call to TryLock does not establish any “synchronizes before” // relation at all. type Mutex struct { @@ -206,7 +206,7 @@ func (m *Mutex) lockSlow() { // Unlock unlocks m. // It is a run-time error if m is not locked on entry to Unlock. // -// A locked Mutex is not associated with a particular goroutine. +// A locked [Mutex] is not associated with a particular goroutine. // It is allowed for one goroutine to lock a Mutex and then // arrange for another goroutine to unlock it. func (m *Mutex) Unlock() { diff --git a/src/sync/once.go b/src/sync/once.go index 3f58707e1c..de69d70187 100644 --- a/src/sync/once.go +++ b/src/sync/once.go @@ -26,7 +26,7 @@ type Once struct { } // Do calls the function f if and only if Do is being called for the -// first time for this instance of Once. In other words, given +// first time for this instance of [Once]. In other words, given // // var once Once // diff --git a/src/sync/oncefunc.go b/src/sync/oncefunc.go index 9ef8344132..db286283d1 100644 --- a/src/sync/oncefunc.go +++ b/src/sync/oncefunc.go @@ -25,7 +25,8 @@ func OnceFunc(f func()) func() { } }() f() - valid = true // Set only if f does not panic + f = nil // Do not keep f alive after invoking it. + valid = true // Set only if f does not panic. 
} return func() { once.Do(g) @@ -54,6 +55,7 @@ func OnceValue[T any](f func() T) func() T { } }() result = f() + f = nil valid = true } return func() T { @@ -85,6 +87,7 @@ func OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) { } }() r1, r2 = f() + f = nil valid = true } return func() (T1, T2) { diff --git a/src/sync/oncefunc_test.go b/src/sync/oncefunc_test.go index 3c523a5b62..5f0d564063 100644 --- a/src/sync/oncefunc_test.go +++ b/src/sync/oncefunc_test.go @@ -6,10 +6,13 @@ package sync_test import ( "bytes" + "math" "runtime" "runtime/debug" "sync" + "sync/atomic" "testing" + _ "unsafe" ) // We assume that the Once.Do tests have already covered parallelism. @@ -182,6 +185,53 @@ func onceFuncPanic() { panic("x") } +func TestOnceXGC(t *testing.T) { + fns := map[string]func([]byte) func(){ + "OnceFunc": func(buf []byte) func() { + return sync.OnceFunc(func() { buf[0] = 1 }) + }, + "OnceValue": func(buf []byte) func() { + f := sync.OnceValue(func() any { buf[0] = 1; return nil }) + return func() { f() } + }, + "OnceValues": func(buf []byte) func() { + f := sync.OnceValues(func() (any, any) { buf[0] = 1; return nil, nil }) + return func() { f() } + }, + } + for n, fn := range fns { + t.Run(n, func(t *testing.T) { + buf := make([]byte, 1024) + var gc atomic.Bool + runtime.SetFinalizer(&buf[0], func(_ *byte) { + gc.Store(true) + }) + f := fn(buf) + gcwaitfin() + if gc.Load() != false { + t.Fatal("wrapped function garbage collected too early") + } + f() + gcwaitfin() + if gc.Load() != true { + // Even if f is still alive, the function passed to Once(Func|Value|Values) + // is not kept alive after the first call to f. + t.Fatal("wrapped function should be garbage collected, but still live") + } + f() + }) + } +} + +// gcwaitfin performs garbage collection and waits for all finalizers to run. 
+func gcwaitfin() { + runtime.GC() + runtime_blockUntilEmptyFinalizerQueue(math.MaxInt64) +} + +//go:linkname runtime_blockUntilEmptyFinalizerQueue runtime.blockUntilEmptyFinalizerQueue +func runtime_blockUntilEmptyFinalizerQueue(int64) bool + var ( onceFunc = sync.OnceFunc(func() {}) diff --git a/src/sync/pool.go b/src/sync/pool.go index ffab67bf19..e094849974 100644 --- a/src/sync/pool.go +++ b/src/sync/pool.go @@ -43,7 +43,7 @@ import ( // A Pool must not be copied after first use. // // In the terminology of the Go memory model, a call to Put(x) “synchronizes before” -// a call to Get returning that same value x. +// a call to [Pool.Get] returning that same value x. // Similarly, a call to New returning x “synchronizes before” // a call to Get returning that same value x. type Pool struct { @@ -76,7 +76,9 @@ type poolLocal struct { } // from runtime -func fastrandn(n uint32) uint32 +// +//go:linkname runtime_randn runtime.randn +func runtime_randn(n uint32) uint32 var poolRaceHash [128]uint64 @@ -97,7 +99,7 @@ func (p *Pool) Put(x any) { return } if race.Enabled { - if fastrandn(4) == 0 { + if runtime_randn(4) == 0 { // Randomly drop x on floor. return } @@ -116,10 +118,10 @@ func (p *Pool) Put(x any) { } } -// Get selects an arbitrary item from the Pool, removes it from the +// Get selects an arbitrary item from the [Pool], removes it from the // Pool, and returns it to the caller. // Get may choose to ignore the pool and treat it as empty. -// Callers should not assume any relation between values passed to Put and +// Callers should not assume any relation between values passed to [Pool.Put] and // the values returned by Get. // // If Get would otherwise return nil and p.New is non-nil, Get returns diff --git a/src/sync/poolqueue.go b/src/sync/poolqueue.go index 5c640f988a..e9593f8c44 100644 --- a/src/sync/poolqueue.go +++ b/src/sync/poolqueue.go @@ -198,7 +198,7 @@ type poolChain struct { // tail is the poolDequeue to popTail from. 
This is accessed // by consumers, so reads and writes must be atomic. - tail *poolChainElt + tail atomic.Pointer[poolChainElt] } type poolChainElt struct { @@ -214,15 +214,7 @@ type poolChainElt struct { // prev is written atomically by the consumer and read // atomically by the producer. It only transitions from // non-nil to nil. - next, prev *poolChainElt -} - -func storePoolChainElt(pp **poolChainElt, v *poolChainElt) { - atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v)) -} - -func loadPoolChainElt(pp **poolChainElt) *poolChainElt { - return (*poolChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp)))) + next, prev atomic.Pointer[poolChainElt] } func (c *poolChain) pushHead(val any) { @@ -233,7 +225,7 @@ func (c *poolChain) pushHead(val any) { d = new(poolChainElt) d.vals = make([]eface, initSize) c.head = d - storePoolChainElt(&c.tail, d) + c.tail.Store(d) } if d.pushHead(val) { @@ -248,10 +240,11 @@ func (c *poolChain) pushHead(val any) { newSize = dequeueLimit } - d2 := &poolChainElt{prev: d} + d2 := &poolChainElt{} + d2.prev.Store(d) d2.vals = make([]eface, newSize) c.head = d2 - storePoolChainElt(&d.next, d2) + d.next.Store(d2) d2.pushHead(val) } @@ -263,13 +256,13 @@ func (c *poolChain) popHead() (any, bool) { } // There may still be unconsumed elements in the // previous dequeue, so try backing up. - d = loadPoolChainElt(&d.prev) + d = d.prev.Load() } return nil, false } func (c *poolChain) popTail() (any, bool) { - d := loadPoolChainElt(&c.tail) + d := c.tail.Load() if d == nil { return nil, false } @@ -281,7 +274,7 @@ func (c *poolChain) popTail() (any, bool) { // the pop and the pop fails, then d is permanently // empty, which is the only condition under which it's // safe to drop d from the chain. - d2 := loadPoolChainElt(&d.next) + d2 := d.next.Load() if val, ok := d.popTail(); ok { return val, ok @@ -297,12 +290,12 @@ func (c *poolChain) popTail() (any, bool) { // to the next dequeue. 
Try to drop it from the chain // so the next pop doesn't have to look at the empty // dequeue again. - if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) { + if c.tail.CompareAndSwap(d, d2) { // We won the race. Clear the prev pointer so // the garbage collector can collect the empty // dequeue and so popHead doesn't back up // further than necessary. - storePoolChainElt(&d2.prev, nil) + d2.prev.Store(nil) } d = d2 } diff --git a/src/sync/rwmutex.go b/src/sync/rwmutex.go index f445b66fd7..1215c377a1 100644 --- a/src/sync/rwmutex.go +++ b/src/sync/rwmutex.go @@ -19,18 +19,18 @@ import ( // // A RWMutex must not be copied after first use. // -// If any goroutine calls Lock while the lock is already held by -// one or more readers, concurrent calls to RLock will block until +// If any goroutine calls [RWMutex.Lock] while the lock is already held by +// one or more readers, concurrent calls to [RWMutex.RLock] will block until // the writer has acquired (and released) the lock, to ensure that // the lock eventually becomes available to the writer. // Note that this prohibits recursive read-locking. // // In the terminology of the Go memory model, -// the n'th call to Unlock “synchronizes before” the m'th call to Lock -// for any n < m, just as for Mutex. +// the n'th call to [RWMutex.Unlock] “synchronizes before” the m'th call to Lock +// for any n < m, just as for [Mutex]. // For any call to RLock, there exists an n such that // the n'th call to Unlock “synchronizes before” that call to RLock, -// and the corresponding call to RUnlock “synchronizes before” +// and the corresponding call to [RWMutex.RUnlock] “synchronizes before” // the n+1'th call to Lock. type RWMutex struct { w Mutex // held if there are pending writers @@ -59,7 +59,7 @@ const rwmutexMaxReaders = 1 << 30 // // It should not be used for recursive read locking; a blocked Lock // call excludes new readers from acquiring the lock. 
See the -// documentation on the RWMutex type. +// documentation on the [RWMutex] type. func (rw *RWMutex) RLock() { if race.Enabled { _ = rw.w.state @@ -103,7 +103,7 @@ func (rw *RWMutex) TryRLock() bool { } } -// RUnlock undoes a single RLock call; +// RUnlock undoes a single [RWMutex.RLock] call; // it does not affect other simultaneous readers. // It is a run-time error if rw is not locked for reading // on entry to RUnlock. @@ -191,9 +191,9 @@ func (rw *RWMutex) TryLock() bool { // Unlock unlocks rw for writing. It is a run-time error if rw is // not locked for writing on entry to Unlock. // -// As with Mutexes, a locked RWMutex is not associated with a particular -// goroutine. One goroutine may RLock (Lock) a RWMutex and then -// arrange for another goroutine to RUnlock (Unlock) it. +// As with Mutexes, a locked [RWMutex] is not associated with a particular +// goroutine. One goroutine may [RWMutex.RLock] ([RWMutex.Lock]) a RWMutex and then +// arrange for another goroutine to [RWMutex.RUnlock] ([RWMutex.Unlock]) it. func (rw *RWMutex) Unlock() { if race.Enabled { _ = rw.w.state @@ -231,8 +231,8 @@ func syscall_hasWaitingReaders(rw *RWMutex) bool { return r < 0 && r+rwmutexMaxReaders > 0 } -// RLocker returns a Locker interface that implements -// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock. +// RLocker returns a [Locker] interface that implements +// the [RWMutex.Lock] and [RWMutex.Unlock] methods by calling rw.RLock and rw.RUnlock. func (rw *RWMutex) RLocker() Locker { return (*rlocker)(rw) } diff --git a/src/sync/waitgroup.go b/src/sync/waitgroup.go index be21417f9c..7320705b0e 100644 --- a/src/sync/waitgroup.go +++ b/src/sync/waitgroup.go @@ -11,14 +11,14 @@ import ( ) // A WaitGroup waits for a collection of goroutines to finish. -// The main goroutine calls Add to set the number of +// The main goroutine calls [WaitGroup.Add] to set the number of // goroutines to wait for. 
Then each of the goroutines -// runs and calls Done when finished. At the same time, -// Wait can be used to block until all goroutines have finished. +// runs and calls [WaitGroup.Done] when finished. At the same time, +// [WaitGroup.Wait] can be used to block until all goroutines have finished. // // A WaitGroup must not be copied after first use. // -// In the terminology of the Go memory model, a call to Done +// In the terminology of the Go memory model, a call to [WaitGroup.Done] // “synchronizes before” the return of any Wait call that it unblocks. type WaitGroup struct { noCopy noCopy @@ -27,8 +27,8 @@ type WaitGroup struct { sema uint32 } -// Add adds delta, which may be negative, to the WaitGroup counter. -// If the counter becomes zero, all goroutines blocked on Wait are released. +// Add adds delta, which may be negative, to the [WaitGroup] counter. +// If the counter becomes zero, all goroutines blocked on [WaitGroup.Wait] are released. // If the counter goes negative, Add panics. // // Note that calls with a positive delta that occur when the counter is zero @@ -82,12 +82,12 @@ func (wg *WaitGroup) Add(delta int) { } } -// Done decrements the WaitGroup counter by one. +// Done decrements the [WaitGroup] counter by one. func (wg *WaitGroup) Done() { wg.Add(-1) } -// Wait blocks until the WaitGroup counter is zero. +// Wait blocks until the [WaitGroup] counter is zero. func (wg *WaitGroup) Wait() { if race.Enabled { race.Disable() diff --git a/src/syscall/dir_plan9.go b/src/syscall/dir_plan9.go index 1667cbc02f..464fe748f7 100644 --- a/src/syscall/dir_plan9.go +++ b/src/syscall/dir_plan9.go @@ -54,12 +54,12 @@ var nullDir = Dir{ } // Null assigns special "don't touch" values to members of d to -// avoid modifying them during syscall.Wstat. +// avoid modifying them during [Wstat]. 
func (d *Dir) Null() { *d = nullDir } // Marshal encodes a 9P stat message corresponding to d into b // -// If there isn't enough space in b for a stat message, ErrShortStat is returned. +// If there isn't enough space in b for a stat message, [ErrShortStat] is returned. func (d *Dir) Marshal(b []byte) (n int, err error) { n = STATFIXLEN + len(d.Name) + len(d.Uid) + len(d.Gid) + len(d.Muid) if n > len(b) { @@ -92,9 +92,9 @@ func (d *Dir) Marshal(b []byte) (n int, err error) { // UnmarshalDir decodes a single 9P stat message from b and returns the resulting Dir. // -// If b is too small to hold a valid stat message, ErrShortStat is returned. +// If b is too small to hold a valid stat message, [ErrShortStat] is returned. // -// If the stat message itself is invalid, ErrBadStat is returned. +// If the stat message itself is invalid, [ErrBadStat] is returned. func UnmarshalDir(b []byte) (*Dir, error) { if len(b) < STATFIXLEN { return nil, ErrShortStat diff --git a/src/syscall/dll_windows.go b/src/syscall/dll_windows.go index 5f62b5512c..81134cb0bd 100644 --- a/src/syscall/dll_windows.go +++ b/src/syscall/dll_windows.go @@ -24,22 +24,22 @@ func (e *DLLError) Unwrap() error { return e.Err } // Implemented in ../runtime/syscall_windows.go. -// Deprecated: Use SyscallN instead. +// Deprecated: Use [SyscallN] instead. func Syscall(trap, nargs, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) -// Deprecated: Use SyscallN instead. +// Deprecated: Use [SyscallN] instead. func Syscall6(trap, nargs, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) -// Deprecated: Use SyscallN instead. +// Deprecated: Use [SyscallN] instead. func Syscall9(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) -// Deprecated: Use SyscallN instead. +// Deprecated: Use [SyscallN] instead. func Syscall12(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12 uintptr) (r1, r2 uintptr, err Errno) -// Deprecated: Use SyscallN instead. 
+// Deprecated: Use [SyscallN] instead. func Syscall15(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15 uintptr) (r1, r2 uintptr, err Errno) -// Deprecated: Use SyscallN instead. +// Deprecated: Use [SyscallN] instead. func Syscall18(trap, nargs, a1, a2, a3, a4, a5, a6, a7, a8, a9, a10, a11, a12, a13, a14, a15, a16, a17, a18 uintptr) (r1, r2 uintptr, err Errno) func SyscallN(trap uintptr, args ...uintptr) (r1, r2 uintptr, err Errno) @@ -59,7 +59,7 @@ type DLL struct { // Go, Windows will search for the named DLL in many locations, causing // potential DLL preloading attacks. // -// Use LazyDLL in golang.org/x/sys/windows for a secure way to +// Use [LazyDLL] in golang.org/x/sys/windows for a secure way to // load system DLLs. func LoadDLL(name string) (*DLL, error) { namep, err := UTF16PtrFromString(name) @@ -87,7 +87,7 @@ func LoadDLL(name string) (*DLL, error) { return d, nil } -// MustLoadDLL is like LoadDLL but panics if load operation fails. +// MustLoadDLL is like [LoadDLL] but panics if load operation fails. func MustLoadDLL(name string) *DLL { d, e := LoadDLL(name) if e != nil { @@ -96,7 +96,7 @@ func MustLoadDLL(name string) *DLL { return d } -// FindProc searches DLL d for procedure named name and returns *Proc +// FindProc searches [DLL] d for procedure named name and returns [*Proc] // if found. It returns an error if search fails. func (d *DLL) FindProc(name string) (proc *Proc, err error) { namep, err := BytePtrFromString(name) @@ -119,7 +119,7 @@ func (d *DLL) FindProc(name string) (proc *Proc, err error) { return p, nil } -// MustFindProc is like FindProc but panics if search fails. +// MustFindProc is like [DLL.FindProc] but panics if search fails. func (d *DLL) MustFindProc(name string) *Proc { p, e := d.FindProc(name) if e != nil { @@ -128,12 +128,12 @@ func (d *DLL) MustFindProc(name string) *Proc { return p } -// Release unloads DLL d from memory. +// Release unloads [DLL] d from memory. 
func (d *DLL) Release() (err error) { return FreeLibrary(d.Handle) } -// A Proc implements access to a procedure inside a DLL. +// A Proc implements access to a procedure inside a [DLL]. type Proc struct { Dll *DLL Name string @@ -151,28 +151,28 @@ func (p *Proc) Addr() uintptr { // The returned error is always non-nil, constructed from the result of GetLastError. // Callers must inspect the primary return value to decide whether an error occurred // (according to the semantics of the specific function being called) before consulting -// the error. The error always has type syscall.Errno. +// the error. The error always has type [Errno]. // // On amd64, Call can pass and return floating-point values. To pass // an argument x with C type "float", use // uintptr(math.Float32bits(x)). To pass an argument with C type // "double", use uintptr(math.Float64bits(x)). Floating-point return // values are returned in r2. The return value for C type "float" is -// math.Float32frombits(uint32(r2)). For C type "double", it is -// math.Float64frombits(uint64(r2)). +// [math.Float32frombits](uint32(r2)). For C type "double", it is +// [math.Float64frombits](uint64(r2)). // //go:uintptrescapes func (p *Proc) Call(a ...uintptr) (uintptr, uintptr, error) { return SyscallN(p.Addr(), a...) } -// A LazyDLL implements access to a single DLL. +// A LazyDLL implements access to a single [DLL]. // It will delay the load of the DLL until the first -// call to its Handle method or to one of its -// LazyProc's Addr method. +// call to its [LazyDLL.Handle] method or to one of its +// [LazyProc]'s Addr method. // // LazyDLL is subject to the same DLL preloading attacks as documented -// on LoadDLL. +// on [LoadDLL]. // // Use LazyDLL in golang.org/x/sys/windows for a secure way to // load system DLLs. @@ -217,18 +217,18 @@ func (d *LazyDLL) Handle() uintptr { return uintptr(d.dll.Handle) } -// NewProc returns a LazyProc for accessing the named procedure in the DLL d. 
+// NewProc returns a [LazyProc] for accessing the named procedure in the [DLL] d. func (d *LazyDLL) NewProc(name string) *LazyProc { return &LazyProc{l: d, Name: name} } -// NewLazyDLL creates new LazyDLL associated with DLL file. +// NewLazyDLL creates new [LazyDLL] associated with [DLL] file. func NewLazyDLL(name string) *LazyDLL { return &LazyDLL{Name: name} } -// A LazyProc implements access to a procedure inside a LazyDLL. -// It delays the lookup until the Addr, Call, or Find method is called. +// A LazyProc implements access to a procedure inside a [LazyDLL]. +// It delays the lookup until the [LazyProc.Addr], [LazyProc.Call], or [LazyProc.Find] method is called. type LazyProc struct { mu sync.Mutex Name string @@ -236,7 +236,7 @@ type LazyProc struct { proc *Proc } -// Find searches DLL for procedure named p.Name. It returns +// Find searches [DLL] for procedure named p.Name. It returns // an error if search fails. Find will not search procedure, // if it is already found and loaded into memory. func (p *LazyProc) Find() error { diff --git a/src/syscall/exec_linux.go b/src/syscall/exec_linux.go index e1c71b5a34..e6d6343ed8 100644 --- a/src/syscall/exec_linux.go +++ b/src/syscall/exec_linux.go @@ -133,7 +133,7 @@ func runtime_AfterForkInChild() func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid int, err Errno) { // Set up and fork. This returns immediately in the parent or // if there's an error. 
- upid, err, mapPipe, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe) + upid, pidfd, err, mapPipe, locked := forkAndExecInChild1(argv0, argv, envv, chroot, dir, attr, sys, pipe) if locked { runtime_AfterFork() } @@ -143,6 +143,9 @@ func forkAndExecInChild(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr // parent; return PID pid = int(upid) + if sys.PidFD != nil { + *sys.PidFD = int(pidfd) + } if sys.UidMappings != nil || sys.GidMappings != nil { Close(mapPipe[0]) @@ -210,7 +213,7 @@ type cloneArgs struct { //go:noinline //go:norace //go:nocheckptr -func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid uintptr, err1 Errno, mapPipe [2]int, locked bool) { +func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, attr *ProcAttr, sys *SysProcAttr, pipe int) (pid uintptr, pidfd int32, err1 Errno, mapPipe [2]int, locked bool) { // Defined in linux/prctl.h starting with Linux 4.3. const ( PR_CAP_AMBIENT = 0x2f @@ -241,12 +244,12 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att uidmap, setgroups, gidmap []byte clone3 *cloneArgs pgrp int32 - pidfd _C_int = -1 dirfd int cred *Credential ngroups, groups uintptr c uintptr ) + pidfd = -1 rlim := origRlimitNofile.Load() @@ -341,10 +344,6 @@ func forkAndExecInChild1(argv0 *byte, argv, envv []*byte, chroot, dir *byte, att // Fork succeeded, now in child. - if sys.PidFD != nil { - *sys.PidFD = int(pidfd) - } - // Enable the "keep capabilities" flag to set ambient capabilities later. 
if len(sys.AmbientCaps) > 0 { _, _, err1 = RawSyscall6(SYS_PRCTL, PR_SET_KEEPCAPS, 1, 0, 0, 0, 0) diff --git a/src/syscall/exec_linux_test.go b/src/syscall/exec_linux_test.go index f255930aa8..5ec1a24ba4 100644 --- a/src/syscall/exec_linux_test.go +++ b/src/syscall/exec_linux_test.go @@ -12,6 +12,7 @@ import ( "flag" "fmt" "internal/platform" + "internal/syscall/unix" "internal/testenv" "io" "os" @@ -218,7 +219,7 @@ func TestGroupCleanupUserNamespace(t *testing.T) { // Test for https://go.dev/issue/19661: unshare fails because systemd // has forced / to be shared func TestUnshareMountNameSpace(t *testing.T) { - const mountNotSupported = "mount is not supported: " // Output prefix indicatating a test skip. + const mountNotSupported = "mount is not supported: " // Output prefix indicating a test skip. if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { dir := flag.Args()[0] err := syscall.Mount("none", dir, "proc", 0, "") @@ -272,7 +273,7 @@ func TestUnshareMountNameSpace(t *testing.T) { // Test for Issue 20103: unshare fails when chroot is used func TestUnshareMountNameSpaceChroot(t *testing.T) { - const mountNotSupported = "mount is not supported: " // Output prefix indicatating a test skip. + const mountNotSupported = "mount is not supported: " // Output prefix indicating a test skip. if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { dir := flag.Args()[0] err := syscall.Mount("none", dir, "proc", 0, "") @@ -522,7 +523,7 @@ func TestCloneTimeNamespace(t *testing.T) { } } -func testPidFD(t *testing.T) error { +func testPidFD(t *testing.T, userns bool) error { testenv.MustHaveExec(t) if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { @@ -541,6 +542,9 @@ func testPidFD(t *testing.T) error { cmd.SysProcAttr = &syscall.SysProcAttr{ PidFD: &pidfd, } + if userns { + cmd.SysProcAttr.Cloneflags = syscall.CLONE_NEWUSER + } if err := cmd.Start(); err != nil { return err } @@ -557,11 +561,11 @@ func testPidFD(t *testing.T) error { // Use pidfd to send a signal to the child. 
sig := syscall.SIGINT - if _, _, e := syscall.Syscall(syscall.Sys_pidfd_send_signal, uintptr(pidfd), uintptr(sig), 0); e != 0 { - if e != syscall.EINVAL && testenv.SyscallIsNotSupported(e) { - t.Skip("pidfd_send_signal syscall not supported:", e) + if err := unix.PidFDSendSignal(uintptr(pidfd), sig); err != nil { + if err != syscall.EINVAL && testenv.SyscallIsNotSupported(err) { + t.Skip("pidfd_send_signal syscall not supported:", err) } - t.Fatal("pidfd_send_signal syscall failed:", e) + t.Fatal("pidfd_send_signal syscall failed:", err) } // Check if the child received our signal. err = cmd.Wait() @@ -572,7 +576,16 @@ func testPidFD(t *testing.T) error { } func TestPidFD(t *testing.T) { - if err := testPidFD(t); err != nil { + if err := testPidFD(t, false); err != nil { + t.Fatal("can't start a process:", err) + } +} + +func TestPidFDWithUserNS(t *testing.T) { + if err := testPidFD(t, true); err != nil { + if testenv.SyscallIsNotSupported(err) { + t.Skip("userns not supported:", err) + } t.Fatal("can't start a process:", err) } } @@ -581,7 +594,7 @@ func TestPidFDClone3(t *testing.T) { *syscall.ForceClone3 = true defer func() { *syscall.ForceClone3 = false }() - if err := testPidFD(t); err != nil { + if err := testPidFD(t, false); err != nil { if testenv.SyscallIsNotSupported(err) { t.Skip("clone3 not supported:", err) } diff --git a/src/syscall/exec_plan9.go b/src/syscall/exec_plan9.go index 8762237825..91705e175e 100644 --- a/src/syscall/exec_plan9.go +++ b/src/syscall/exec_plan9.go @@ -69,7 +69,7 @@ func StringSlicePtr(ss []string) []*byte { // SlicePtrFromStrings converts a slice of strings to a slice of // pointers to NUL-terminated byte arrays. If any string contains -// a NUL byte, it returns (nil, EINVAL). +// a NUL byte, it returns (nil, [EINVAL]). 
func SlicePtrFromStrings(ss []string) ([]*byte, error) { var err error bb := make([]*byte, len(ss)+1) @@ -528,7 +528,7 @@ func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) return startProcess(argv0, argv, attr) } -// StartProcess wraps ForkExec for package os. +// StartProcess wraps [ForkExec] for package os. func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) { pid, err = startProcess(argv0, argv, attr) return pid, 0, err @@ -581,7 +581,7 @@ func Exec(argv0 string, argv []string, envv []string) (err error) { // WaitProcess waits until the pid of a // running process is found in the queue of // wait messages. It is used in conjunction -// with ForkExec/StartProcess to wait for a +// with [ForkExec]/[StartProcess] to wait for a // running process to exit. func WaitProcess(pid int, w *Waitmsg) (err error) { procs.Lock() diff --git a/src/syscall/exec_unix.go b/src/syscall/exec_unix.go index 469b660198..1b90aa7e72 100644 --- a/src/syscall/exec_unix.go +++ b/src/syscall/exec_unix.go @@ -54,13 +54,13 @@ import ( // The rules for which file descriptor-creating operations use the // ForkLock are as follows: // -// - Pipe. Use pipe2 if available. Otherwise, does not block, +// - [Pipe]. Use pipe2 if available. Otherwise, does not block, // so use ForkLock. -// - Socket. Use SOCK_CLOEXEC if available. Otherwise, does not +// - [Socket]. Use SOCK_CLOEXEC if available. Otherwise, does not // block, so use ForkLock. -// - Open. Use O_CLOEXEC if available. Otherwise, may block, +// - [Open]. Use [O_CLOEXEC] if available. Otherwise, may block, // so live with the race. -// - Dup. Use F_DUPFD_CLOEXEC or dup3 if available. Otherwise, +// - [Dup]. Use [F_DUPFD_CLOEXEC] or dup3 if available. Otherwise, // does not block, so use ForkLock. var ForkLock sync.RWMutex @@ -68,7 +68,7 @@ var ForkLock sync.RWMutex // to NUL-terminated byte arrays. 
If any string contains a NUL byte // this function panics instead of returning an error. // -// Deprecated: Use SlicePtrFromStrings instead. +// Deprecated: Use [SlicePtrFromStrings] instead. func StringSlicePtr(ss []string) []*byte { bb := make([]*byte, len(ss)+1) for i := 0; i < len(ss); i++ { @@ -80,7 +80,7 @@ func StringSlicePtr(ss []string) []*byte { // SlicePtrFromStrings converts a slice of strings to a slice of // pointers to NUL-terminated byte arrays. If any string contains -// a NUL byte, it returns (nil, EINVAL). +// a NUL byte, it returns (nil, [EINVAL]). func SlicePtrFromStrings(ss []string) ([]*byte, error) { n := 0 for _, s := range ss { @@ -120,7 +120,7 @@ func SetNonblock(fd int, nonblocking bool) (err error) { } // Credential holds user and group identities to be assumed -// by a child process started by StartProcess. +// by a child process started by [StartProcess]. type Credential struct { Uid uint32 // User ID. Gid uint32 // Group ID. @@ -129,7 +129,7 @@ type Credential struct { } // ProcAttr holds attributes that will be applied to a new process started -// by StartProcess. +// by [StartProcess]. type ProcAttr struct { Dir string // Current working directory. Env []string // Environment. @@ -249,7 +249,7 @@ func ForkExec(argv0 string, argv []string, attr *ProcAttr) (pid int, err error) return forkExec(argv0, argv, attr) } -// StartProcess wraps ForkExec for package os. +// StartProcess wraps [ForkExec] for package os. 
func StartProcess(argv0 string, argv []string, attr *ProcAttr) (pid int, handle uintptr, err error) { pid, err = forkExec(argv0, argv, attr) return pid, 0, err diff --git a/src/syscall/export_linux_test.go b/src/syscall/export_linux_test.go index a09db60753..3aa877cfe3 100644 --- a/src/syscall/export_linux_test.go +++ b/src/syscall/export_linux_test.go @@ -10,6 +10,5 @@ var ( ) const ( - Sys_GETEUID = sys_GETEUID - Sys_pidfd_send_signal = _SYS_pidfd_send_signal + Sys_GETEUID = sys_GETEUID ) diff --git a/src/syscall/flock_aix.go b/src/syscall/flock_aix.go index c9eab43b6b..d8be7ab504 100644 --- a/src/syscall/flock_aix.go +++ b/src/syscall/flock_aix.go @@ -8,7 +8,7 @@ import "unsafe" // On AIX, there is no flock() system call. -// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +// FcntlFlock performs a fcntl syscall for the [F_GETLK], [F_SETLK] or [F_SETLKW] command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) (err error) { _, _, e1 := syscall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) if e1 != 0 { diff --git a/src/syscall/flock_bsd.go b/src/syscall/flock_bsd.go index 68d3470848..3be2656be7 100644 --- a/src/syscall/flock_bsd.go +++ b/src/syscall/flock_bsd.go @@ -8,7 +8,7 @@ package syscall import "unsafe" -// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +// FcntlFlock performs a fcntl syscall for the [F_GETLK], [F_SETLK] or [F_SETLKW] command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, err := fcntlPtr(int(fd), cmd, unsafe.Pointer(lk)) return err diff --git a/src/syscall/flock_linux.go b/src/syscall/flock_linux.go index 7d1169b428..2e87b2e0e4 100644 --- a/src/syscall/flock_linux.go +++ b/src/syscall/flock_linux.go @@ -10,7 +10,7 @@ import "unsafe" // systems by flock_linux_32bit.go to be SYS_FCNTL64. 
var fcntl64Syscall uintptr = SYS_FCNTL -// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +// FcntlFlock performs a fcntl syscall for the [F_GETLK], [F_SETLK] or [F_SETLKW] command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) if errno == 0 { diff --git a/src/syscall/flock_linux_32bit.go b/src/syscall/flock_linux_32bit.go index 76a09fc47e..927c4dfffd 100644 --- a/src/syscall/flock_linux_32bit.go +++ b/src/syscall/flock_linux_32bit.go @@ -2,9 +2,6 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// If you change the build tags here, see -// internal/syscall/unix/fcntl_linux_32bit.go. - //go:build (linux && 386) || (linux && arm) || (linux && mips) || (linux && mipsle) package syscall diff --git a/src/syscall/fs_wasip1.go b/src/syscall/fs_wasip1.go index 4ad3f9610b..4d3d7d72c6 100644 --- a/src/syscall/fs_wasip1.go +++ b/src/syscall/fs_wasip1.go @@ -566,7 +566,7 @@ func Open(path string, openmode int, perm uint32) (int, error) { if errno == EISDIR && oflags == 0 && fdflags == 0 && ((rights & writeRights) == 0) { // wasmtime and wasmedge will error if attempting to open a directory // because we are asking for too many rights. However, we cannot - // determine ahread of time if the path we are about to open is a + // determine ahead of time if the path we are about to open is a // directory, so instead we fallback to a second call to path_open with // a more limited set of rights. 
// diff --git a/src/syscall/mksyscall.pl b/src/syscall/mksyscall.pl index 73d4b0f6e3..47efbffcbc 100755 --- a/src/syscall/mksyscall.pl +++ b/src/syscall/mksyscall.pl @@ -85,6 +85,9 @@ if($ARGV[0] =~ /^-/) { if($libc) { $extraimports = 'import "internal/abi"'; } +if($darwin) { + $extraimports .= "\nimport \"runtime\""; +} sub parseparamlist($) { my ($list) = @_; @@ -137,7 +140,7 @@ while(<>) { # without reading the header. $text .= "// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT\n\n"; - if (($darwin && $func =~ /^ptrace1(Ptr)?$/) || (($openbsd && $libc) && $func =~ /^ptrace(Ptr)?$/)) { + if ((($darwin || ($openbsd && $libc)) && $func =~ /^ptrace(Ptr)?$/)) { # The ptrace function is called from forkAndExecInChild where stack # growth is forbidden. $text .= "//go:nosplit\n" @@ -147,6 +150,13 @@ while(<>) { my $out_decl = @out ? sprintf(" (%s)", join(', ', @out)) : ""; $text .= sprintf "func %s(%s)%s {\n", $func, join(', ', @in), $out_decl; + # Disable ptrace on iOS. + if ($darwin && $func =~ /^ptrace(Ptr)?$/) { + $text .= "\tif runtime.GOOS == \"ios\" {\n"; + $text .= "\t\tpanic(\"unimplemented\")\n"; + $text .= "\t}\n"; + } + # Check if err return available my $errvar = ""; foreach my $p (@out) { diff --git a/src/syscall/ptrace_darwin.go b/src/syscall/ptrace_darwin.go deleted file mode 100644 index 519e451c73..0000000000 --- a/src/syscall/ptrace_darwin.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !ios - -package syscall - -// Nosplit because it is called from forkAndExecInChild. 
-// -//go:nosplit -func ptrace(request int, pid int, addr uintptr, data uintptr) error { - return ptrace1(request, pid, addr, data) -} diff --git a/src/syscall/ptrace_ios.go b/src/syscall/ptrace_ios.go deleted file mode 100644 index fa8d000715..0000000000 --- a/src/syscall/ptrace_ios.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2020 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build ios - -package syscall - -// Nosplit because it is called from forkAndExecInChild. -// -//go:nosplit -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - panic("unimplemented") -} diff --git a/src/syscall/pwd_plan9.go b/src/syscall/pwd_plan9.go index 28e99565ee..b81018873f 100644 --- a/src/syscall/pwd_plan9.go +++ b/src/syscall/pwd_plan9.go @@ -23,7 +23,7 @@ var ( ) // Ensure current working directory seen by this goroutine matches -// the most recent Chdir called in any goroutine. It's called internally +// the most recent [Chdir] called in any goroutine. It's called internally // before executing any syscall which uses a relative pathname. Must // be called with the goroutine locked to the OS thread, to prevent // rescheduling on a different thread (potentially with a different diff --git a/src/syscall/route_bsd.go b/src/syscall/route_bsd.go index 8e47ff888e..46680d645a 100644 --- a/src/syscall/route_bsd.go +++ b/src/syscall/route_bsd.go @@ -325,7 +325,7 @@ func (m *InterfaceAddrMessage) sockaddr() ([]Sockaddr, error) { } // ParseRoutingMessage parses b as routing messages and returns the -// slice containing the RoutingMessage interfaces. +// slice containing the [RoutingMessage] interfaces. // // Deprecated: Use golang.org/x/net/route instead. 
func ParseRoutingMessage(b []byte) (msgs []RoutingMessage, err error) { @@ -352,7 +352,7 @@ func ParseRoutingMessage(b []byte) (msgs []RoutingMessage, err error) { } // ParseRoutingSockaddr parses msg's payload as raw sockaddrs and -// returns the slice containing the Sockaddr interfaces. +// returns the slice containing the [Sockaddr] interfaces. // // Deprecated: Use golang.org/x/net/route instead. func ParseRoutingSockaddr(msg RoutingMessage) ([]Sockaddr, error) { diff --git a/src/syscall/sockcmsg_unix.go b/src/syscall/sockcmsg_unix.go index 6ade73e87e..a4b45739b8 100644 --- a/src/syscall/sockcmsg_unix.go +++ b/src/syscall/sockcmsg_unix.go @@ -12,7 +12,7 @@ import ( "unsafe" ) -// CmsgLen returns the value to store in the Len field of the Cmsghdr +// CmsgLen returns the value to store in the Len field of the [Cmsghdr] // structure, taking into account any necessary alignment. func CmsgLen(datalen int) int { return cmsgAlignOf(SizeofCmsghdr) + datalen diff --git a/src/syscall/syscall.go b/src/syscall/syscall.go index f75ba31f5f..a46f22ddb5 100644 --- a/src/syscall/syscall.go +++ b/src/syscall/syscall.go @@ -16,7 +16,7 @@ // the manuals for the appropriate operating system. // These calls return err == nil to indicate success; otherwise // err is an operating system error describing the failure. -// On most systems, that error has type syscall.Errno. +// On most systems, that error has type [Errno]. // // NOTE: Most of the functions, types, and constants defined in // this package are also available in the [golang.org/x/sys] package. @@ -44,7 +44,7 @@ func StringByteSlice(s string) []byte { // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, EINVAL). +// location, it returns (nil, [EINVAL]). 
func ByteSliceFromString(s string) ([]byte, error) { if bytealg.IndexByteString(s, 0) != -1 { return nil, EINVAL @@ -58,12 +58,12 @@ func ByteSliceFromString(s string) ([]byte, error) { // If s contains a NUL byte this function panics instead of returning // an error. // -// Deprecated: Use BytePtrFromString instead. +// Deprecated: Use [BytePtrFromString] instead. func StringBytePtr(s string) *byte { return &StringByteSlice(s)[0] } // BytePtrFromString returns a pointer to a NUL-terminated array of // bytes containing the text of s. If s contains a NUL byte at any -// location, it returns (nil, EINVAL). +// location, it returns (nil, [EINVAL]). func BytePtrFromString(s string) (*byte, error) { a, err := ByteSliceFromString(s) if err != nil { diff --git a/src/syscall/syscall_aix.go b/src/syscall/syscall_aix.go index 30e6887cce..18827e9e8b 100644 --- a/src/syscall/syscall_aix.go +++ b/src/syscall/syscall_aix.go @@ -222,7 +222,7 @@ func Wait4(pid int, wstatus *WaitStatus, options int, rusage *Rusage) (wpid int, var status _C_int var r _Pid_t err = ERESTART - // AIX wait4 may return with ERESTART errno, while the processus is still + // AIX wait4 may return with ERESTART errno, while the process is still // active. 
for err == ERESTART { r, err = wait4(_Pid_t(pid), &status, options, rusage) diff --git a/src/syscall/syscall_darwin_amd64.go b/src/syscall/syscall_darwin_amd64.go index ef3c1998aa..64e54ad730 100644 --- a/src/syscall/syscall_darwin_amd64.go +++ b/src/syscall/syscall_darwin_amd64.go @@ -24,7 +24,7 @@ func setTimeval(sec, usec int64) Timeval { //sys Stat(path string, stat *Stat_t) (err error) = SYS_stat64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_statfs64 //sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_fstatat64 -//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) diff --git a/src/syscall/syscall_darwin_arm64.go b/src/syscall/syscall_darwin_arm64.go index cea42772bb..913c748374 100644 --- a/src/syscall/syscall_darwin_arm64.go +++ b/src/syscall/syscall_darwin_arm64.go @@ -24,7 +24,7 @@ func setTimeval(sec, usec int64) Timeval { //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) //sys fstatat(fd int, path string, stat *Stat_t, flags int) (err error) -//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint64(fd) diff --git a/src/syscall/syscall_js.go b/src/syscall/syscall_js.go index c1b28942e8..0e529e0343 100644 --- a/src/syscall/syscall_js.go +++ b/src/syscall/syscall_js.go @@ -48,7 +48,7 @@ const PathMax = 256 // err = errno // } // -// Errno values can be tested against error values using errors.Is. +// Errno values can be tested against error values using [errors.Is]. // For example: // // _, _, err := syscall.Syscall(...) 
@@ -88,7 +88,7 @@ func (e Errno) Timeout() bool { } // A Signal is a number describing a process signal. -// It implements the os.Signal interface. +// It implements the [os.Signal] interface. type Signal int const ( diff --git a/src/syscall/syscall_linux.go b/src/syscall/syscall_linux.go index b6e84203e8..6547c517a7 100644 --- a/src/syscall/syscall_linux.go +++ b/src/syscall/syscall_linux.go @@ -13,17 +13,11 @@ package syscall import ( "internal/itoa" + runtimesyscall "internal/runtime/syscall" "runtime" "unsafe" ) -// N.B. RawSyscall6 is provided via linkname by runtime/internal/syscall. -// -// Errno is uintptr and thus compatible with the runtime/internal/syscall -// definition. - -func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) - // Pull in entersyscall/exitsyscall for Syscall/Syscall6. // // Note that this can't be a push linkname because the runtime already has a @@ -40,8 +34,7 @@ func runtime_exitsyscall() // N.B. For the Syscall functions below: // // //go:uintptrkeepalive because the uintptr argument may be converted pointers -// that need to be kept alive in the caller (this is implied for RawSyscall6 -// since it has no body). +// that need to be kept alive in the caller. // // //go:nosplit because stack copying does not account for uintptrkeepalive, so // the stack must not grow. 
Stack copying cannot blindly assume that all @@ -62,6 +55,17 @@ func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { return RawSyscall6(trap, a1, a2, a3, 0, 0, 0) } +//go:uintptrkeepalive +//go:nosplit +//go:norace +//go:linkname RawSyscall6 +func RawSyscall6(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) { + var errno uintptr + r1, r2, errno = runtimesyscall.Syscall6(trap, a1, a2, a3, a4, a5, a6) + err = Errno(errno) + return +} + //go:uintptrkeepalive //go:nosplit //go:linkname Syscall @@ -1107,7 +1111,7 @@ func runtime_doAllThreadsSyscall(trap, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, // // AllThreadsSyscall is unaware of any threads that are launched // explicitly by cgo linked code, so the function always returns -// ENOTSUP in binaries that use cgo. +// [ENOTSUP] in binaries that use cgo. // //go:uintptrescapes func AllThreadsSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { @@ -1118,7 +1122,7 @@ func AllThreadsSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) { return r1, r2, Errno(errno) } -// AllThreadsSyscall6 is like AllThreadsSyscall, but extended to six +// AllThreadsSyscall6 is like [AllThreadsSyscall], but extended to six // arguments. 
// //go:uintptrescapes diff --git a/src/syscall/syscall_linux_386.go b/src/syscall/syscall_linux_386.go index 1ab6c5e148..a559f7e288 100644 --- a/src/syscall/syscall_linux_386.go +++ b/src/syscall/syscall_linux_386.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS32 - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS32 + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) func setTimespec(sec, nsec int64) Timespec { diff --git a/src/syscall/syscall_linux_amd64.go b/src/syscall/syscall_linux_amd64.go index 1083a507c7..ec52f8a4bd 100644 --- a/src/syscall/syscall_linux_amd64.go +++ b/src/syscall/syscall_linux_amd64.go @@ -9,11 +9,10 @@ import ( ) const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys Dup2(oldfd int, newfd int) (err error) diff --git a/src/syscall/syscall_linux_arm.go b/src/syscall/syscall_linux_arm.go index 2641cd2868..a6d92cea13 100644 --- a/src/syscall/syscall_linux_arm.go +++ b/src/syscall/syscall_linux_arm.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS32 - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS32 + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) func setTimespec(sec, nsec int64) Timespec { diff --git a/src/syscall/syscall_linux_arm64.go b/src/syscall/syscall_linux_arm64.go index 74d6e3a958..b87b51c0c0 100644 --- a/src/syscall/syscall_linux_arm64.go +++ b/src/syscall/syscall_linux_arm64.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 
452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT diff --git a/src/syscall/syscall_linux_loong64.go b/src/syscall/syscall_linux_loong64.go index eb275bc717..634cf30cf2 100644 --- a/src/syscall/syscall_linux_loong64.go +++ b/src/syscall/syscall_linux_loong64.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT diff --git a/src/syscall/syscall_linux_mips64x.go b/src/syscall/syscall_linux_mips64x.go index 3bdee928ed..41106ed81f 100644 --- a/src/syscall/syscall_linux_mips64x.go +++ b/src/syscall/syscall_linux_mips64x.go @@ -11,11 +11,10 @@ import ( ) const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 5435 - _SYS_faccessat2 = 5439 - _SYS_pidfd_send_signal = 5424 - _SYS_fchmodat2 = 5452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 5435 + _SYS_faccessat2 = 5439 + _SYS_fchmodat2 = 5452 ) //sys Dup2(oldfd int, newfd int) (err error) diff --git a/src/syscall/syscall_linux_mipsx.go b/src/syscall/syscall_linux_mipsx.go index 7253c648e7..7d4f8f2264 100644 --- a/src/syscall/syscall_linux_mipsx.go +++ b/src/syscall/syscall_linux_mipsx.go @@ -9,11 +9,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 4435 - _SYS_faccessat2 = 4439 - _SYS_pidfd_send_signal = 4424 - _SYS_fchmodat2 = 4452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 4435 + _SYS_faccessat2 = 4439 + _SYS_fchmodat2 = 4452 ) func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) diff --git a/src/syscall/syscall_linux_ppc64x.go 
b/src/syscall/syscall_linux_ppc64x.go index 9cfe2dc695..13c184c44f 100644 --- a/src/syscall/syscall_linux_ppc64x.go +++ b/src/syscall/syscall_linux_ppc64x.go @@ -11,11 +11,10 @@ import ( ) const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys Dup2(oldfd int, newfd int) (err error) diff --git a/src/syscall/syscall_linux_riscv64.go b/src/syscall/syscall_linux_riscv64.go index 61fb4c1668..00872a74fb 100644 --- a/src/syscall/syscall_linux_riscv64.go +++ b/src/syscall/syscall_linux_riscv64.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT diff --git a/src/syscall/syscall_linux_s390x.go b/src/syscall/syscall_linux_s390x.go index 3a0afc404a..ea667ec1da 100644 --- a/src/syscall/syscall_linux_s390x.go +++ b/src/syscall/syscall_linux_s390x.go @@ -7,11 +7,10 @@ package syscall import "unsafe" const ( - _SYS_setgroups = SYS_SETGROUPS - _SYS_clone3 = 435 - _SYS_faccessat2 = 439 - _SYS_pidfd_send_signal = 424 - _SYS_fchmodat2 = 452 + _SYS_setgroups = SYS_SETGROUPS + _SYS_clone3 = 435 + _SYS_faccessat2 = 439 + _SYS_fchmodat2 = 452 ) //sys Dup2(oldfd int, newfd int) (err error) diff --git a/src/syscall/syscall_plan9.go b/src/syscall/syscall_plan9.go index 7af10ba322..968782008d 100644 --- a/src/syscall/syscall_plan9.go +++ b/src/syscall/syscall_plan9.go @@ -23,7 +23,7 @@ const bitSize16 = 2 // ErrorString implements Error's String method by returning itself. // -// ErrorString values can be tested against error values using errors.Is. 
+// ErrorString values can be tested against error values using [errors.Is]. // For example: // // _, _, err := syscall.Syscall(...) @@ -99,7 +99,7 @@ var ( ) // For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. +// creation of IPv6 sockets to return [EAFNOSUPPORT]. var SocketDisableIPv6 bool func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err ErrorString) diff --git a/src/syscall/syscall_solaris.go b/src/syscall/syscall_solaris.go index 28d3727db6..30400b4fac 100644 --- a/src/syscall/syscall_solaris.go +++ b/src/syscall/syscall_solaris.go @@ -299,7 +299,7 @@ func UtimesNano(path string, ts []Timespec) error { //sys fcntl(fd int, cmd int, arg int) (val int, err error) -// FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. +// FcntlFlock performs a fcntl syscall for the [F_GETLK], [F_SETLK] or [F_SETLKW] command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&libc_fcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) if e1 != 0 { diff --git a/src/syscall/syscall_unix.go b/src/syscall/syscall_unix.go index 4c48f29744..0d3fe31e7a 100644 --- a/src/syscall/syscall_unix.go +++ b/src/syscall/syscall_unix.go @@ -98,7 +98,7 @@ func (m *mmapper) Munmap(data []byte) (err error) { // err = errno // } // -// Errno values can be tested against error values using errors.Is. +// Errno values can be tested against error values using [errors.Is]. // For example: // // _, _, err := syscall.Syscall(...) @@ -162,7 +162,7 @@ func errnoErr(e Errno) error { } // A Signal is a number describing a process signal. -// It implements the os.Signal interface. +// It implements the [os.Signal] interface. 
type Signal int func (s Signal) Signal() {} @@ -257,7 +257,7 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { } // For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. +// creation of IPv6 sockets to return [EAFNOSUPPORT]. var SocketDisableIPv6 bool type Sockaddr interface { diff --git a/src/syscall/syscall_wasip1.go b/src/syscall/syscall_wasip1.go index e66afee5e9..84c6bddc08 100644 --- a/src/syscall/syscall_wasip1.go +++ b/src/syscall/syscall_wasip1.go @@ -97,7 +97,7 @@ func (e Errno) Timeout() bool { } // A Signal is a number describing a process signal. -// It implements the os.Signal interface. +// It implements the [os.Signal] interface. type Signal uint8 const ( @@ -305,7 +305,7 @@ func (w WaitStatus) Continued() bool { return false } func (w WaitStatus) StopSignal() Signal { return 0 } func (w WaitStatus) TrapCause() int { return 0 } -// Rusage is a placeholder to allow compilation of the os/exec package +// Rusage is a placeholder to allow compilation of the [os/exec] package // because we need Go programs to be portable across platforms. WASI does // not have a mechanism to to spawn processes so there is no reason for an // application to take a dependency on this type. @@ -314,7 +314,7 @@ type Rusage struct { Stime Timeval } -// ProcAttr is a placeholder to allow compilation of the os/exec package +// ProcAttr is a placeholder to allow compilation of the [os/exec] package // because we need Go programs to be portable across platforms. WASI does // not have a mechanism to to spawn processes so there is no reason for an // application to take a dependency on this type. diff --git a/src/syscall/syscall_windows.go b/src/syscall/syscall_windows.go index 33876c7fe0..c51ce04b84 100644 --- a/src/syscall/syscall_windows.go +++ b/src/syscall/syscall_windows.go @@ -25,7 +25,7 @@ const InvalidHandle = ^Handle(0) // with a terminating NUL added. 
If s contains a NUL byte this // function panics instead of returning an error. // -// Deprecated: Use UTF16FromString instead. +// Deprecated: Use [UTF16FromString] instead. func StringToUTF16(s string) []uint16 { a, err := UTF16FromString(s) if err != nil { @@ -36,7 +36,7 @@ func StringToUTF16(s string) []uint16 { // UTF16FromString returns the UTF-16 encoding of the UTF-8 string // s, with a terminating NUL added. If s contains a NUL byte at any -// location, it returns (nil, EINVAL). Unpaired surrogates +// location, it returns (nil, [EINVAL]). Unpaired surrogates // are encoded using WTF-8. func UTF16FromString(s string) ([]uint16, error) { if bytealg.IndexByteString(s, 0) != -1 { @@ -102,7 +102,7 @@ func utf16PtrToString(p *uint16) string { // contains a NUL byte this function panics instead of // returning an error. // -// Deprecated: Use UTF16PtrFromString instead. +// Deprecated: Use [UTF16PtrFromString] instead. func StringToUTF16Ptr(s string) *uint16 { return &StringToUTF16(s)[0] } // UTF16PtrFromString returns pointer to the UTF-16 encoding of @@ -119,7 +119,7 @@ func UTF16PtrFromString(s string) (*uint16, error) { // Errno is the Windows error number. // -// Errno values can be tested against error values using errors.Is. +// Errno values can be tested against error values using [errors.Is]. // For example: // // _, _, err := syscall.Syscall(...) @@ -409,6 +409,10 @@ func Open(path string, mode int, perm uint32) (fd Handle, err error) { // Necessary for opening directory handles. 
attrs |= FILE_FLAG_BACKUP_SEMANTICS } + if mode&O_SYNC != 0 { + const _FILE_FLAG_WRITE_THROUGH = 0x80000000 + attrs |= _FILE_FLAG_WRITE_THROUGH + } return CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) } @@ -764,7 +768,7 @@ const socket_error = uintptr(^uint32(0)) //sys WSAEnumProtocols(protocols *int32, protocolBuffer *WSAProtocolInfo, bufferLength *uint32) (n int32, err error) [failretval==-1] = ws2_32.WSAEnumProtocolsW // For testing: clients can set this flag to force -// creation of IPv6 sockets to return EAFNOSUPPORT. +// creation of IPv6 sockets to return [EAFNOSUPPORT]. var SocketDisableIPv6 bool type RawSockaddrInet4 struct { @@ -1257,7 +1261,7 @@ func Fchdir(fd Handle) (err error) { if err != nil { return err } - // When using VOLUME_NAME_DOS, the path is always pefixed by "\\?\". + // When using VOLUME_NAME_DOS, the path is always prefixed by "\\?\". // That prefix tells the Windows APIs to disable all string parsing and to send // the string that follows it straight to the file system. // Although SetCurrentDirectory and GetCurrentDirectory do support the "\\?\" prefix, @@ -1434,7 +1438,7 @@ func newProcThreadAttributeList(maxAttrCount uint32) (*_PROC_THREAD_ATTRIBUTE_LI // decrementing until index 0 is enumerated. // // Successive calls to this API must happen on the same OS thread, -// so call runtime.LockOSThread before calling this function. +// so call [runtime.LockOSThread] before calling this function. func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) { return regEnumKeyEx(key, index, name, nameLen, reserved, class, classLen, lastWriteTime) } diff --git a/src/syscall/timestruct.go b/src/syscall/timestruct.go index 4fca63cc40..b1d03ef25c 100644 --- a/src/syscall/timestruct.go +++ b/src/syscall/timestruct.go @@ -9,7 +9,7 @@ package syscall // TimespecToNsec returns the time stored in ts as nanoseconds. 
func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } -// NsecToTimespec converts a number of nanoseconds into a Timespec. +// NsecToTimespec converts a number of nanoseconds into a [Timespec]. func NsecToTimespec(nsec int64) Timespec { sec := nsec / 1e9 nsec = nsec % 1e9 @@ -23,7 +23,7 @@ func NsecToTimespec(nsec int64) Timespec { // TimevalToNsec returns the time stored in tv as nanoseconds. func TimevalToNsec(tv Timeval) int64 { return tv.Nano() } -// NsecToTimeval converts a number of nanoseconds into a Timeval. +// NsecToTimeval converts a number of nanoseconds into a [Timeval]. func NsecToTimeval(nsec int64) Timeval { nsec += 999 // round up to microsecond usec := nsec % 1e9 / 1e3 diff --git a/src/syscall/types_windows.go b/src/syscall/types_windows.go index b338ec4700..6743675b95 100644 --- a/src/syscall/types_windows.go +++ b/src/syscall/types_windows.go @@ -27,6 +27,7 @@ const ( ERROR_NOT_FOUND Errno = 1168 ERROR_PRIVILEGE_NOT_HELD Errno = 1314 WSAEACCES Errno = 10013 + WSAENOPROTOOPT Errno = 10042 WSAECONNABORTED Errno = 10053 WSAECONNRESET Errno = 10054 ) diff --git a/src/syscall/zsyscall_darwin_amd64.go b/src/syscall/zsyscall_darwin_amd64.go index 3ad9bad076..8812fb12cd 100644 --- a/src/syscall/zsyscall_darwin_amd64.go +++ b/src/syscall/zsyscall_darwin_amd64.go @@ -7,6 +7,7 @@ package syscall import "unsafe" import "internal/abi" +import "runtime" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2011,7 +2012,10 @@ func libc_fstatat64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT //go:nosplit -func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + if runtime.GOOS == "ios" { + panic("unimplemented") + } _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) diff --git 
a/src/syscall/zsyscall_darwin_arm64.go b/src/syscall/zsyscall_darwin_arm64.go index c2502c7842..22b096349d 100644 --- a/src/syscall/zsyscall_darwin_arm64.go +++ b/src/syscall/zsyscall_darwin_arm64.go @@ -7,6 +7,7 @@ package syscall import "unsafe" import "internal/abi" +import "runtime" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2011,7 +2012,10 @@ func libc_fstatat_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT //go:nosplit -func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + if runtime.GOOS == "ios" { + panic("unimplemented") + } _, _, e1 := syscall6(abi.FuncPCABI0(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) diff --git a/src/testing/fstest/mapfs.go b/src/testing/fstest/mapfs.go index 1409d6202d..f7f8ccd9ec 100644 --- a/src/testing/fstest/mapfs.go +++ b/src/testing/fstest/mapfs.go @@ -150,7 +150,7 @@ type mapFileInfo struct { f *MapFile } -func (i *mapFileInfo) Name() string { return i.name } +func (i *mapFileInfo) Name() string { return path.Base(i.name) } func (i *mapFileInfo) Size() int64 { return int64(len(i.f.Data)) } func (i *mapFileInfo) Mode() fs.FileMode { return i.f.Mode } func (i *mapFileInfo) Type() fs.FileMode { return i.f.Mode.Type() } diff --git a/src/testing/fstest/mapfs_test.go b/src/testing/fstest/mapfs_test.go index c64dc8db5a..6381a2e56c 100644 --- a/src/testing/fstest/mapfs_test.go +++ b/src/testing/fstest/mapfs_test.go @@ -45,3 +45,15 @@ a/b.txt: -rw-rw-rw- t.Errorf("MapFS modes want:\n%s\ngot:\n%s\n", want, got) } } + +func TestMapFSFileInfoName(t *testing.T) { + m := MapFS{ + "path/to/b.txt": &MapFile{}, + } + info, _ := m.Stat("path/to/b.txt") + want := "b.txt" + got := info.Name() + if want != got { + t.Errorf("MapFS FileInfo.Name want:\n%s\ngot:\n%s\n", want, got) + } +} diff --git 
a/src/testing/fuzz.go b/src/testing/fuzz.go index d50ea793e0..baf1c7243c 100644 --- a/src/testing/fuzz.go +++ b/src/testing/fuzz.go @@ -199,7 +199,7 @@ var supportedTypes = map[reflect.Type]bool{ // the (*F).Fuzz function are (*F).Failed and (*F).Name. // // This function should be fast and deterministic, and its behavior should not -// depend on shared state. No mutatable input arguments, or pointers to them, +// depend on shared state. No mutable input arguments, or pointers to them, // should be retained between executions of the fuzz function, as the memory // backing them may be mutated during a subsequent invocation. ff must not // modify the underlying data of the arguments provided by the fuzzing engine. diff --git a/src/testing/helper_test.go b/src/testing/helper_test.go index 6e8986a2ab..da5622f85f 100644 --- a/src/testing/helper_test.go +++ b/src/testing/helper_test.go @@ -2,98 +2,107 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-package testing +package testing_test import ( + "internal/testenv" + "os" "regexp" "strings" + "testing" ) -func TestTBHelper(t *T) { - var buf strings.Builder - ctx := newTestContext(1, allMatcher()) - t1 := &T{ - common: common{ - signal: make(chan bool), - w: &buf, - }, - context: ctx, - } - t1.Run("Test", testHelper) +func TestTBHelper(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + testTestHelper(t) - want := `--- FAIL: Test (?s) -helperfuncs_test.go:12: 0 -helperfuncs_test.go:40: 1 -helperfuncs_test.go:21: 2 -helperfuncs_test.go:42: 3 -helperfuncs_test.go:49: 4 ---- FAIL: Test/sub (?s) -helperfuncs_test.go:52: 5 -helperfuncs_test.go:21: 6 -helperfuncs_test.go:51: 7 -helperfuncs_test.go:63: 8 ---- FAIL: Test/sub2 (?s) -helperfuncs_test.go:78: 11 -helperfuncs_test.go:82: recover 12 -helperfuncs_test.go:84: GenericFloat64 -helperfuncs_test.go:85: GenericInt -helperfuncs_test.go:71: 9 -helperfuncs_test.go:67: 10 + // Check that calling Helper from inside a top-level test function + // has no effect. 
+ t.Helper() + t.Error("8") + return + } + + testenv.MustHaveExec(t) + t.Parallel() + + exe, err := os.Executable() + if err != nil { + t.Fatal(err) + } + + cmd := testenv.Command(t, exe, "-test.run=^TestTBHelper$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + out, _ := cmd.CombinedOutput() + + want := `--- FAIL: TestTBHelper \([^)]+\) + helperfuncs_test.go:15: 0 + helperfuncs_test.go:47: 1 + helperfuncs_test.go:24: 2 + helperfuncs_test.go:49: 3 + helperfuncs_test.go:56: 4 + --- FAIL: TestTBHelper/sub \([^)]+\) + helperfuncs_test.go:59: 5 + helperfuncs_test.go:24: 6 + helperfuncs_test.go:58: 7 + --- FAIL: TestTBHelper/sub2 \([^)]+\) + helperfuncs_test.go:80: 11 + helperfuncs_test.go:84: recover 12 + helperfuncs_test.go:86: GenericFloat64 + helperfuncs_test.go:87: GenericInt + helper_test.go:22: 8 + helperfuncs_test.go:73: 9 + helperfuncs_test.go:69: 10 ` - lines := strings.Split(buf.String(), "\n") - durationRE := regexp.MustCompile(`\(.*\)$`) - for i, line := range lines { - line = strings.TrimSpace(line) - line = durationRE.ReplaceAllString(line, "(?s)") - lines[i] = line - } - got := strings.Join(lines, "\n") - if got != want { - t.Errorf("got output:\n\n%s\nwant:\n\n%s", got, want) + if !regexp.MustCompile(want).Match(out) { + t.Errorf("got output:\n\n%s\nwant matching:\n\n%s", out, want) } } -func TestTBHelperParallel(t *T) { - var buf strings.Builder - ctx := newTestContext(1, newMatcher(regexp.MatchString, "", "", "")) - t1 := &T{ - common: common{ - signal: make(chan bool), - w: &buf, - }, - context: ctx, +func TestTBHelperParallel(t *testing.T) { + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + parallelTestHelper(t) + return } - t1.Run("Test", parallelTestHelper) - lines := strings.Split(strings.TrimSpace(buf.String()), "\n") - if len(lines) != 6 { - t.Fatalf("parallelTestHelper gave %d lines of output; want 6", len(lines)) + testenv.MustHaveExec(t) + t.Parallel() + + exe, err := os.Executable() + if err != 
nil { + t.Fatal(err) } - want := "helperfuncs_test.go:21: parallel" + + cmd := testenv.Command(t, exe, "-test.run=^TestTBHelperParallel$") + cmd = testenv.CleanCmdEnv(cmd) + cmd.Env = append(cmd.Env, "GO_WANT_HELPER_PROCESS=1") + out, _ := cmd.CombinedOutput() + + t.Logf("output:\n%s", out) + + lines := strings.Split(strings.TrimSpace(string(out)), "\n") + + // We expect to see one "--- FAIL" line at the start + // of the log, five lines of "parallel" logging, + // and a final "FAIL" line at the end of the test. + const wantLines = 7 + + if len(lines) != wantLines { + t.Fatalf("parallelTestHelper gave %d lines of output; want %d", len(lines), wantLines) + } + want := "helperfuncs_test.go:24: parallel" if got := strings.TrimSpace(lines[1]); got != want { - t.Errorf("got output line %q; want %q", got, want) + t.Errorf("got second output line %q; want %q", got, want) } } -type noopWriter int - -func (nw *noopWriter) Write(b []byte) (int, error) { return len(b), nil } - -func BenchmarkTBHelper(b *B) { - w := noopWriter(0) - ctx := newTestContext(1, allMatcher()) - t1 := &T{ - common: common{ - signal: make(chan bool), - w: &w, - }, - context: ctx, - } +func BenchmarkTBHelper(b *testing.B) { f1 := func() { - t1.Helper() + b.Helper() } f2 := func() { - t1.Helper() + b.Helper() } b.ResetTimer() b.ReportAllocs() diff --git a/src/testing/helperfuncs_test.go b/src/testing/helperfuncs_test.go index b63bc91ac2..f0295f35df 100644 --- a/src/testing/helperfuncs_test.go +++ b/src/testing/helperfuncs_test.go @@ -2,38 +2,45 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -package testing +package testing_test -import "sync" +import ( + "sync" + "testing" +) // The line numbering of this file is important for TestTBHelper. 
-func notHelper(t *T, msg string) { +func notHelper(t *testing.T, msg string) { t.Error(msg) } -func helper(t *T, msg string) { +func helper(t *testing.T, msg string) { t.Helper() t.Error(msg) } -func notHelperCallingHelper(t *T, msg string) { +func notHelperCallingHelper(t *testing.T, msg string) { helper(t, msg) } -func helperCallingHelper(t *T, msg string) { +func helperCallingHelper(t *testing.T, msg string) { t.Helper() helper(t, msg) } -func genericHelper[G any](t *T, msg string) { +func genericHelper[G any](t *testing.T, msg string) { t.Helper() t.Error(msg) } var genericIntHelper = genericHelper[int] -func testHelper(t *T) { +func testTestHelper(t *testing.T) { + testHelper(t) +} + +func testHelper(t *testing.T) { // Check combinations of directly and indirectly // calling helper functions. notHelper(t, "0") @@ -48,7 +55,7 @@ func testHelper(t *T) { } fn("4") - t.Run("sub", func(t *T) { + t.Run("sub", func(t *testing.T) { helper(t, "5") notHelperCallingHelper(t, "6") // Check that calling Helper from inside a subtest entry function @@ -57,11 +64,6 @@ func testHelper(t *T) { t.Error("7") }) - // Check that calling Helper from inside a top-level test function - // has no effect. - t.Helper() - t.Error("8") - // Check that right caller is reported for func passed to Cleanup when // multiple cleanup functions have been registered. 
t.Cleanup(func() { @@ -85,7 +87,7 @@ func testHelper(t *T) { genericIntHelper(t, "GenericInt") } -func parallelTestHelper(t *T) { +func parallelTestHelper(t *testing.T) { var wg sync.WaitGroup for i := 0; i < 5; i++ { wg.Add(1) @@ -97,15 +99,15 @@ func parallelTestHelper(t *T) { wg.Wait() } -func helperSubCallingHelper(t *T, msg string) { +func helperSubCallingHelper(t *testing.T, msg string) { t.Helper() - t.Run("sub2", func(t *T) { + t.Run("sub2", func(t *testing.T) { t.Helper() t.Fatal(msg) }) } -func recoverHelper(t *T, msg string) { +func recoverHelper(t *testing.T, msg string) { t.Helper() defer func() { t.Helper() @@ -116,7 +118,7 @@ func recoverHelper(t *T, msg string) { doPanic(t, msg) } -func doPanic(t *T, msg string) { +func doPanic(t *testing.T, msg string) { t.Helper() panic(msg) } diff --git a/src/testing/sub_test.go b/src/testing/sub_test.go index 55b14c3795..1c23d054a0 100644 --- a/src/testing/sub_test.go +++ b/src/testing/sub_test.go @@ -767,22 +767,6 @@ func TestBenchmarkReadMemStatsBeforeFirstRun(t *T) { }) } -func TestParallelSub(t *T) { - c := make(chan int) - block := make(chan int) - for i := 0; i < 10; i++ { - go func(i int) { - <-block - t.Run(fmt.Sprint(i), func(t *T) {}) - c <- 1 - }(i) - } - close(block) - for i := 0; i < 10; i++ { - <-c - } -} - type funcWriter struct { write func([]byte) (int, error) } @@ -910,18 +894,22 @@ func TestCleanup(t *T) { func TestConcurrentCleanup(t *T) { cleanups := 0 t.Run("test", func(t *T) { - done := make(chan struct{}) + var wg sync.WaitGroup + wg.Add(2) for i := 0; i < 2; i++ { i := i go func() { t.Cleanup(func() { + // Although the calls to Cleanup are concurrent, the functions passed + // to Cleanup should be called sequentially, in some nondeterministic + // order based on when the Cleanup calls happened to be scheduled. + // So these assignments to the cleanups variable should not race. 
cleanups |= 1 << i }) - done <- struct{}{} + wg.Done() }() } - <-done - <-done + wg.Wait() }) if cleanups != 1|2 { t.Errorf("unexpected cleanup; got %d want 3", cleanups) diff --git a/src/testing/testing.go b/src/testing/testing.go index ed8b3630f1..5c06aea5f8 100644 --- a/src/testing/testing.go +++ b/src/testing/testing.go @@ -1638,15 +1638,22 @@ func tRunner(t *T, fn func(t *T)) { if len(t.sub) > 0 { // Run parallel subtests. - // Decrease the running count for this test. + + // Decrease the running count for this test and mark it as no longer running. t.context.release() + running.Delete(t.name) + // Release the parallel subtests. close(t.barrier) // Wait for subtests to complete. for _, sub := range t.sub { <-sub.signal } + + // Run any cleanup callbacks, marking the test as running + // in case the cleanup hangs. cleanupStart := time.Now() + running.Store(t.name, cleanupStart) err := t.runCleanup(recoverAndReturnPanic) t.duration += time.Since(cleanupStart) if err != nil { @@ -1733,11 +1740,19 @@ func (t *T) Run(name string, f func(t *T)) bool { // without being preempted, even when their parent is a parallel test. This // may especially reduce surprises if *parallel == 1. go tRunner(t, f) + + // The parent goroutine will block until the subtest either finishes or calls + // Parallel, but in general we don't know whether the parent goroutine is the + // top-level test function or some other goroutine it has spawned. + // To avoid confusing false-negatives, we leave the parent in the running map + // even though in the typical case it is blocked. + if !<-t.signal { // At this point, it is likely that FailNow was called on one of the // parent tests by one of the subtests. Continue aborting up the chain. 
runtime.Goexit() } + if t.chatty != nil && t.chatty.json { t.chatty.Updatef(t.parent.name, "=== NAME %s\n", t.parent.name) } diff --git a/src/testing/testing_test.go b/src/testing/testing_test.go index 91c6ccf21d..d3822dfd57 100644 --- a/src/testing/testing_test.go +++ b/src/testing/testing_test.go @@ -6,13 +6,18 @@ package testing_test import ( "bytes" + "fmt" "internal/race" "internal/testenv" "os" + "os/exec" "path/filepath" "regexp" + "slices" + "strings" "sync" "testing" + "time" ) // This is exactly what a test would do without a TestMain. @@ -636,3 +641,174 @@ func BenchmarkSubRacy(b *testing.B) { doRace() // should be reported separately } + +func TestRunningTests(t *testing.T) { + t.Parallel() + + // Regression test for https://go.dev/issue/64404: + // on timeout, the "running tests" message should not include + // tests that are waiting on parked subtests. + + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + for i := 0; i < 2; i++ { + t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) { + t.Parallel() + for j := 0; j < 2; j++ { + t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) { + t.Parallel() + for { + time.Sleep(1 * time.Millisecond) + } + }) + } + }) + } + } + + timeout := 10 * time.Millisecond + for { + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String(), "-test.parallel=4") + cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1") + out, err := cmd.CombinedOutput() + t.Logf("%v:\n%s", cmd, out) + if _, ok := err.(*exec.ExitError); !ok { + t.Fatal(err) + } + + // Because the outer subtests (and TestRunningTests itself) are marked as + // parallel, their test functions return (and are no longer “running”) + // before the inner subtests are released to run and hang. + // Only those inner subtests should be reported as running. 
+ want := []string{ + "TestRunningTests/outer0/inner0", + "TestRunningTests/outer0/inner1", + "TestRunningTests/outer1/inner0", + "TestRunningTests/outer1/inner1", + } + + got, ok := parseRunningTests(out) + if slices.Equal(got, want) { + break + } + if ok { + t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) + } else { + t.Logf("no running tests found") + } + t.Logf("retrying with longer timeout") + timeout *= 2 + } +} + +func TestRunningTestsInCleanup(t *testing.T) { + t.Parallel() + + if os.Getenv("GO_WANT_HELPER_PROCESS") == "1" { + for i := 0; i < 2; i++ { + t.Run(fmt.Sprintf("outer%d", i), func(t *testing.T) { + // Not parallel: we expect to see only one outer test, + // stuck in cleanup after its subtest finishes. + + t.Cleanup(func() { + for { + time.Sleep(1 * time.Millisecond) + } + }) + + for j := 0; j < 2; j++ { + t.Run(fmt.Sprintf("inner%d", j), func(t *testing.T) { + t.Parallel() + }) + } + }) + } + } + + timeout := 10 * time.Millisecond + for { + cmd := testenv.Command(t, os.Args[0], "-test.run=^"+t.Name()+"$", "-test.timeout="+timeout.String()) + cmd.Env = append(cmd.Environ(), "GO_WANT_HELPER_PROCESS=1") + out, err := cmd.CombinedOutput() + t.Logf("%v:\n%s", cmd, out) + if _, ok := err.(*exec.ExitError); !ok { + t.Fatal(err) + } + + // TestRunningTestsInCleanup is blocked in the call to t.Run, + // but its test function has not yet returned so it should still + // be considered to be running. + // outer1 hasn't even started yet, so only outer0 and the top-level + // test function should be reported as running. 
+ want := []string{ + "TestRunningTestsInCleanup", + "TestRunningTestsInCleanup/outer0", + } + + got, ok := parseRunningTests(out) + if slices.Equal(got, want) { + break + } + if ok { + t.Logf("found running tests:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) + } else { + t.Logf("no running tests found") + } + t.Logf("retrying with longer timeout") + timeout *= 2 + } +} + +func parseRunningTests(out []byte) (runningTests []string, ok bool) { + inRunningTests := false + for _, line := range strings.Split(string(out), "\n") { + if inRunningTests { + if trimmed, ok := strings.CutPrefix(line, "\t"); ok { + if name, _, ok := strings.Cut(trimmed, " "); ok { + runningTests = append(runningTests, name) + continue + } + } + + // This line is not the name of a running test. + return runningTests, true + } + + if strings.TrimSpace(line) == "running tests:" { + inRunningTests = true + } + } + + return nil, false +} + +func TestConcurrentRun(t *testing.T) { + // Regression test for https://go.dev/issue/64402: + // this deadlocked after https://go.dev/cl/506755. + + block := make(chan struct{}) + var ready, done sync.WaitGroup + for i := 0; i < 2; i++ { + ready.Add(1) + done.Add(1) + go t.Run("", func(*testing.T) { + ready.Done() + <-block + done.Done() + }) + } + ready.Wait() + close(block) + done.Wait() +} + +func TestParentRun(t1 *testing.T) { + // Regression test for https://go.dev/issue/64402: + // this deadlocked after https://go.dev/cl/506755. + + t1.Run("outer", func(t2 *testing.T) { + t2.Log("Hello outer!") + t1.Run("not_inner", func(t3 *testing.T) { // Note: this is t1.Run, not t2.Run. + t3.Log("Hello inner!") + }) + }) +} diff --git a/src/text/scanner/scanner.go b/src/text/scanner/scanner.go index 44be0b6bd4..6ae7a9b987 100644 --- a/src/text/scanner/scanner.go +++ b/src/text/scanner/scanner.go @@ -8,7 +8,7 @@ // existing tools, the NUL character is not allowed. 
If the first character // in the source is a UTF-8 encoded byte order mark (BOM), it is discarded. // -// By default, a Scanner skips white space and Go comments and recognizes all +// By default, a [Scanner] skips white space and Go comments and recognizes all // literals as defined by the Go language specification. It may be // customized to recognize only a subset of those literals and to recognize // different identifier and white space characters. @@ -47,7 +47,7 @@ func (pos Position) String() string { } // Predefined mode bits to control recognition of tokens. For instance, -// to configure a Scanner such that it only recognizes (Go) identifiers, +// to configure a [Scanner] such that it only recognizes (Go) identifiers, // integers, and skips comments, set the Scanner's Mode field to: // // ScanIdents | ScanInts | SkipComments @@ -56,7 +56,7 @@ func (pos Position) String() string { // set, unrecognized tokens are not ignored. Instead, the scanner simply // returns the respective individual characters (or possibly sub-tokens). // For instance, if the mode is ScanIdents (not ScanStrings), the string -// "foo" is scanned as the token sequence '"' Ident '"'. +// "foo" is scanned as the token sequence '"' [Ident] '"'. // // Use GoTokens to configure the Scanner such that it accepts all Go // literal tokens including Go identifiers. Comments will be skipped. @@ -106,13 +106,13 @@ func TokenString(tok rune) string { return fmt.Sprintf("%q", string(tok)) } -// GoWhitespace is the default value for the Scanner's Whitespace field. +// GoWhitespace is the default value for the [Scanner]'s Whitespace field. // Its value selects Go's white space characters. const GoWhitespace = 1<<'\t' | 1<<'\n' | 1<<'\r' | 1<<' ' const bufLen = 1024 // at least utf8.UTFMax -// A Scanner implements reading of Unicode characters and tokens from an io.Reader. +// A Scanner implements reading of Unicode characters and tokens from an [io.Reader]. 
type Scanner struct { // Input src io.Reader @@ -175,9 +175,9 @@ type Scanner struct { Position } -// Init initializes a Scanner with a new source and returns s. -// Error is set to nil, ErrorCount is set to 0, Mode is set to GoTokens, -// and Whitespace is set to GoWhitespace. +// Init initializes a [Scanner] with a new source and returns s. +// [Scanner.Error] is set to nil, [Scanner.ErrorCount] is set to 0, [Scanner.Mode] is set to [GoTokens], +// and [Scanner.Whitespace] is set to [GoWhitespace]. func (s *Scanner) Init(src io.Reader) *Scanner { s.src = src @@ -296,10 +296,10 @@ func (s *Scanner) next() rune { } // Next reads and returns the next Unicode character. -// It returns EOF at the end of the source. It reports +// It returns [EOF] at the end of the source. It reports // a read error by calling s.Error, if not nil; otherwise -// it prints an error message to os.Stderr. Next does not -// update the Scanner's Position field; use Pos() to +// it prints an error message to [os.Stderr]. Next does not +// update the [Scanner.Position] field; use [Scanner.Pos]() to // get the current position. func (s *Scanner) Next() rune { s.tokPos = -1 // don't collect token text @@ -312,7 +312,7 @@ func (s *Scanner) Next() rune { } // Peek returns the next Unicode character in the source without advancing -// the scanner. It returns EOF if the scanner's position is at the last +// the scanner. It returns [EOF] if the scanner's position is at the last // character of the source. func (s *Scanner) Peek() rune { if s.ch == -2 { @@ -639,10 +639,10 @@ func (s *Scanner) scanComment(ch rune) rune { } // Scan reads the next token or Unicode character from source and returns it. -// It only recognizes tokens t for which the respective Mode bit (1<<-t) is set. -// It returns EOF at the end of the source. It reports scanner errors (read and +// It only recognizes tokens t for which the respective [Scanner.Mode] bit (1<<-t) is set. +// It returns [EOF] at the end of the source. 
It reports scanner errors (read and // token errors) by calling s.Error, if not nil; otherwise it prints an error -// message to os.Stderr. +// message to [os.Stderr]. func (s *Scanner) Scan() rune { ch := s.Peek() @@ -742,8 +742,8 @@ redo: } // Pos returns the position of the character immediately after -// the character or token returned by the last call to Next or Scan. -// Use the Scanner's Position field for the start position of the most +// the character or token returned by the last call to [Scanner.Next] or [Scanner.Scan]. +// Use the [Scanner.Position] field for the start position of the most // recently scanned token. func (s *Scanner) Pos() (pos Position) { pos.Filename = s.Filename @@ -766,7 +766,7 @@ func (s *Scanner) Pos() (pos Position) { } // TokenText returns the string corresponding to the most recently scanned token. -// Valid after calling Scan and in calls of Scanner.Error. +// Valid after calling [Scanner.Scan] and in calls of [Scanner.Error]. func (s *Scanner) TokenText() string { if s.tokPos < 0 { // no token text diff --git a/src/text/tabwriter/tabwriter.go b/src/text/tabwriter/tabwriter.go index d4cfcf556a..976ad251aa 100644 --- a/src/text/tabwriter/tabwriter.go +++ b/src/text/tabwriter/tabwriter.go @@ -12,6 +12,7 @@ package tabwriter import ( + "fmt" "io" "unicode/utf8" ) @@ -59,7 +60,7 @@ type cell struct { // this may not be true in some fonts or if the string contains combining // characters. // -// If DiscardEmptyColumns is set, empty columns that are terminated +// If [DiscardEmptyColumns] is set, empty columns that are terminated // entirely by vertical (or "soft") tabs are discarded. Columns // terminated by horizontal (or "hard") tabs are not affected by // this flag. @@ -68,24 +69,24 @@ type cell struct { // are passed through. The widths of tags and entities are // assumed to be zero (tags) and one (entities) for formatting purposes. 
// -// A segment of text may be escaped by bracketing it with Escape +// A segment of text may be escaped by bracketing it with [Escape] // characters. The tabwriter passes escaped text segments through // unchanged. In particular, it does not interpret any tabs or line -// breaks within the segment. If the StripEscape flag is set, the +// breaks within the segment. If the [StripEscape] flag is set, the // Escape characters are stripped from the output; otherwise they // are passed through as well. For the purpose of formatting, the // width of the escaped text is always computed excluding the Escape // characters. // // The formfeed character acts like a newline but it also terminates -// all columns in the current line (effectively calling Flush). Tab- +// all columns in the current line (effectively calling [Writer.Flush]). Tab- // terminated cells in the next line start new columns. Unless found // inside an HTML tag or inside an escaped text segment, formfeed // characters appear as newlines in the output. // // The Writer must buffer input internally, because proper spacing // of one line may depend on the cells in future lines. Clients must -// call Flush when done calling Write. +// call Flush when done calling [Writer.Write]. type Writer struct { // configuration output io.Writer @@ -192,7 +193,7 @@ const ( Debug ) -// A Writer must be initialized with a call to Init. The first parameter (output) +// A [Writer] must be initialized with a call to Init. The first parameter (output) // specifies the filter output. The remaining parameters control the formatting: // // minwidth minimal cell width including any padding @@ -476,12 +477,12 @@ func (b *Writer) handlePanic(err *error, op string) { *err = nerr.err return } - panic("tabwriter: panic during " + op) + panic(fmt.Sprintf("tabwriter: panic during %s (%v)", op, e)) } } -// Flush should be called after the last call to Write to ensure -// that any data buffered in the Writer is written to output. 
Any +// Flush should be called after the last call to [Writer.Write] to ensure +// that any data buffered in the [Writer] is written to output. Any // incomplete escape sequence at the end is considered // complete for formatting purposes. func (b *Writer) Flush() error { @@ -593,7 +594,7 @@ func (b *Writer) Write(buf []byte) (n int, err error) { return } -// NewWriter allocates and initializes a new tabwriter.Writer. +// NewWriter allocates and initializes a new [Writer]. // The parameters are the same as for the Init function. func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer { return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags) diff --git a/src/text/tabwriter/tabwriter_test.go b/src/text/tabwriter/tabwriter_test.go index a51358dbed..2bb54a25e2 100644 --- a/src/text/tabwriter/tabwriter_test.go +++ b/src/text/tabwriter/tabwriter_test.go @@ -635,7 +635,7 @@ func wantPanicString(t *testing.T, want string) { } func TestPanicDuringFlush(t *testing.T) { - defer wantPanicString(t, "tabwriter: panic during Flush") + defer wantPanicString(t, "tabwriter: panic during Flush (cannot write)") var p panicWriter w := new(Writer) w.Init(p, 0, 0, 5, ' ', 0) @@ -645,7 +645,7 @@ func TestPanicDuringFlush(t *testing.T) { } func TestPanicDuringWrite(t *testing.T) { - defer wantPanicString(t, "tabwriter: panic during Write") + defer wantPanicString(t, "tabwriter: panic during Write (cannot write)") var p panicWriter w := new(Writer) w.Init(p, 0, 0, 5, ' ', 0) diff --git a/src/text/template/doc.go b/src/text/template/doc.go index 4c01b05ebf..b3ffaabb15 100644 --- a/src/text/template/doc.go +++ b/src/text/template/doc.go @@ -144,6 +144,13 @@ data, defined in detail in the corresponding sections that follow. is executed; otherwise, dot is set to the value of the pipeline and T1 is executed. 
+ {{with pipeline}} T1 {{else with pipeline}} T0 {{end}} + To simplify the appearance of with-else chains, the else action + of a with may include another with directly; the effect is exactly + the same as writing + {{with pipeline}} T1 {{else}}{{with pipeline}} T0 {{end}}{{end}} + + Arguments An argument is a simple value, denoted by one of the following. @@ -438,13 +445,13 @@ produce the text By construction, a template may reside in only one association. If it's necessary to have a template addressable from multiple associations, the template definition must be parsed multiple times to create distinct *Template -values, or must be copied with the Clone or AddParseTree method. +values, or must be copied with [Template.Clone] or [Template.AddParseTree]. Parse may be called multiple times to assemble the various associated templates; -see the ParseFiles and ParseGlob functions and methods for simple ways to parse -related templates stored in files. +see [ParseFiles], [ParseGlob], [Template.ParseFiles] and [Template.ParseGlob] +for simple ways to parse related templates stored in files. -A template may be executed directly or through ExecuteTemplate, which executes +A template may be executed directly or through [Template.ExecuteTemplate], which executes an associated template identified by name. To invoke our example above, we might write, diff --git a/src/text/template/exec.go b/src/text/template/exec.go index 2b778fff69..20d8f98f28 100644 --- a/src/text/template/exec.go +++ b/src/text/template/exec.go @@ -201,8 +201,8 @@ func (t *Template) ExecuteTemplate(wr io.Writer, name string, data any) error { // A template may be executed safely in parallel, although if parallel // executions share a Writer the output may be interleaved. // -// If data is a reflect.Value, the template applies to the concrete -// value that the reflect.Value holds, as in fmt.Print. 
+// If data is a [reflect.Value], the template applies to the concrete +// value that the reflect.Value holds, as in [fmt.Print]. func (t *Template) Execute(wr io.Writer, data any) error { return t.execute(wr, data) } @@ -228,7 +228,7 @@ func (t *Template) execute(wr io.Writer, data any) (err error) { // DefinedTemplates returns a string listing the defined templates, // prefixed by the string "; defined templates are: ". If there are none, // it returns the empty string. For generating an error message here -// and in html/template. +// and in [html/template]. func (t *Template) DefinedTemplates() string { if t.common == nil { return "" diff --git a/src/text/template/exec_test.go b/src/text/template/exec_test.go index e607fd3bee..8fdd9280f2 100644 --- a/src/text/template/exec_test.go +++ b/src/text/template/exec_test.go @@ -569,6 +569,8 @@ var execTests = []execTest{ {"with $x struct.U.V", "{{with $x := $}}{{$x.U.V}}{{end}}", "v", tVal, true}, {"with variable and action", "{{with $x := $}}{{$y := $.U.V}}{{$y}}{{end}}", "v", tVal, true}, {"with on typed nil interface value", "{{with .NonEmptyInterfaceTypedNil}}TRUE{{ end }}", "", tVal, true}, + {"with else with", "{{with 0}}{{.}}{{else with true}}{{.}}{{end}}", "true", tVal, true}, + {"with else with chain", "{{with 0}}{{.}}{{else with false}}{{.}}{{else with `notempty`}}{{.}}{{end}}", "notempty", tVal, true}, // Range. {"range []int", "{{range .SI}}-{{.}}-{{end}}", "-3--4--5-", tVal, true}, diff --git a/src/text/template/funcs.go b/src/text/template/funcs.go index a949f896fa..c9d5835bed 100644 --- a/src/text/template/funcs.go +++ b/src/text/template/funcs.go @@ -22,14 +22,14 @@ import ( // return value evaluates to non-nil during execution, execution terminates and // Execute returns that error. // -// Errors returned by Execute wrap the underlying error; call errors.As to +// Errors returned by Execute wrap the underlying error; call [errors.As] to // unwrap them. 
// // When template execution invokes a function with an argument list, that list // must be assignable to the function's parameter types. Functions meant to // apply to arguments of arbitrary type can use parameters of type interface{} or -// of type reflect.Value. Similarly, functions meant to return a result of arbitrary -// type can return interface{} or reflect.Value. +// of type [reflect.Value]. Similarly, functions meant to return a result of arbitrary +// type can return interface{} or [reflect.Value]. type FuncMap map[string]any // builtins returns the FuncMap. diff --git a/src/text/template/helper.go b/src/text/template/helper.go index 48af3928b3..06e7a92798 100644 --- a/src/text/template/helper.go +++ b/src/text/template/helper.go @@ -28,7 +28,7 @@ func Must(t *Template, err error) *Template { return t } -// ParseFiles creates a new Template and parses the template definitions from +// ParseFiles creates a new [Template] and parses the template definitions from // the named files. The returned template's name will have the base name and // parsed contents of the first file. There must be at least one file. // If an error occurs, parsing stops and the returned *Template is nil. @@ -93,12 +93,12 @@ func parseFiles(t *Template, readFile func(string) (string, []byte, error), file return t, nil } -// ParseGlob creates a new Template and parses the template definitions from +// ParseGlob creates a new [Template] and parses the template definitions from // the files identified by the pattern. The files are matched according to the // semantics of filepath.Match, and the pattern must match at least one file. // The returned template will have the (base) name and (parsed) contents of the // first file matched by the pattern. ParseGlob is equivalent to calling -// ParseFiles with the list of files matched by the pattern. +// [ParseFiles] with the list of files matched by the pattern. 
// // When parsing multiple files with the same name in different directories, // the last one mentioned will be the one that results. @@ -131,7 +131,7 @@ func parseGlob(t *Template, pattern string) (*Template, error) { return parseFiles(t, readFileOS, filenames...) } -// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys // instead of the host operating system's file system. // It accepts a list of glob patterns. // (Note that most file names serve as glob patterns matching only themselves.) @@ -139,7 +139,7 @@ func ParseFS(fsys fs.FS, patterns ...string) (*Template, error) { return parseFS(nil, fsys, patterns) } -// ParseFS is like ParseFiles or ParseGlob but reads from the file system fsys +// ParseFS is like [Template.ParseFiles] or [Template.ParseGlob] but reads from the file system fsys // instead of the host operating system's file system. // It accepts a list of glob patterns. // (Note that most file names serve as glob patterns matching only themselves.) diff --git a/src/text/template/parse/node.go b/src/text/template/parse/node.go index 47268225c8..23ba9aec2b 100644 --- a/src/text/template/parse/node.go +++ b/src/text/template/parse/node.go @@ -284,7 +284,6 @@ func (a *ActionNode) tree() *Tree { func (a *ActionNode) Copy() Node { return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe()) - } // CommandNode holds a command (a pipeline inside an evaluating action). @@ -347,12 +346,12 @@ type IdentifierNode struct { Ident string // The identifier's name. } -// NewIdentifier returns a new IdentifierNode with the given identifier name. +// NewIdentifier returns a new [IdentifierNode] with the given identifier name. func NewIdentifier(ident string) *IdentifierNode { return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident} } -// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature. 
+// SetPos sets the position. [NewIdentifier] is a public method so we can't modify its signature. // Chained for convenience. // TODO: fix one day? func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { @@ -360,7 +359,7 @@ func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode { return i } -// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature. +// SetTree sets the parent tree for the node. [NewIdentifier] is a public method so we can't modify its signature. // Chained for convenience. // TODO: fix one day? func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode { diff --git a/src/text/template/parse/parse.go b/src/text/template/parse/parse.go index d43d5334ba..27c84f31eb 100644 --- a/src/text/template/parse/parse.go +++ b/src/text/template/parse/parse.go @@ -42,7 +42,7 @@ const ( SkipFuncCheck // do not check that functions are defined ) -// Copy returns a copy of the Tree. Any parsing state is discarded. +// Copy returns a copy of the [Tree]. Any parsing state is discarded. func (t *Tree) Copy() *Tree { if t == nil { return nil @@ -55,7 +55,7 @@ func (t *Tree) Copy() *Tree { } } -// Parse returns a map from template name to parse.Tree, created by parsing the +// Parse returns a map from template name to [Tree], created by parsing the // templates described in the argument string. The top-level template will be // given the specified name. If an error is encountered, parsing stops and an // empty map is returned with the error. 
@@ -521,7 +521,7 @@ func (t *Tree) checkPipeline(pipe *PipeNode, context string) { } } -func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { +func (t *Tree) parseControl(context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) { defer t.popVars(len(t.vars)) pipe = t.pipeline(context, itemRightDelim) if context == "range" { @@ -535,27 +535,30 @@ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int switch next.Type() { case nodeEnd: //done case nodeElse: - if allowElseIf { - // Special case for "else if". If the "else" is followed immediately by an "if", - // the elseControl will have left the "if" token pending. Treat - // {{if a}}_{{else if b}}_{{end}} - // as - // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}. - // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}} - // is assumed. This technique works even for long if-else-if chains. - // TODO: Should we allow else-if in with and range? - if t.peek().typ == itemIf { - t.next() // Consume the "if" token. - elseList = t.newList(next.Position()) - elseList.append(t.ifControl()) - // Do not consume the next item - only one {{end}} required. - break + // Special case for "else if" and "else with". + // If the "else" is followed immediately by an "if" or "with", + // the elseControl will have left the "if" or "with" token pending. Treat + // {{if a}}_{{else if b}}_{{end}} + // {{with a}}_{{else with b}}_{{end}} + // as + // {{if a}}_{{else}}{{if b}}_{{end}}{{end}} + // {{with a}}_{{else}}{{with b}}_{{end}}{{end}}. + // To do this, parse the "if" or "with" as usual and stop at it {{end}}; + // the subsequent{{end}} is assumed. This technique works even for long if-else-if chains. + if context == "if" && t.peek().typ == itemIf { + t.next() // Consume the "if" token. 
+ elseList = t.newList(next.Position()) + elseList.append(t.ifControl()) + } else if context == "with" && t.peek().typ == itemWith { + t.next() + elseList = t.newList(next.Position()) + elseList.append(t.withControl()) + } else { + elseList, next = t.itemList() + if next.Type() != nodeEnd { + t.errorf("expected end; found %s", next) } } - elseList, next = t.itemList() - if next.Type() != nodeEnd { - t.errorf("expected end; found %s", next) - } } return pipe.Position(), pipe.Line, pipe, list, elseList } @@ -567,7 +570,7 @@ func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int // // If keyword is past. func (t *Tree) ifControl() Node { - return t.newIf(t.parseControl(true, "if")) + return t.newIf(t.parseControl("if")) } // Range: @@ -577,7 +580,7 @@ func (t *Tree) ifControl() Node { // // Range keyword is past. func (t *Tree) rangeControl() Node { - r := t.newRange(t.parseControl(false, "range")) + r := t.newRange(t.parseControl("range")) return r } @@ -588,7 +591,7 @@ func (t *Tree) rangeControl() Node { // // If keyword is past. func (t *Tree) withControl() Node { - return t.newWith(t.parseControl(false, "with")) + return t.newWith(t.parseControl("with")) } // End: @@ -606,10 +609,11 @@ func (t *Tree) endControl() Node { // // Else keyword is past. func (t *Tree) elseControl() Node { - // Special case for "else if". peek := t.peekNonSpace() - if peek.typ == itemIf { - // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ". + // The "{{else if ... " and "{{else with ..." will be + // treated as "{{else}}{{if ..." and "{{else}}{{with ...". + // So return the else node here. 
+ if peek.typ == itemIf || peek.typ == itemWith { return t.newElse(peek.pos, peek.line) } token := t.expect(itemRightDelim, "else") diff --git a/src/text/template/parse/parse_test.go b/src/text/template/parse/parse_test.go index 59e0a17412..faf226d1c3 100644 --- a/src/text/template/parse/parse_test.go +++ b/src/text/template/parse/parse_test.go @@ -244,6 +244,10 @@ var parseTests = []parseTest{ `{{with .X}}"hello"{{end}}`}, {"with with else", "{{with .X}}hello{{else}}goodbye{{end}}", noError, `{{with .X}}"hello"{{else}}"goodbye"{{end}}`}, + {"with with else with", "{{with .X}}hello{{else with .Y}}goodbye{{end}}", noError, + `{{with .X}}"hello"{{else}}{{with .Y}}"goodbye"{{end}}{{end}}`}, + {"with else chain", "{{with .X}}X{{else with .Y}}Y{{else with .Z}}Z{{end}}", noError, + `{{with .X}}"X"{{else}}{{with .Y}}"Y"{{else}}{{with .Z}}"Z"{{end}}{{end}}{{end}}`}, // Trimming spaces. {"trim left", "x \r\n\t{{- 3}}", noError, `"x"{{3}}`}, {"trim right", "{{3 -}}\n\n\ty", noError, `{{3}}"y"`}, diff --git a/src/text/template/template.go b/src/text/template/template.go index 776be9cd07..86fd3f122a 100644 --- a/src/text/template/template.go +++ b/src/text/template/template.go @@ -24,7 +24,7 @@ type common struct { } // Template is the representation of a parsed template. The *parse.Tree -// field is exported only for use by html/template and should be treated +// field is exported only for use by [html/template] and should be treated // as unexported by all other clients. type Template struct { name string @@ -79,7 +79,7 @@ func (t *Template) init() { // Clone returns a duplicate of the template, including all associated // templates. The actual representation is not copied, but the name space of -// associated templates is, so further calls to Parse in the copy will add +// associated templates is, so further calls to [Template.Parse] in the copy will add // templates to the copy but not to the original. 
Clone can be used to prepare // common templates and use them with variant definitions for other templates // by adding the variants after the clone is made. @@ -157,7 +157,7 @@ func (t *Template) Templates() []*Template { } // Delims sets the action delimiters to the specified strings, to be used in -// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template +// subsequent calls to [Template.Parse], [Template.ParseFiles], or [Template.ParseGlob]. Nested template // definitions will inherit the settings. An empty delimiter stands for the // corresponding default: {{ or }}. // The return value is the template, so calls can be chained. diff --git a/src/time/example_test.go b/src/time/example_test.go index cfdee8f4d7..53c20a0516 100644 --- a/src/time/example_test.go +++ b/src/time/example_test.go @@ -99,7 +99,7 @@ func ExampleParseDuration() { fmt.Println(complex) fmt.Printf("There are %.0f seconds in %v.\n", complex.Seconds(), complex) fmt.Printf("There are %d nanoseconds in %v.\n", micro.Nanoseconds(), micro) - fmt.Printf("There are %6.2e seconds in %v.\n", micro2.Seconds(), micro) + fmt.Printf("There are %6.2e seconds in %v.\n", micro2.Seconds(), micro2) // Output: // 10h0m0s // 1h10m10s diff --git a/src/time/format_test.go b/src/time/format_test.go index 8a26eaa35b..29b9c280e6 100644 --- a/src/time/format_test.go +++ b/src/time/format_test.go @@ -1001,7 +1001,7 @@ func FuzzFormatRFC3339(f *testing.F) { gotNanos := AppendFormatRFC3339(ts, nil, true) wantNanos := AppendFormatAny(ts, nil, RFC3339Nano) - if !bytes.Equal(got, want) { + if !bytes.Equal(gotNanos, wantNanos) { t.Errorf("Format(%s, RFC3339Nano) mismatch:\n\tgot: %s\n\twant: %s", ts, gotNanos, wantNanos) } }) diff --git a/src/time/sleep.go b/src/time/sleep.go index 0aec4cacc6..bd78de9fd3 100644 --- a/src/time/sleep.go +++ b/src/time/sleep.go @@ -4,6 +4,8 @@ package time +import "unsafe" + // Sleep pauses the current goroutine for at least the duration d. 
// A negative or zero duration causes Sleep to return immediately. func Sleep(d Duration) @@ -11,7 +13,7 @@ func Sleep(d Duration) // Interface to timers implemented in package runtime. // Must be in sync with ../runtime/time.go:/^type timer type runtimeTimer struct { - pp uintptr + ts unsafe.Pointer when int64 period int64 f func(any, uintptr) // NOTE: must not be closure @@ -41,7 +43,7 @@ func when(d Duration) int64 { func startTimer(*runtimeTimer) func stopTimer(*runtimeTimer) bool func resetTimer(*runtimeTimer, int64) bool -func modTimer(t *runtimeTimer, when, period int64, f func(any, uintptr), arg any, seq uintptr) +func modTimer(t *runtimeTimer, when, period int64) // The Timer type represents a single event. // When the Timer expires, the current time will be sent on C, diff --git a/src/time/sleep_test.go b/src/time/sleep_test.go index 2f791240f9..e985870710 100644 --- a/src/time/sleep_test.go +++ b/src/time/sleep_test.go @@ -15,15 +15,33 @@ import ( "sync/atomic" "testing" . "time" + _ "unsafe" // for go:linkname ) +// haveHighResSleep is true if the system supports at least ~1ms sleeps. +// +//go:linkname haveHighResSleep runtime.haveHighResSleep +var haveHighResSleep bool + +// adjustDelay returns an adjusted delay based on the system sleep resolution. // Go runtime uses different Windows timers for time.Now and sleeping. // These can tick at different frequencies and can arrive out of sync. // The effect can be seen, for example, as time.Sleep(100ms) is actually // shorter then 100ms when measured as difference between time.Now before and // after time.Sleep call. This was observed on Windows XP SP3 (windows/386). -// windowsInaccuracy is to ignore such errors. 
-const windowsInaccuracy = 17 * Millisecond +func adjustDelay(t *testing.T, delay Duration) Duration { + if haveHighResSleep { + return delay + } + t.Log("adjusting delay for low resolution sleep") + switch runtime.GOOS { + case "windows": + return delay - 17*Millisecond + default: + t.Fatal("adjustDelay unimplemented on " + runtime.GOOS) + return 0 + } +} func TestSleep(t *testing.T) { const delay = 100 * Millisecond @@ -33,10 +51,7 @@ func TestSleep(t *testing.T) { }() start := Now() Sleep(delay) - delayadj := delay - if runtime.GOOS == "windows" { - delayadj -= windowsInaccuracy - } + delayadj := adjustDelay(t, delay) duration := Now().Sub(start) if duration < delayadj { t.Fatalf("Sleep(%s) slept for only %s", delay, duration) @@ -83,6 +98,45 @@ func TestAfterStress(t *testing.T) { stop.Store(true) } +func TestAfterFuncStarvation(t *testing.T) { + // Start two goroutines ping-ponging on a channel send. + // At any given time, at least one of these goroutines is runnable: + // if the channel buffer is full, the receiver is runnable, + // and if it is not full, the sender is runnable. + // + // In addition, the AfterFunc callback should become runnable after + // the indicated delay. + // + // Even if GOMAXPROCS=1, we expect the runtime to eventually schedule + // the AfterFunc goroutine instead of the runnable channel goroutine. + // However, in https://go.dev/issue/65178 this was observed to live-lock + // on wasip1/wasm and js/wasm after <10000 runs. 
+ defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1)) + + var ( + wg sync.WaitGroup + stop atomic.Bool + c = make(chan bool, 1) + ) + + wg.Add(2) + go func() { + for !stop.Load() { + c <- true + } + close(c) + wg.Done() + }() + go func() { + for range c { + } + wg.Done() + }() + + AfterFunc(1*Microsecond, func() { stop.Store(true) }) + wg.Wait() +} + func benchmark(b *testing.B, bench func(n int)) { // Create equal number of garbage timers on each P before starting @@ -203,10 +257,7 @@ func TestAfter(t *testing.T) { const delay = 100 * Millisecond start := Now() end := <-After(delay) - delayadj := delay - if runtime.GOOS == "windows" { - delayadj -= windowsInaccuracy - } + delayadj := adjustDelay(t, delay) if duration := Now().Sub(start); duration < delayadj { t.Fatalf("After(%s) slept for only %d ns", delay, duration) } diff --git a/src/time/tick.go b/src/time/tick.go index 9da16b5d58..e06810db5d 100644 --- a/src/time/tick.go +++ b/src/time/tick.go @@ -55,7 +55,7 @@ func (t *Ticker) Reset(d Duration) { if t.r.f == nil { panic("time: Reset called on uninitialized Ticker") } - modTimer(&t.r, when(d), int64(d), t.r.f, t.r.arg, t.r.seq) + modTimer(&t.r, when(d), int64(d)) } // Tick is a convenience wrapper for NewTicker providing access to the ticking diff --git a/src/time/time.go b/src/time/time.go index 9d4c6e919e..2ca1cdbb72 100644 --- a/src/time/time.go +++ b/src/time/time.go @@ -81,8 +81,9 @@ // // Timer resolution varies depending on the Go runtime, the operating system // and the underlying hardware. -// On Unix, the resolution is approximately 1ms. -// On Windows, the default resolution is approximately 16ms, but +// On Unix, the resolution is ~1ms. +// On Windows version 1803 and newer, the resolution is ~0.5ms. +// On older Windows versions, the default resolution is ~16ms, but // a higher resolution may be requested using [golang.org/x/sys/windows.TimeBeginPeriod]. 
package time diff --git a/src/time/zoneinfo_read.go b/src/time/zoneinfo_read.go index 707dd1189d..9ce735d279 100644 --- a/src/time/zoneinfo_read.go +++ b/src/time/zoneinfo_read.go @@ -11,6 +11,7 @@ package time import ( "errors" + "internal/bytealg" "runtime" "syscall" ) @@ -99,10 +100,8 @@ func (d *dataIO) rest() []byte { // Make a string by stopping at the first NUL func byteString(p []byte) string { - for i := 0; i < len(p); i++ { - if p[i] == 0 { - return string(p[0:i]) - } + if i := bytealg.IndexByte(p, 0); i != -1 { + p = p[:i] } return string(p) } diff --git a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s index 541d696b67..731d2ac6db 100644 --- a/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s +++ b/src/vendor/golang.org/x/crypto/chacha20poly1305/chacha20poly1305_amd64.s @@ -183,11 +183,31 @@ GLOBL ·andMask<>(SB), (NOPTR+RODATA), $240 #define shiftD1Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xd2; BYTE $0x04 // PALIGNR $4, X10, X10 #define shiftD2Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xdb; BYTE $0x04 // PALIGNR $4, X11, X11 #define shiftD3Right BYTE $0x66; BYTE $0x45; BYTE $0x0f; BYTE $0x3a; BYTE $0x0f; BYTE $0xff; BYTE $0x04 // PALIGNR $4, X15, X15 + // Some macros + +// ROL rotates the uint32s in register R left by N bits, using temporary T. +#define ROL(N, R, T) \ + MOVO R, T; PSLLL $(N), T; PSRLL $(32-(N)), R; PXOR T, R + +// ROL16 rotates the uint32s in register R left by 16, using temporary T if needed. +#ifdef GOAMD64_v2 +#define ROL16(R, T) PSHUFB ·rol16<>(SB), R +#else +#define ROL16(R, T) ROL(16, R, T) +#endif + +// ROL8 rotates the uint32s in register R left by 8, using temporary T if needed. 
+#ifdef GOAMD64_v2 +#define ROL8(R, T) PSHUFB ·rol8<>(SB), R +#else +#define ROL8(R, T) ROL(8, R, T) +#endif + #define chachaQR(A, B, C, D, T) \ - PADDD B, A; PXOR A, D; PSHUFB ·rol16<>(SB), D \ + PADDD B, A; PXOR A, D; ROL16(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $12, T; PSRLL $20, B; PXOR T, B \ - PADDD B, A; PXOR A, D; PSHUFB ·rol8<>(SB), D \ + PADDD B, A; PXOR A, D; ROL8(D, T) \ PADDD D, C; PXOR C, B; MOVO B, T; PSLLL $7, T; PSRLL $25, B; PXOR T, B #define chachaQR_AVX2(A, B, C, D, T) \ diff --git a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go index 6fc2838a3f..2492f796af 100644 --- a/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go +++ b/src/vendor/golang.org/x/crypto/cryptobyte/asn1.go @@ -733,13 +733,14 @@ func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag return true } -// ReadOptionalASN1Boolean sets *out to the value of the next ASN.1 BOOLEAN or, -// if the next bytes are not an ASN.1 BOOLEAN, to the value of defaultValue. -// It reports whether the operation was successful. -func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { +// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN +// explicitly tagged with tag into out and advances. If no element with a +// matching tag is present, it sets "out" to defaultValue instead. It reports +// whether the read was successful. 
+func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool { var present bool var child String - if !s.ReadOptionalASN1(&child, &present, asn1.BOOLEAN) { + if !s.ReadOptionalASN1(&child, &present, tag) { return false } @@ -748,7 +749,7 @@ func (s *String) ReadOptionalASN1Boolean(out *bool, defaultValue bool) bool { return true } - return s.ReadASN1Boolean(out) + return child.ReadASN1Boolean(out) } func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool { diff --git a/src/vendor/golang.org/x/crypto/hkdf/hkdf.go b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go index dda3f143be..f4ded5fee2 100644 --- a/src/vendor/golang.org/x/crypto/hkdf/hkdf.go +++ b/src/vendor/golang.org/x/crypto/hkdf/hkdf.go @@ -56,7 +56,9 @@ func (f *hkdf) Read(p []byte) (int, error) { // Fill the rest of the buffer for len(p) > 0 { - f.expander.Reset() + if f.counter > 1 { + f.expander.Reset() + } f.expander.Write(f.prev) f.expander.Write(f.info) f.expander.Write([]byte{f.counter}) diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go deleted file mode 100644 index d33c8890fc..0000000000 --- a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_compat.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.13 - -package poly1305 - -// Generic fallbacks for the math/bits intrinsics, copied from -// src/math/bits/bits.go. They were added in Go 1.12, but Add64 and Sum64 had -// variable time fallbacks until Go 1.13. 
- -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - sum = x + y + carry - carryOut = ((x & y) | ((x | y) &^ sum)) >> 63 - return -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - diff = x - y - borrow - borrowOut = ((^x & y) | (^(x ^ y) & diff)) >> 63 - return -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - const mask32 = 1<<32 - 1 - x0 := x & mask32 - x1 := x >> 32 - y0 := y & mask32 - y1 := y >> 32 - w0 := x0 * y0 - t := x1*y0 + w0>>32 - w1 := t & mask32 - w2 := t >> 32 - w1 += x0 * y1 - hi = x1*y1 + w2 + w1>>32 - lo = x * y - return -} diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go b/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go deleted file mode 100644 index 495c1fa697..0000000000 --- a/src/vendor/golang.org/x/crypto/internal/poly1305/bits_go1.13.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -//go:build go1.13 - -package poly1305 - -import "math/bits" - -func bitsAdd64(x, y, carry uint64) (sum, carryOut uint64) { - return bits.Add64(x, y, carry) -} - -func bitsSub64(x, y, borrow uint64) (diff, borrowOut uint64) { - return bits.Sub64(x, y, borrow) -} - -func bitsMul64(x, y uint64) (hi, lo uint64) { - return bits.Mul64(x, y) -} diff --git a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go index e041da5ea3..ec2202bd7d 100644 --- a/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go +++ b/src/vendor/golang.org/x/crypto/internal/poly1305/sum_generic.go @@ -7,7 +7,10 @@ package poly1305 -import "encoding/binary" +import ( + "encoding/binary" + "math/bits" +) // Poly1305 [RFC 7539] is a relatively simple algorithm: the authentication tag // for a 64 bytes message is approximately @@ -114,13 +117,13 @@ type uint128 struct { } func mul64(a, b uint64) uint128 { - hi, lo := bitsMul64(a, b) + hi, lo := bits.Mul64(a, b) return uint128{lo, hi} } func add128(a, b uint128) uint128 { - lo, c := bitsAdd64(a.lo, b.lo, 0) - hi, c := bitsAdd64(a.hi, b.hi, c) + lo, c := bits.Add64(a.lo, b.lo, 0) + hi, c := bits.Add64(a.hi, b.hi, c) if c != 0 { panic("poly1305: unexpected overflow") } @@ -155,8 +158,8 @@ func updateGeneric(state *macState, msg []byte) { // hide leading zeroes. For full chunks, that's 1 << 128, so we can just // add 1 to the most significant (2¹²⁸) limb, h2. 
if len(msg) >= TagSize { - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(msg[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(msg[8:16]), c) h2 += c + 1 msg = msg[TagSize:] @@ -165,8 +168,8 @@ func updateGeneric(state *macState, msg []byte) { copy(buf[:], msg) buf[len(msg)] = 1 - h0, c = bitsAdd64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) - h1, c = bitsAdd64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) + h0, c = bits.Add64(h0, binary.LittleEndian.Uint64(buf[0:8]), 0) + h1, c = bits.Add64(h1, binary.LittleEndian.Uint64(buf[8:16]), c) h2 += c msg = nil @@ -219,9 +222,9 @@ func updateGeneric(state *macState, msg []byte) { m3 := h2r1 t0 := m0.lo - t1, c := bitsAdd64(m1.lo, m0.hi, 0) - t2, c := bitsAdd64(m2.lo, m1.hi, c) - t3, _ := bitsAdd64(m3.lo, m2.hi, c) + t1, c := bits.Add64(m1.lo, m0.hi, 0) + t2, c := bits.Add64(m2.lo, m1.hi, c) + t3, _ := bits.Add64(m3.lo, m2.hi, c) // Now we have the result as 4 64-bit limbs, and we need to reduce it // modulo 2¹³⁰ - 5. The special shape of this Crandall prime lets us do @@ -243,14 +246,14 @@ func updateGeneric(state *macState, msg []byte) { // To add c * 5 to h, we first add cc = c * 4, and then add (cc >> 2) = c. - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c cc = shiftRightBy2(cc) - h0, c = bitsAdd64(h0, cc.lo, 0) - h1, c = bitsAdd64(h1, cc.hi, c) + h0, c = bits.Add64(h0, cc.lo, 0) + h1, c = bits.Add64(h1, cc.hi, c) h2 += c // h2 is at most 3 + 1 + 1 = 5, making the whole of h at most @@ -287,9 +290,9 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // in constant time, we compute t = h - (2¹³⁰ - 5), and select h as the // result if the subtraction underflows, and t otherwise. 
- hMinusP0, b := bitsSub64(h0, p0, 0) - hMinusP1, b := bitsSub64(h1, p1, b) - _, b = bitsSub64(h2, p2, b) + hMinusP0, b := bits.Sub64(h0, p0, 0) + hMinusP1, b := bits.Sub64(h1, p1, b) + _, b = bits.Sub64(h2, p2, b) // h = h if h < p else h - p h0 = select64(b, h0, hMinusP0) @@ -301,8 +304,8 @@ func finalize(out *[TagSize]byte, h *[3]uint64, s *[2]uint64) { // // by just doing a wide addition with the 128 low bits of h and discarding // the overflow. - h0, c := bitsAdd64(h0, s[0], 0) - h1, _ = bitsAdd64(h1, s[1], c) + h0, c := bits.Add64(h0, s[0], 0) + h1, _ = bits.Add64(h1, s[1], c) binary.LittleEndian.PutUint64(out[0:8], h0) binary.LittleEndian.PutUint64(out[8:16], h1) diff --git a/src/vendor/modules.txt b/src/vendor/modules.txt index 6d61189796..9e3b5b1554 100644 --- a/src/vendor/modules.txt +++ b/src/vendor/modules.txt @@ -1,4 +1,4 @@ -# golang.org/x/crypto v0.14.1-0.20231011220222-8779cbd1c995 +# golang.org/x/crypto v0.18.0 ## explicit; go 1.18 golang.org/x/crypto/chacha20 golang.org/x/crypto/chacha20poly1305 @@ -7,7 +7,7 @@ golang.org/x/crypto/cryptobyte/asn1 golang.org/x/crypto/hkdf golang.org/x/crypto/internal/alias golang.org/x/crypto/internal/poly1305 -# golang.org/x/net v0.17.1-0.20231025180729-4c7a5b64f145 +# golang.org/x/net v0.20.1-0.20240110153537-07e05fd6e95a ## explicit; go 1.18 golang.org/x/net/dns/dnsmessage golang.org/x/net/http/httpguts @@ -17,10 +17,10 @@ golang.org/x/net/idna golang.org/x/net/lif golang.org/x/net/nettest golang.org/x/net/route -# golang.org/x/sys v0.13.1-0.20231011215430-1bfbee0e20e3 +# golang.org/x/sys v0.18.0 ## explicit; go 1.18 golang.org/x/sys/cpu -# golang.org/x/text v0.13.1-0.20231011215848-6c97a165dd66 +# golang.org/x/text v0.14.0 ## explicit; go 1.18 golang.org/x/text/secure/bidirule golang.org/x/text/transform diff --git a/test/codegen/bits.go b/test/codegen/bits.go index 67daf12d62..4b6c8b94b8 100644 --- a/test/codegen/bits.go +++ b/test/codegen/bits.go @@ -382,7 +382,6 @@ func signextendAndMask8to64(a int8) (s, z 
uint64) { // ppc64x: -"MOVB", "ANDCC\t[$]247," z = uint64(uint8(a)) & 0x3F7 return - } // Verify zero-extended values are not sign-extended under a bit mask (#61297) @@ -392,7 +391,6 @@ func zeroextendAndMask8to64(a int8, b int16) (x, y uint64) { // ppc64x: -"MOVH\t", -"ANDCC", "MOVHZ" y = uint64(b) & 0xFFFF return - } // Verify rotate and mask instructions, and further simplified instructions for small types diff --git a/test/codegen/floats.go b/test/codegen/floats.go index 7991174b66..54dc87ecfd 100644 --- a/test/codegen/floats.go +++ b/test/codegen/floats.go @@ -156,3 +156,35 @@ func ArrayCopy(a [16]byte) (b [16]byte) { b = a return } + +// ---------------- // +// Float Min/Max // +// ---------------- // + +func Float64Min(a, b float64) float64 { + // amd64:"MINSD" + // arm64:"FMIND" + // riscv64:"FMIN" + return min(a, b) +} + +func Float64Max(a, b float64) float64 { + // amd64:"MAXSD" + // arm64:"FMAXD" + // riscv64:"FMAX" + return max(a, b) +} + +func Float32Min(a, b float32) float32 { + // amd64:"MINSS" + // arm64:"FMINS" + // riscv64:"FMINS" + return min(a, b) +} + +func Float32Max(a, b float32) float32 { + // amd64:"MAXSS" + // arm64:"FMAXS" + // riscv64:"FMAXS" + return max(a, b) +} diff --git a/test/codegen/issue63332.go b/test/codegen/issue63332.go new file mode 100644 index 0000000000..dbe671d247 --- /dev/null +++ b/test/codegen/issue63332.go @@ -0,0 +1,14 @@ +// asmcheck + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package codegen + +func issue63332(c chan int) { + x := 0 + // amd64:-`MOVQ` + x += 2 + c <- x +} diff --git a/test/codegen/memcombine.go b/test/codegen/memcombine.go index 1b8abc348a..6d6c33d947 100644 --- a/test/codegen/memcombine.go +++ b/test/codegen/memcombine.go @@ -348,7 +348,6 @@ func reassoc_load_uint32(b []byte) uint32 { func extrashift_load_uint32(b []byte) uint32 { // amd64:`MOVL\s\([A-Z]+\)`,`SHLL\s[$]2`,-`MOV[BW]`,-`OR` return uint32(b[0])<<2 | uint32(b[1])<<10 | uint32(b[2])<<18 | uint32(b[3])<<26 - } func outoforder_load_uint32(b []byte) uint32 { @@ -882,3 +881,40 @@ func wideStore2(p *[8]uint64, x, y uint64) { // s390x:-"STMG",-"MOVD" p[1] = y } + +func store32le(p *struct{ a, b uint32 }, x uint64) { + // amd64:"MOVQ",-"MOVL",-"SHRQ" + // arm64:"MOVD",-"MOVW",-"LSR" + // ppc64le:"MOVD",-"MOVW",-"SRD" + p.a = uint32(x) + // amd64:-"MOVL",-"SHRQ" + // arm64:-"MOVW",-"LSR" + // ppc64le:-"MOVW",-"SRD" + p.b = uint32(x >> 32) +} +func store32be(p *struct{ a, b uint32 }, x uint64) { + // ppc64:"MOVD",-"MOVW",-"SRD" + // s390x:"MOVD",-"MOVW",-"SRD" + p.a = uint32(x >> 32) + // ppc64:-"MOVW",-"SRD" + // s390x:-"MOVW",-"SRD" + p.b = uint32(x) +} +func store16le(p *struct{ a, b uint16 }, x uint32) { + // amd64:"MOVL",-"MOVW",-"SHRL" + // arm64:"MOVW",-"MOVH",-"UBFX" + // ppc64le:"MOVW",-"MOVH",-"SRW" + p.a = uint16(x) + // amd64:-"MOVW",-"SHRL" + // arm64:-"MOVH",-"UBFX" + // ppc64le:-"MOVH",-"SRW" + p.b = uint16(x >> 16) +} +func store16be(p *struct{ a, b uint16 }, x uint32) { + // ppc64:"MOVW",-"MOVH",-"SRW" + // s390x:"MOVW",-"MOVH",-"SRW" + p.a = uint16(x >> 16) + // ppc64:-"MOVH",-"SRW" + // s390x:-"MOVH",-"SRW" + p.b = uint16(x) +} diff --git a/test/codegen/noextend.go b/test/codegen/noextend.go index 98ad0ece89..193f75b092 100644 --- a/test/codegen/noextend.go +++ b/test/codegen/noextend.go @@ -277,7 +277,6 @@ func shouldSignEXT(x int) int64 { ret += int64(int8(x & 0x1100000000000011)) return ret - } func noIntermediateExtension(a, b, c uint32) 
uint32 { diff --git a/test/codegen/rotate.go b/test/codegen/rotate.go index 5495f86b79..109e55763c 100644 --- a/test/codegen/rotate.go +++ b/test/codegen/rotate.go @@ -18,6 +18,7 @@ func rot64(x uint64) uint64 { // amd64:"ROLQ\t[$]7" // ppc64x:"ROTL\t[$]7" // loong64: "ROTRV\t[$]57" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<7 | x>>57 // amd64:"ROLQ\t[$]8" @@ -25,6 +26,7 @@ func rot64(x uint64) uint64 { // s390x:"RISBGZ\t[$]0, [$]63, [$]8, " // ppc64x:"ROTL\t[$]8" // loong64: "ROTRV\t[$]56" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<8 + x>>56 // amd64:"ROLQ\t[$]9" @@ -32,6 +34,7 @@ func rot64(x uint64) uint64 { // s390x:"RISBGZ\t[$]0, [$]63, [$]9, " // ppc64x:"ROTL\t[$]9" // loong64: "ROTRV\t[$]55" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<9 ^ x>>55 // amd64:"ROLQ\t[$]10" @@ -41,6 +44,7 @@ func rot64(x uint64) uint64 { // arm64:"ROR\t[$]54" // s390x:"RISBGZ\t[$]0, [$]63, [$]10, " // loong64: "ROTRV\t[$]54" + // riscv64: "OR","SLLI","SRLI",-"AND" a += bits.RotateLeft64(x, 10) return a @@ -53,6 +57,7 @@ func rot32(x uint32) uint32 { // arm:"MOVW\tR\\d+@>25" // ppc64x:"ROTLW\t[$]7" // loong64: "ROTR\t[$]25" + // riscv64: "OR","SLLIW","SRLIW",-"AND" a += x<<7 | x>>25 // amd64:`ROLL\t[$]8` @@ -61,6 +66,7 @@ func rot32(x uint32) uint32 { // s390x:"RLL\t[$]8" // ppc64x:"ROTLW\t[$]8" // loong64: "ROTR\t[$]24" + // riscv64: "OR","SLLIW","SRLIW",-"AND" a += x<<8 + x>>24 // amd64:"ROLL\t[$]9" @@ -69,6 +75,7 @@ func rot32(x uint32) uint32 { // s390x:"RLL\t[$]9" // ppc64x:"ROTLW\t[$]9" // loong64: "ROTR\t[$]23" + // riscv64: "OR","SLLIW","SRLIW",-"AND" a += x<<9 ^ x>>23 // amd64:"ROLL\t[$]10" @@ -79,6 +86,7 @@ func rot32(x uint32) uint32 { // arm64:"RORW\t[$]22" // s390x:"RLL\t[$]10" // loong64: "ROTR\t[$]22" + // riscv64: "OR","SLLIW","SRLIW",-"AND" a += bits.RotateLeft32(x, 10) return a @@ -88,12 +96,15 @@ func rot16(x uint16) uint16 { var a uint16 // amd64:"ROLW\t[$]7" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<7 | x>>9 // amd64:`ROLW\t[$]8` + // 
riscv64: "OR","SLLI","SRLI",-"AND" a += x<<8 + x>>8 // amd64:"ROLW\t[$]9" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<9 ^ x>>7 return a @@ -103,12 +114,15 @@ func rot8(x uint8) uint8 { var a uint8 // amd64:"ROLB\t[$]5" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<5 | x>>3 // amd64:`ROLB\t[$]6` + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<6 + x>>2 // amd64:"ROLB\t[$]7" + // riscv64: "OR","SLLI","SRLI",-"AND" a += x<<7 ^ x>>1 return a @@ -127,12 +141,14 @@ func rot64nc(x uint64, z uint) uint64 { // arm64:"ROR","NEG",-"AND" // ppc64x:"ROTL",-"NEG",-"AND" // loong64: "ROTRV", -"AND" + // riscv64: "OR","SLL","SRL",-"AND" a += x<<z | x>>(64-z) // amd64:"RORQ",-"AND" // arm64:"ROR",-"NEG",-"AND" // ppc64x:"ROTL","NEG",-"AND" // loong64: "ROTRV", -"AND" + // riscv64: "OR","SLL","SRL",-"AND" a += x>>z | x<<(64-z) return a @@ -147,12 +163,14 @@ func rot32nc(x uint32, z uint) uint32 { // arm64:"ROR","NEG",-"AND" // ppc64x:"ROTLW",-"NEG",-"AND" // loong64: "ROTR", -"AND" + // riscv64: "OR","SLLW","SRLW",-"AND" a += x<<z | x>>(32-z) // amd64:"RORL",-"AND" // arm64:"ROR",-"NEG",-"AND" // ppc64x:"ROTLW","NEG",-"AND" // loong64: "ROTR", -"AND" + // riscv64: "OR","SLLW","SRLW",-"AND" a += x>>z | x<<(32-z) return a @@ -164,9 +182,11 @@ func rot16nc(x uint16, z uint) uint16 { z &= 15 // amd64:"ROLW",-"ANDQ" + // riscv64: "OR","SLL","SRL",-"AND\t" a += x<<z | x>>(16-z) // amd64:"RORW",-"ANDQ" + // riscv64: "OR","SLL","SRL",-"AND\t" a += x>>z | x<<(16-z) return a @@ -178,9 +198,11 @@ func rot8nc(x uint8, z uint) uint8 { z &= 7 // amd64:"ROLB",-"ANDQ" + // riscv64: "OR","SLL","SRL",-"AND\t" a += x<<z | x>>(8-z) // amd64:"RORB",-"ANDQ" + // riscv64: "OR","SLL","SRL",-"AND\t" a += x>>z | x<<(8-z) return a diff --git a/test/codegen/shift.go b/test/codegen/shift.go index 32cfaffae0..50d60426d0 100644 --- a/test/codegen/shift.go +++ b/test/codegen/shift.go @@ -22,12 +22,42 @@ func rshConst64Ux64(v uint64) uint64 { return v >> uint64(33) } +func rshConst64Ux64Overflow32(v uint32) uint64 { + // 
riscv64:"MOV\t\\$0,",-"SRL" + return uint64(v) >> 32 +} + +func rshConst64Ux64Overflow16(v uint16) uint64 { + // riscv64:"MOV\t\\$0,",-"SRL" + return uint64(v) >> 16 +} + +func rshConst64Ux64Overflow8(v uint8) uint64 { + // riscv64:"MOV\t\\$0,",-"SRL" + return uint64(v) >> 8 +} + func rshConst64x64(v int64) int64 { // ppc64x:"SRAD" // riscv64:"SRAI\t",-"OR",-"SLTIU" return v >> uint64(33) } +func rshConst64x64Overflow32(v int32) int64 { + // riscv64:"SRAIW",-"SLLI",-"SRAI\t" + return int64(v) >> 32 +} + +func rshConst64x64Overflow16(v int16) int64 { + // riscv64:"SLLI","SRAI",-"SRAIW" + return int64(v) >> 16 +} + +func rshConst64x64Overflow8(v int8) int64 { + // riscv64:"SLLI","SRAI",-"SRAIW" + return int64(v) >> 8 +} + func lshConst32x64(v int32) int32 { // ppc64x:"SLW" // riscv64:"SLLI",-"AND",-"SLTIU", -"MOVW" diff --git a/test/codegen/stack.go b/test/codegen/stack.go index eebbbf1677..65c9868d67 100644 --- a/test/codegen/stack.go +++ b/test/codegen/stack.go @@ -113,3 +113,32 @@ func Defer() { // amd64:`CALL\truntime\.deferprocStack` defer func() {}() } + +// Check that stack slots are shared among values of the same +// type, but not pointer-identical types. See issue 65783. + +func spillSlotReuse() { + // The return values of getp1 and getp2 need to be + // spilled around the calls to nopInt. Make sure that + // spill slot gets reused. 
+ + //arm64:`.*autotmp_2-8\(SP\)` + getp1()[nopInt()] = 0 + //arm64:`.*autotmp_2-8\(SP\)` + getp2()[nopInt()] = 0 +} + +//go:noinline +func nopInt() int { + return 0 +} + +//go:noinline +func getp1() *[4]int { + return nil +} + +//go:noinline +func getp2() *[4]int { + return nil +} diff --git a/test/escape2.go b/test/escape2.go index 99f85914a3..3e5d11f88e 100644 --- a/test/escape2.go +++ b/test/escape2.go @@ -397,7 +397,6 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$" return nil } return nil - } // assigning to an array element is like assigning to the array diff --git a/test/escape2n.go b/test/escape2n.go index 350be65202..2613152150 100644 --- a/test/escape2n.go +++ b/test/escape2n.go @@ -397,7 +397,6 @@ func exprsw(i *int) *int { // ERROR "leaking param: i to result ~r0 level=0$" return nil } return nil - } // assigning to an array element is like assigning to the array diff --git a/test/escape_calls.go b/test/escape_calls.go index aa7c7f516c..5424c006ee 100644 --- a/test/escape_calls.go +++ b/test/escape_calls.go @@ -52,3 +52,10 @@ func bar() { s := "string" f([]string{s}) // ERROR "\[\]string{...} escapes to heap" } + +func strmin(a, b, c string) string { // ERROR "leaking param: a to result ~r0 level=0" "leaking param: b to result ~r0 level=0" "leaking param: c to result ~r0 level=0" + return min(a, b, c) +} +func strmax(a, b, c string) string { // ERROR "leaking param: a to result ~r0 level=0" "leaking param: b to result ~r0 level=0" "leaking param: c to result ~r0 level=0" + return max(a, b, c) +} diff --git a/test/fixedbugs/bug398.go b/test/fixedbugs/bug398.go index db3e43c7f9..2b00f6074d 100644 --- a/test/fixedbugs/bug398.go +++ b/test/fixedbugs/bug398.go @@ -1,4 +1,4 @@ -// compile -d=interfacecycles +// errorcheck // Copyright 2012 The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style @@ -11,11 +11,11 @@ package p // exported interfaces -type I1 interface { +type I1 interface { // ERROR "invalid recursive type: anonymous interface refers to itself" F() interface{I1} } -type I2 interface { +type I2 interface { // ERROR "invalid recursive type: anonymous interface refers to itself" F() interface{I2} } @@ -28,11 +28,11 @@ func F() bool { // non-exported interfaces -type i1 interface { +type i1 interface { // ERROR "invalid recursive type: anonymous interface refers to itself" F() interface{i1} } -type i2 interface { +type i2 interface { // ERROR "invalid recursive type: anonymous interface refers to itself" F() interface{i2} } diff --git a/test/fixedbugs/issue16369.go b/test/fixedbugs/issue16369.go index 3a7bb7eaed..86d0ce645d 100644 --- a/test/fixedbugs/issue16369.go +++ b/test/fixedbugs/issue16369.go @@ -1,4 +1,4 @@ -// compile -d=interfacecycles +// errorcheck // Copyright 2016 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style @@ -6,7 +6,7 @@ package p -type T interface { +type T interface { // ERROR "invalid recursive type: anonymous interface refers to itself" M(interface { T }) diff --git a/test/fixedbugs/issue20789.go b/test/fixedbugs/issue20789.go index 4e4eed42a7..cba1290957 100644 --- a/test/fixedbugs/issue20789.go +++ b/test/fixedbugs/issue20789.go @@ -10,4 +10,4 @@ // there yet, so put it here for now. See also #20800.) 
package e -func([<-chan<-[func u){go // ERROR "unexpected u" \ No newline at end of file +func([<-chan<-[func u){go // ERROR "unexpected `u'" \ No newline at end of file diff --git a/test/fixedbugs/issue22794.go b/test/fixedbugs/issue22794.go index 636af26e84..fb5873e8e5 100644 --- a/test/fixedbugs/issue22794.go +++ b/test/fixedbugs/issue22794.go @@ -13,9 +13,9 @@ type it struct { func main() { i1 := it{Floats: true} - if i1.floats { // ERROR "(type it .* field or method floats, but does have Floats)|undefined field or method" + if i1.floats { // ERROR "(type it .* field or method floats, but does have field Floats)|undefined field or method" } - i2 := &it{floats: false} // ERROR "(but does have Floats)|unknown field|declared and not used" - _ = &it{InneR: "foo"} // ERROR "(but does have inner)|unknown field" + i2 := &it{floats: false} // ERROR "cannot refer to unexported field floats in struct literal|unknown field|declared and not used" + _ = &it{InneR: "foo"} // ERROR "(but does have field inner)|unknown field" _ = i2 } diff --git a/test/fixedbugs/issue23664.go b/test/fixedbugs/issue23664.go index 715654be70..fe171c27d0 100644 --- a/test/fixedbugs/issue23664.go +++ b/test/fixedbugs/issue23664.go @@ -9,9 +9,9 @@ package p func f() { - if f() true { // ERROR "unexpected true, expected {" + if f() true { // ERROR "unexpected `true', expected {" } - switch f() true { // ERROR "unexpected true, expected {" + switch f() true { // ERROR "unexpected `true', expected {" } } diff --git a/test/fixedbugs/issue25727.go b/test/fixedbugs/issue25727.go index 06b2e2cac7..27c60a1764 100644 --- a/test/fixedbugs/issue25727.go +++ b/test/fixedbugs/issue25727.go @@ -11,11 +11,11 @@ import "net/http" var s = http.Server{} var _ = s.doneChan // ERROR "s.doneChan undefined .cannot refer to unexported field or method doneChan.$|unexported field or method|s.doneChan undefined" var _ = s.DoneChan // ERROR "s.DoneChan undefined .type http.Server has no field or method DoneChan.$|undefined 
field or method" -var _ = http.Server{tlsConfig: nil} // ERROR "unknown field tlsConfig in struct literal.+ .but does have TLSConfig.$|unknown field .?tlsConfig.? in .?http.Server|unknown field" +var _ = http.Server{tlsConfig: nil} // ERROR "cannot refer to unexported field tlsConfig in struct literal|unknown field .?tlsConfig.? in .?http.Server|unknown field" var _ = http.Server{DoneChan: nil} // ERROR "unknown field DoneChan in struct literal of type http.Server$|unknown field .?DoneChan.? in .?http.Server" type foo struct { bar int } -var _ = &foo{bAr: 10} // ERROR "unknown field bAr in struct literal.+ .but does have bar.$|unknown field .?bAr.? in .?foo|unknown field" +var _ = &foo{bAr: 10} // ERROR "cannot refer to unexported field bAr in struct literal|unknown field .?bAr.? in .?foo|unknown field" diff --git a/test/fixedbugs/issue31747.go b/test/fixedbugs/issue31747.go index 319a721337..b40aecd5d2 100644 --- a/test/fixedbugs/issue31747.go +++ b/test/fixedbugs/issue31747.go @@ -8,18 +8,18 @@ package p // numeric literals const ( - _ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later \(-lang was set to go1.12; check go.mod\)|requires go1.13" - _ = 0b111 // ERROR "binary literals requires go1.13 or later" - _ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later" + _ = 1_000 // ERROR "underscore in numeric literal requires go1.13 or later \(-lang was set to go1.12; check go.mod\)|requires go1.13" + _ = 0b111 // ERROR "binary literal requires go1.13 or later" + _ = 0o567 // ERROR "0o/0O-style octal literal requires go1.13 or later" _ = 0xabc // ok - _ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later" + _ = 0x0p1 // ERROR "hexadecimal floating-point literal requires go1.13 or later" - _ = 0B111 // ERROR "binary" - _ = 0O567 // ERROR "octal" - _ = 0Xabc // ok - _ = 0X0P1 // ERROR "hexadecimal floating-point" + _ = 0B111 // ERROR "binary" + _ = 0O567 // ERROR "octal" + _ = 0Xabc // ok + _ = 0X0P1 
// ERROR "hexadecimal floating-point" - _ = 1_000i // ERROR "underscores" + _ = 1_000i // ERROR "underscore" _ = 0b111i // ERROR "binary" _ = 0o567i // ERROR "octal" _ = 0xabci // ERROR "hexadecimal floating-point" diff --git a/test/fixedbugs/issue32680b.go b/test/fixedbugs/issue32680b.go index 61e531701a..09bc3a9ce0 100644 --- a/test/fixedbugs/issue32680b.go +++ b/test/fixedbugs/issue32680b.go @@ -12,5 +12,4 @@ func hashBytesRaw(b0, b1, b2, b3, b7 byte) uint64 { func doStuff(data []byte) uint64 { return hashBytesRaw(data[0], data[1], data[2], data[3], data[7]) - } diff --git a/test/fixedbugs/issue35073.go b/test/fixedbugs/issue35073a.go similarity index 100% rename from test/fixedbugs/issue35073.go rename to test/fixedbugs/issue35073a.go diff --git a/test/fixedbugs/issue35073b.go b/test/fixedbugs/issue35073b.go new file mode 100644 index 0000000000..8cdc6c184d --- /dev/null +++ b/test/fixedbugs/issue35073b.go @@ -0,0 +1,23 @@ +// errorcheck -0 -d=checkptr -m + +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "reflect" + "unsafe" +) + +func main() { + n := 10 // ERROR "moved to heap: n" + m := make(map[string]string) // ERROR "moved to heap: m" "make\(map\[string\]string\) escapes to heap" + + _ = unsafe.Pointer(reflect.ValueOf(&n).Elem().UnsafeAddr()) // ERROR "inlining call" + _ = unsafe.Pointer(reflect.ValueOf(&m).Elem().Pointer()) // ERROR "inlining call" +} diff --git a/test/fixedbugs/issue42284.dir/a.go b/test/fixedbugs/issue42284.dir/a.go index f7fd80bd20..ccf54fad54 100644 --- a/test/fixedbugs/issue42284.dir/a.go +++ b/test/fixedbugs/issue42284.dir/a.go @@ -20,7 +20,7 @@ func F(i I) I { // ERROR "can inline F" "leaking param: i to result ~r0 level=0" func g() { h := E() // ERROR "inlining call to E" "T\(0\) does not escape" - h.M() // ERROR "devirtualizing h.M to T" + h.M() // ERROR "devirtualizing h.M to T" "inlining call to T.M" // BAD: T(0) could be stack allocated. i := F(T(0)) // ERROR "inlining call to F" "T\(0\) escapes to heap" diff --git a/test/fixedbugs/issue42284.dir/b.go b/test/fixedbugs/issue42284.dir/b.go index 8cd93b8db4..559de59184 100644 --- a/test/fixedbugs/issue42284.dir/b.go +++ b/test/fixedbugs/issue42284.dir/b.go @@ -8,7 +8,7 @@ import "./a" func g() { h := a.E() // ERROR "inlining call to a.E" "T\(0\) does not escape" - h.M() // ERROR "devirtualizing h.M to a.T" + h.M() // ERROR "devirtualizing h.M to a.T" "inlining call to a.T.M" // BAD: T(0) could be stack allocated. i := a.F(a.T(0)) // ERROR "inlining call to a.F" "a.T\(0\) escapes to heap" diff --git a/test/fixedbugs/issue52193.go b/test/fixedbugs/issue52193.go new file mode 100644 index 0000000000..32375d114f --- /dev/null +++ b/test/fixedbugs/issue52193.go @@ -0,0 +1,46 @@ +// errorcheck -0 -m + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package p + +import ( + "crypto/ecdh" + "crypto/rand" +) + +func F(peerShare []byte) ([]byte, error) { // ERROR "leaking param: peerShare" + p256 := ecdh.P256() // ERROR "inlining call to ecdh.P256" + + ourKey, err := p256.GenerateKey(rand.Reader) // ERROR "devirtualizing p256.GenerateKey" "inlining call to ecdh.*GenerateKey" + if err != nil { + return nil, err + } + + peerPublic, err := p256.NewPublicKey(peerShare) // ERROR "devirtualizing p256.NewPublicKey" "inlining call to ecdh.*NewPublicKey" + if err != nil { + return nil, err + } + + return ourKey.ECDH(peerPublic) +} + +// Test that inlining doesn't break if devirtualization exposes a new +// inlinable callee. + +func f() { // ERROR "can inline f" + var i interface{ m() } = T(0) // ERROR "T\(0\) does not escape" + i.m() // ERROR "devirtualizing i.m" "inlining call to T.m" +} + +type T int + +func (T) m() { // ERROR "can inline T.m" + if never { + f() // ERROR "inlining call to f" "devirtualizing i.m" "T\(0\) does not escape" + } +} + +var never bool diff --git a/test/fixedbugs/issue56923.go b/test/fixedbugs/issue56923.go index 700a1046a9..7cc9494b76 100644 --- a/test/fixedbugs/issue56923.go +++ b/test/fixedbugs/issue56923.go @@ -18,7 +18,6 @@ func (r EqFunc[T]) Eqv(a, b T) bool { func New[T any](f func(a, b T) bool) Eq[T] { return EqFunc[T](f) - } func Equal(a, b []byte) bool { diff --git a/test/fixedbugs/issue64565.go b/test/fixedbugs/issue64565.go new file mode 100644 index 0000000000..634025ce3e --- /dev/null +++ b/test/fixedbugs/issue64565.go @@ -0,0 +1,15 @@ +// run + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func main() { + m := "0" + for _, c := range "321" { + m = max(string(c), m) + println(m) + } +} diff --git a/test/fixedbugs/issue64565.out b/test/fixedbugs/issue64565.out new file mode 100644 index 0000000000..1f242fa6f0 --- /dev/null +++ b/test/fixedbugs/issue64565.out @@ -0,0 +1,3 @@ +3 +3 +3 diff --git a/test/fixedbugs/issue64606.go b/test/fixedbugs/issue64606.go new file mode 100644 index 0000000000..9b53c1041f --- /dev/null +++ b/test/fixedbugs/issue64606.go @@ -0,0 +1,32 @@ +// build -race + +//go:build race + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + var o any = uint64(5) + switch o.(type) { + case int: + goto ret + case int8: + goto ret + case int16: + goto ret + case int32: + goto ret + case int64: + goto ret + case float32: + goto ret + case float64: + goto ret + default: + goto ret + } +ret: +} diff --git a/test/fixedbugs/issue64715.go b/test/fixedbugs/issue64715.go new file mode 100644 index 0000000000..bf117165b7 --- /dev/null +++ b/test/fixedbugs/issue64715.go @@ -0,0 +1,25 @@ +// run + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +func boolInt32(b bool) int32 { + if b { + return 1 + } + + return 0 +} + +func f(left uint16, right int32) (r uint16) { + return left >> right +} + +var n = uint16(65535) + +func main() { + println(f(n, boolInt32(int64(n^n) > 1))) +} diff --git a/test/fixedbugs/issue64715.out b/test/fixedbugs/issue64715.out new file mode 100644 index 0000000000..7a53b35687 --- /dev/null +++ b/test/fixedbugs/issue64715.out @@ -0,0 +1 @@ +65535 diff --git a/test/fixedbugs/issue64826.go b/test/fixedbugs/issue64826.go new file mode 100644 index 0000000000..864c474a64 --- /dev/null +++ b/test/fixedbugs/issue64826.go @@ -0,0 +1,38 @@ +// build + +// Copyright 2023 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + f(g(false)) +} +func g(b bool) string { + if b { + return "z" + } + return "q" +} +func f(x string) int { + switch len(x) { + case 4: + return 4 + case 5: + return 5 + case 6: + return 6 + case 7: + return 7 + case 8: + return 8 + case 9: + return 9 + case 10: + return 10 + case 11: + return 11 + } + return 0 +} diff --git a/test/fixedbugs/issue65417.go b/test/fixedbugs/issue65417.go new file mode 100644 index 0000000000..15e84d819c --- /dev/null +++ b/test/fixedbugs/issue65417.go @@ -0,0 +1,42 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package main + +import ( + "strings" + "unsafe" +) + +func main() { + shouldPanic("runtime error: index out of range", func() { f(0) }) + shouldPanic("runtime error: index out of range", func() { g(0) }) +} + +func f[T byte](t T) { + const str = "a" + _ = str[unsafe.Sizeof(t)] +} + +func g[T byte](t T) { + const str = "a" + _ = str[unsafe.Sizeof(t)+0] +} + +func shouldPanic(str string, f func()) { + defer func() { + err := recover() + if err == nil { + panic("did not panic") + } + s := err.(error).Error() + if !strings.Contains(s, str) { + panic("got panic " + s + ", want " + str) + } + }() + + f() +} diff --git a/test/fixedbugs/issue65593.go b/test/fixedbugs/issue65593.go new file mode 100644 index 0000000000..892a78122e --- /dev/null +++ b/test/fixedbugs/issue65593.go @@ -0,0 +1,21 @@ +// compile + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +const run = false + +func f() { + if !run { + return + } + + messages := make(chan struct{}, 1) +main: + for range messages { + break main + } +} diff --git a/test/fixedbugs/issue65778.go b/test/fixedbugs/issue65778.go new file mode 100644 index 0000000000..30c680404d --- /dev/null +++ b/test/fixedbugs/issue65778.go @@ -0,0 +1,13 @@ +// compile -godebug gotypesalias=1 + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package p + +type A = int + +type T[P any] *A + +var _ T[int] diff --git a/test/fixedbugs/issue65808.go b/test/fixedbugs/issue65808.go new file mode 100644 index 0000000000..e6c4cf1ed0 --- /dev/null +++ b/test/fixedbugs/issue65808.go @@ -0,0 +1,30 @@ +// compile + +// Copyright 2024 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file.package main + +package main + +type Stringer interface { + String() string +} + +type ( + stringer struct{} + stringers [2]stringer + foo struct { + stringers + } +) + +func (stringer) String() string { return "" } +func toString(s Stringer) string { return s.String() } + +func (v stringers) toStrings() []string { + return []string{toString(v[0]), toString(v[1])} +} + +func main() { + _ = stringers{} +} diff --git a/test/fixedbugs/issue65957.dir/a.go b/test/fixedbugs/issue65957.dir/a.go new file mode 100644 index 0000000000..284ec4af9f --- /dev/null +++ b/test/fixedbugs/issue65957.dir/a.go @@ -0,0 +1,12 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package a + +var s any + +//go:noinline +func F() { + s = new([4]int32) +} diff --git a/test/fixedbugs/issue65957.dir/main.go b/test/fixedbugs/issue65957.dir/main.go new file mode 100644 index 0000000000..89b8a28234 --- /dev/null +++ b/test/fixedbugs/issue65957.dir/main.go @@ -0,0 +1,19 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +import ( + "./a" + "reflect" +) + +var s = []rune{0, 1, 2, 3} + +func main() { + m := map[any]int{} + k := reflect.New(reflect.ArrayOf(4, reflect.TypeOf(int32(0)))).Elem().Interface() + m[k] = 1 + a.F() +} diff --git a/test/fixedbugs/issue65957.go b/test/fixedbugs/issue65957.go new file mode 100644 index 0000000000..48e4d34c93 --- /dev/null +++ b/test/fixedbugs/issue65957.go @@ -0,0 +1,7 @@ +// rundir + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package ignored diff --git a/test/fixedbugs/issue65962.go b/test/fixedbugs/issue65962.go new file mode 100644 index 0000000000..a60cd32fd1 --- /dev/null +++ b/test/fixedbugs/issue65962.go @@ -0,0 +1,48 @@ +// run + +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package main + +func main() { + test1() + test2() +} + +type I interface { + f() + g() + h() +} + +//go:noinline +func ld[T any]() { + var x I + if _, ok := x.(T); ok { + } +} + +func isI(x any) { + _ = x.(I) +} + +func test1() { + defer func() { recover() }() + ld[bool]() // add itab to binary + _ = any(false).(I) +} + +type B bool + +func (B) f() { +} +func (B) g() { +} + +func test2() { + defer func() { recover() }() + ld[B]() // add itab to binary + _ = any(B(false)).(I) +} diff --git a/test/fuse.go b/test/fuse.go index f64a087965..e9205dcc23 100644 --- a/test/fuse.go +++ b/test/fuse.go @@ -33,7 +33,7 @@ func fEqLessU(a uint, f float64) bool { } func fEqLeqU(a uint64, f float64) bool { - return a == 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq64U based on Eq64$" + return a == 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Eq64 based on Eq64$" } func fNeqEq(a int, f float64) bool { @@ -58,7 +58,7 @@ func fNeqLessU(a uint, f float64) bool { } func fNeqLeqU(a uint32, f float64) bool { - return a != 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq32U based on Neq32$" + return a != 2 && f > Cf2 || a <= 2 && f < -Cf2 // ERROR "Redirect Leq32U based on Neq32$" } func fLessEq(a int, f float64) bool { @@ -110,11 +110,11 @@ func fLessULeqU(a uint64, f float64) bool { } func fLeqUEq(a uint8, f float64) bool { - return a <= 0 && f > Cf2 || a == 0 && f < -Cf2 // ERROR "Redirect Eq8 based on Leq8U$" + return a <= 2 && f > Cf2 || a == 2 && f < -Cf2 // ERROR "Redirect Eq8 based on Leq8U$" } func fLeqUNeq(a uint16, f float64) bool { - return a <= 0 && f > Cf2 || a != 0 && f 
< -Cf2 // ERROR "Redirect Neq16 based on Leq16U$" + return a <= 2 && f > Cf2 || a != 2 && f < -Cf2 // ERROR "Redirect Neq16 based on Leq16U$" } func fLeqLessU(a uint32, f float64) bool { @@ -122,7 +122,7 @@ func fLeqLessU(a uint32, f float64) bool { } func fLeqLeqU(a uint64, f float64) bool { - return a <= 0 && f > Cf2 || a <= 0 && f < -Cf2 // ERROR "Redirect Leq64U based on Leq64U$" + return a <= 2 && f > Cf2 || a <= 2 && f < -Cf2 // ERROR "Redirect Leq64U based on Leq64U$" } // Arg tests are disabled because the op name is different on amd64 and arm64. diff --git a/test/live.go b/test/live.go index 6badb011b0..5658c8ba06 100644 --- a/test/live.go +++ b/test/live.go @@ -667,7 +667,7 @@ func bad40() { func good40() { ret := T40{} // ERROR "stack object ret T40$" - ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$" + ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$" t := &ret printnl() // ERROR "live at call to printnl: ret$" // Note: ret is live at the printnl because the compiler moves &ret diff --git a/test/live_regabi.go b/test/live_regabi.go index 80a9cc1002..a335126b3f 100644 --- a/test/live_regabi.go +++ b/test/live_regabi.go @@ -664,7 +664,7 @@ func bad40() { func good40() { ret := T40{} // ERROR "stack object ret T40$" - ret.m = make(map[int]int) // ERROR "live at call to fastrand: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$" + ret.m = make(map[int]int) // ERROR "live at call to rand32: .autotmp_[0-9]+$" "stack object .autotmp_[0-9]+ runtime.hmap$" t := &ret printnl() // ERROR "live at call to printnl: ret$" // Note: ret is live at the printnl because the compiler moves &ret diff --git a/test/prove.go b/test/prove.go index 1aea282291..0d93db905a 100644 --- a/test/prove.go +++ b/test/prove.go @@ -396,8 +396,11 @@ func f13e(a int) int { return 0 } -func f13f(a int64) int64 { - if a > math.MaxInt64 
{ +func f13f(a, b int64) int64 { + if b != math.MaxInt64 { + return 42 + } + if a > b { if a == 0 { // ERROR "Disproved Eq64$" return 1 } @@ -869,9 +872,12 @@ func unrollInclStepTooLarge(a []int) int { } // Not an induction variable (min too small, iterating down) -func unrollDecMin(a []int) int { +func unrollDecMin(a []int, b int) int { + if b != math.MinInt64 { + return 42 + } var i, x int - for i = len(a); i >= math.MinInt64; i -= 2 { + for i = len(a); i >= b; i -= 2 { x += a[i-1] x += a[i-2] } @@ -882,9 +888,12 @@ func unrollDecMin(a []int) int { } // Not an induction variable (min too small, iterating up -- perhaps could allow, but why bother?) -func unrollIncMin(a []int) int { +func unrollIncMin(a []int, b int) int { + if b != math.MinInt64 { + return 42 + } var i, x int - for i = len(a); i >= math.MinInt64; i += 2 { + for i = len(a); i >= b; i += 2 { x += a[i-1] x += a[i-2] } diff --git a/test/range3.go b/test/range3.go index 4f770a2b70..f58a398f94 100644 --- a/test/range3.go +++ b/test/range3.go @@ -74,9 +74,17 @@ func testint4() { } } +// Issue #64471. 
+func testint5() { + for i := range 'a' { + var _ *rune = &i // ensure i has type rune + } +} + func main() { testint1() testint2() testint3() testint4() + testint5() } diff --git a/test/range4.go b/test/range4.go index 696b205ab7..0b051f6d3c 100644 --- a/test/range4.go +++ b/test/range4.go @@ -311,6 +311,30 @@ func testcalls() { } } +type iter3YieldFunc func(int, int) bool + +func iter3(list ...int) func(iter3YieldFunc) { + return func(yield iter3YieldFunc) { + for k, v := range list { + if !yield(k, v) { + return + } + } + } +} + +func testcalls1() { + ncalls := 0 + for k, v := range iter3(1, 2, 3) { + _, _ = k, v + ncalls++ + } + if ncalls != 3 { + println("wrong number of calls:", ncalls, "!= 3") + panic("fail") + } +} + func main() { testfunc0() testfunc1() @@ -323,4 +347,5 @@ func main() { testfunc8() testfunc9() testcalls() + testcalls1() } diff --git a/test/rangegen.go b/test/rangegen.go index bdcf099862..8231c64db7 100644 --- a/test/rangegen.go +++ b/test/rangegen.go @@ -25,6 +25,7 @@ import ( "bytes" "fmt" "log" + "math/bits" "os" "os/exec" "strings" @@ -37,6 +38,13 @@ func main() { log.SetFlags(0) log.SetPrefix("rangegen: ") + if !long && bits.UintSize == 32 { + // Skip this test on 32-bit platforms, where it seems to + // cause timeouts and build problems. 
+ skip() + return + } + b := new(bytes.Buffer) tests := "" flush := func(force bool) { @@ -331,3 +339,12 @@ func run(f func(*output, int)int, i int) *output { } ` + +func skip() { + const code = ` +package main +func main() { +} +` + fmt.Printf("%s\n", code) +} diff --git a/test/typeparam/issue50485.dir/a.go b/test/typeparam/issue50485.dir/a.go index 3a7c71a711..97cf4d2549 100644 --- a/test/typeparam/issue50485.dir/a.go +++ b/test/typeparam/issue50485.dir/a.go @@ -219,7 +219,6 @@ func (r ApplicativeFunctor2[H, HT, A1, A2, R]) ApOption(a Option[A1]) Applicativ func (r ApplicativeFunctor2[H, HT, A1, A2, R]) Ap(a A1) ApplicativeFunctor1[Cons[A1, H], A1, A2, R] { return r.ApOption(Some(a)) - } func Applicative2[A1, A2, R any](fn Func2[A1, A2, R]) ApplicativeFunctor2[Nil, Nil, A1, A2, R] { diff --git a/test/typeparam/issue51232.go b/test/typeparam/issue51232.go index 0d25e1863d..f4728f6e7c 100644 --- a/test/typeparam/issue51232.go +++ b/test/typeparam/issue51232.go @@ -13,19 +13,19 @@ type RC[RG any] interface { type Fn[RCT RC[RG], RG any] func(RCT) type F[RCT RC[RG], RG any] interface { - Fn() Fn[RCT] // ERROR "got 1 arguments" + Fn() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2" } type concreteF[RCT RC[RG], RG any] struct { - makeFn func() Fn[RCT] // ERROR "got 1 arguments" + makeFn func() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2" } -func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "got 1 arguments" +func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "not enough type arguments for type Fn: have 1, want 2" return c.makeFn() } -func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] { // ERROR "got 1 arguments" - return &concreteF[RCT]{ // ERROR "cannot use" "got 1 arguments" +func NewConcrete[RCT RC[RG], RG any](Rc RCT) F[RCT] { // ERROR "not enough type arguments for type F: have 1, want 2" + return &concreteF[RCT]{ // ERROR "cannot use" "not enough type arguments for type concreteF: have 1, want 2" makeFn: 
nil, } } diff --git a/test/typeparam/issue51233.go b/test/typeparam/issue51233.go index 96a25ddb9c..5f2a045d84 100644 --- a/test/typeparam/issue51233.go +++ b/test/typeparam/issue51233.go @@ -13,16 +13,16 @@ type RC[RG any] interface { type Fn[RCT RC[RG], RG any] func(RCT) -type FFn[RCT RC[RG], RG any] func() Fn[RCT] // ERROR "got 1 arguments" +type FFn[RCT RC[RG], RG any] func() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2" type F[RCT RC[RG], RG any] interface { - Fn() Fn[RCT] // ERROR "got 1 arguments" + Fn() Fn[RCT] // ERROR "not enough type arguments for type Fn: have 1, want 2" } type concreteF[RCT RC[RG], RG any] struct { - makeFn FFn[RCT] // ERROR "got 1 arguments" + makeFn FFn[RCT] // ERROR "not enough type arguments for type FFn: have 1, want 2" } -func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "got 1 arguments" +func (c *concreteF[RCT, RG]) Fn() Fn[RCT] { // ERROR "not enough type arguments for type Fn: have 1, want 2" return c.makeFn() } diff --git a/test/typeparam/issue51925.go b/test/typeparam/issue51925.go index 0a385acd17..abebf67766 100644 --- a/test/typeparam/issue51925.go +++ b/test/typeparam/issue51925.go @@ -25,7 +25,6 @@ func min[T IntLike](x, y T) T { return x } return y - } // Min returns the minimum element of `nums`.