mirror of https://github.com/golang/go.git
commit feae982e68: Merge branch 'golang:master' into master
@@ -0,0 +1,4 @@
pkg iter, func Pull2[$0 interface{}, $1 interface{}](Seq2[$0, $1]) (func() ($0, $1, bool), func()) #61897
pkg iter, func Pull[$0 interface{}](Seq[$0]) (func() ($0, bool), func()) #61897
pkg iter, type Seq2[$0 interface{}, $1 interface{}] func(func($0, $1) bool) #61897
pkg iter, type Seq[$0 interface{}] func(func($0) bool) #61897
@@ -0,0 +1,4 @@
### Iterators

The new [`iter` package](/pkg/iter/) provides the basic definitions for
working with user-defined iterators.
@@ -0,0 +1 @@
<!-- see ../../3-iter.md -->
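To make the release note above concrete, here is a minimal sketch of the new definitions in use; it assumes a toolchain where package `iter` is importable (a related hunk below drops the `goexperiment.rangefunc` gate from the package):

```go
package main

import (
	"fmt"
	"iter"
)

// count returns an iter.Seq[int] that yields 0, 1, ..., n-1.
func count(n int) iter.Seq[int] {
	return func(yield func(int) bool) {
		for i := 0; i < n; i++ {
			if !yield(i) {
				return // the consumer asked us to stop early
			}
		}
	}
}

func main() {
	// Pull converts the push-style Seq into a pull-style next/stop pair.
	next, stop := iter.Pull(count(3))
	defer stop()
	for {
		v, ok := next()
		if !ok {
			break
		}
		fmt.Println(v)
	}
}
```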
@@ -0,0 +1,60 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

// Test case for issue 66427.
// Running under TSAN, this fails with "signal handler
// spoils errno".

/*
#include <pthread.h>
#include <signal.h>
#include <stdlib.h>

void go_callback();

static void *thr(void *arg) {
	int i;
	for (i = 0; i < 10; i++)
		go_callback();
	return 0;
}

static void *sendthr(void *arg) {
	pthread_t th = *(pthread_t*)arg;
	while (1) {
		int r = pthread_kill(th, SIGWINCH);
		if (r < 0)
			break;
	}
	return 0;
}

static void foo() {
	pthread_t *th = malloc(sizeof(pthread_t));
	pthread_t th2;
	pthread_create(th, 0, thr, 0);
	pthread_create(&th2, 0, sendthr, th);
	pthread_join(*th, 0);
}
*/
import "C"

import (
	"time"
)

//export go_callback
func go_callback() {}

func main() {
	go func() {
		for {
			C.foo()
		}
	}()

	time.Sleep(1000 * time.Millisecond)
}
@@ -54,6 +54,7 @@ func TestTSAN(t *testing.T) {
		{src: "tsan12.go", needsRuntime: true},
		{src: "tsan13.go", needsRuntime: true},
		{src: "tsan14.go", needsRuntime: true},
		{src: "tsan15.go", needsRuntime: true},
	}
	for _, tc := range cases {
		tc := tc
@@ -147,7 +147,7 @@ func calculateCostForType(t *types.Type) int64 {
		return EqStructCost(t)
	case types.TSLICE:
		// Slices are not comparable.
		base.Fatalf("eqStructFieldCost: unexpected slice type")
		base.Fatalf("calculateCostForType: unexpected slice type")
	case types.TARRAY:
		elemCost := calculateCostForType(t.Elem())
		cost = t.NumElem() * elemCost
@@ -370,6 +370,11 @@ func eqmem(p, q ir.Node, field int, size int64) ir.Node {
}

func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
	if !base.Ctxt.Arch.CanMergeLoads && t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
		// We can't use larger comparisons if the value might not be aligned
		// enough for the larger comparison. See issues 46283 and 67160.
		size = 0
	}
	switch size {
	case 1, 2, 4, 8, 16:
		buf := fmt.Sprintf("memequal%d", int(size)*8)
@ -16,6 +16,7 @@ import "C"
|
|||
import (
|
||||
"crypto/internal/boring/sig"
|
||||
_ "crypto/internal/boring/syso"
|
||||
"internal/stringslite"
|
||||
"math/bits"
|
||||
"unsafe"
|
||||
)
|
||||
|
|
@@ -39,16 +40,12 @@ func Unreachable() {
// provided by runtime to avoid os import.
func runtime_arg0() string

func hasSuffix(s, t string) bool {
	return len(s) > len(t) && s[len(s)-len(t):] == t
}

// UnreachableExceptTests marks code that should be unreachable
// when BoringCrypto is in use. It panics.
func UnreachableExceptTests() {
	name := runtime_arg0()
	// If BoringCrypto ran on Windows we'd need to allow _test.exe and .test.exe as well.
	if !hasSuffix(name, "_test") && !hasSuffix(name, ".test") {
	if !stringslite.HasSuffix(name, "_test") && !stringslite.HasSuffix(name, ".test") {
		println("boringcrypto: unexpected code execution in", name)
		panic("boringcrypto: invalid code execution")
	}
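The local `hasSuffix` helper above, and the similar helpers in the files below, are replaced by `internal/stringslite`. A rough sketch of the semantics being adopted, assuming `stringslite` mirrors the `strings` package as its name suggests:

```go
package stringslite // sketch of the internal helper being adopted, not its actual source

// HasSuffix reports whether s ends with suffix, matching strings.HasSuffix.
// Note the >= comparison: an exact match (s == suffix) counts as having the
// suffix, whereas the local helper deleted above used > and returned false.
func HasSuffix(s, suffix string) bool {
	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
```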
@@ -9,7 +9,10 @@
// of the use of BoringCrypto.
package fipstls

import "sync/atomic"
import (
	"internal/stringslite"
	"sync/atomic"
)

var required atomic.Bool

@@ -33,7 +36,7 @@ func Abandon() {
	// and empty string for Windows (where runtime_arg0 can't easily find the name).
	// Since this is an internal package, testing that this isn't used on the
	// other operating systems should suffice to catch any mistakes.
	if !hasSuffix(name, "_test") && !hasSuffix(name, ".test") && name != "NaClMain" && name != "" {
	if !stringslite.HasSuffix(name, "_test") && !stringslite.HasSuffix(name, ".test") && name != "NaClMain" && name != "" {
		panic("fipstls: invalid use of Abandon in " + name)
	}
	required.Store(false)
@@ -42,10 +45,6 @@ func Abandon() {
// provided by runtime
func runtime_arg0() string

func hasSuffix(s, t string) bool {
	return len(s) > len(t) && s[len(s)-len(t):] == t
}

// Required reports whether FIPS-approved settings are required.
func Required() bool {
	return required.Load()
@ -68,57 +68,123 @@ const (
|
|||
SeedSize = 32 + 32
|
||||
)
|
||||
|
||||
// GenerateKey generates an encapsulation key and a corresponding decapsulation
|
||||
// key, drawing random bytes from crypto/rand.
|
||||
//
|
||||
// The decapsulation key must be kept secret.
|
||||
func GenerateKey() (encapsulationKey, decapsulationKey []byte, err error) {
|
||||
d := make([]byte, 32)
|
||||
if _, err := rand.Read(d); err != nil {
|
||||
return nil, nil, errors.New("mlkem768: crypto/rand Read failed: " + err.Error())
|
||||
}
|
||||
z := make([]byte, 32)
|
||||
if _, err := rand.Read(z); err != nil {
|
||||
return nil, nil, errors.New("mlkem768: crypto/rand Read failed: " + err.Error())
|
||||
}
|
||||
ek, dk := kemKeyGen(d, z)
|
||||
return ek, dk, nil
|
||||
// A DecapsulationKey is the secret key used to decapsulate a shared key from a
|
||||
// ciphertext. It includes various precomputed values.
|
||||
type DecapsulationKey struct {
|
||||
dk [DecapsulationKeySize]byte
|
||||
encryptionKey
|
||||
decryptionKey
|
||||
}
|
||||
|
||||
// NewKeyFromSeed deterministically generates an encapsulation key and a
|
||||
// corresponding decapsulation key from a 64-byte seed. The seed must be
|
||||
// uniformly random.
|
||||
func NewKeyFromSeed(seed []byte) (encapsulationKey, decapsulationKey []byte, err error) {
|
||||
// Bytes returns the extended encoding of the decapsulation key, according to
|
||||
// FIPS 203 (DRAFT).
|
||||
func (dk *DecapsulationKey) Bytes() []byte {
|
||||
var b [DecapsulationKeySize]byte
|
||||
copy(b[:], dk.dk[:])
|
||||
return b[:]
|
||||
}
|
||||
|
||||
// EncapsulationKey returns the public encapsulation key necessary to produce
|
||||
// ciphertexts.
|
||||
func (dk *DecapsulationKey) EncapsulationKey() []byte {
|
||||
var b [EncapsulationKeySize]byte
|
||||
copy(b[:], dk.dk[decryptionKeySize:])
|
||||
return b[:]
|
||||
}
|
||||
|
||||
// encryptionKey is the parsed and expanded form of a PKE encryption key.
|
||||
type encryptionKey struct {
|
||||
t [k]nttElement // ByteDecode₁₂(ek[:384k])
|
||||
A [k * k]nttElement // A[i*k+j] = sampleNTT(ρ, j, i)
|
||||
}
|
||||
|
||||
// decryptionKey is the parsed and expanded form of a PKE decryption key.
|
||||
type decryptionKey struct {
|
||||
s [k]nttElement // ByteDecode₁₂(dk[:decryptionKeySize])
|
||||
}
|
||||
|
||||
// GenerateKey generates a new decapsulation key, drawing random bytes from
|
||||
// crypto/rand. The decapsulation key must be kept secret.
|
||||
func GenerateKey() (*DecapsulationKey, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
dk := &DecapsulationKey{}
|
||||
return generateKey(dk)
|
||||
}
|
||||
|
||||
func generateKey(dk *DecapsulationKey) (*DecapsulationKey, error) {
|
||||
var d [32]byte
|
||||
if _, err := rand.Read(d[:]); err != nil {
|
||||
return nil, errors.New("mlkem768: crypto/rand Read failed: " + err.Error())
|
||||
}
|
||||
var z [32]byte
|
||||
if _, err := rand.Read(z[:]); err != nil {
|
||||
return nil, errors.New("mlkem768: crypto/rand Read failed: " + err.Error())
|
||||
}
|
||||
return kemKeyGen(dk, &d, &z), nil
|
||||
}
|
||||
|
||||
// NewKeyFromSeed deterministically generates a decapsulation key from a 64-byte
|
||||
// seed in the "d || z" form. The seed must be uniformly random.
|
||||
func NewKeyFromSeed(seed []byte) (*DecapsulationKey, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
dk := &DecapsulationKey{}
|
||||
return newKeyFromSeed(dk, seed)
|
||||
}
|
||||
|
||||
func newKeyFromSeed(dk *DecapsulationKey, seed []byte) (*DecapsulationKey, error) {
|
||||
if len(seed) != SeedSize {
|
||||
return nil, nil, errors.New("mlkem768: invalid seed length")
|
||||
return nil, errors.New("mlkem768: invalid seed length")
|
||||
}
|
||||
ek, dk := kemKeyGen(seed[:32], seed[32:])
|
||||
return ek, dk, nil
|
||||
d := (*[32]byte)(seed[:32])
|
||||
z := (*[32]byte)(seed[32:])
|
||||
return kemKeyGen(dk, d, z), nil
|
||||
}
|
||||
|
||||
// kemKeyGen generates an encapsulation key and a corresponding decapsulation key.
|
||||
//
|
||||
// It implements ML-KEM.KeyGen according to FIPS 203 (DRAFT), Algorithm 15.
|
||||
func kemKeyGen(d, z []byte) (ek, dk []byte) {
|
||||
ekPKE, dkPKE := pkeKeyGen(d)
|
||||
dk = make([]byte, 0, DecapsulationKeySize)
|
||||
dk = append(dk, dkPKE...)
|
||||
dk = append(dk, ekPKE...)
|
||||
H := sha3.New256()
|
||||
H.Write(ekPKE)
|
||||
dk = H.Sum(dk)
|
||||
dk = append(dk, z...)
|
||||
return ekPKE, dk
|
||||
// NewKeyFromExtendedEncoding parses a decapsulation key from its FIPS 203
|
||||
// (DRAFT) extended encoding.
|
||||
func NewKeyFromExtendedEncoding(decapsulationKey []byte) (*DecapsulationKey, error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
dk := &DecapsulationKey{}
|
||||
return newKeyFromExtendedEncoding(dk, decapsulationKey)
|
||||
}
|
||||
|
||||
// pkeKeyGen generates a key pair for the underlying PKE from a 32-byte random seed.
|
||||
func newKeyFromExtendedEncoding(dk *DecapsulationKey, dkBytes []byte) (*DecapsulationKey, error) {
|
||||
if len(dkBytes) != DecapsulationKeySize {
|
||||
return nil, errors.New("mlkem768: invalid decapsulation key length")
|
||||
}
|
||||
|
||||
// Note that we don't check that H(ek) matches ekPKE, as that's not
|
||||
// specified in FIPS 203 (DRAFT). This is one reason to prefer the seed
|
||||
// private key format.
|
||||
dk.dk = [DecapsulationKeySize]byte(dkBytes)
|
||||
|
||||
dkPKE := dkBytes[:decryptionKeySize]
|
||||
if err := parseDK(&dk.decryptionKey, dkPKE); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ekPKE := dkBytes[decryptionKeySize : decryptionKeySize+encryptionKeySize]
|
||||
if err := parseEK(&dk.encryptionKey, ekPKE); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dk, nil
|
||||
}
|
||||
|
||||
// kemKeyGen generates a decapsulation key.
|
||||
//
|
||||
// It implements K-PKE.KeyGen according to FIPS 203 (DRAFT), Algorithm 12.
|
||||
func pkeKeyGen(d []byte) (ek, dk []byte) {
|
||||
G := sha3.Sum512(d)
|
||||
// It implements ML-KEM.KeyGen according to FIPS 203 (DRAFT), Algorithm 15, and
|
||||
// K-PKE.KeyGen according to FIPS 203 (DRAFT), Algorithm 12. The two are merged
|
||||
// to save copies and allocations.
|
||||
func kemKeyGen(dk *DecapsulationKey, d, z *[32]byte) *DecapsulationKey {
|
||||
if dk == nil {
|
||||
dk = &DecapsulationKey{}
|
||||
}
|
||||
|
||||
G := sha3.Sum512(d[:])
|
||||
ρ, σ := G[:32], G[32:]
|
||||
|
||||
A := make([]nttElement, k*k)
|
||||
A := &dk.A
|
||||
for i := byte(0); i < k; i++ {
|
||||
for j := byte(0); j < k; j++ {
|
||||
// Note that this is consistent with Kyber round 3, rather than with
|
||||
|
|
@ -129,36 +195,51 @@ func pkeKeyGen(d []byte) (ek, dk []byte) {
|
|||
}
|
||||
|
||||
var N byte
|
||||
s, e := make([]nttElement, k), make([]nttElement, k)
|
||||
s := &dk.s
|
||||
for i := range s {
|
||||
s[i] = ntt(samplePolyCBD(σ, N))
|
||||
N++
|
||||
}
|
||||
e := make([]nttElement, k)
|
||||
for i := range e {
|
||||
e[i] = ntt(samplePolyCBD(σ, N))
|
||||
N++
|
||||
}
|
||||
|
||||
t := make([]nttElement, k) // A ◦ s + e
|
||||
for i := range t {
|
||||
t := &dk.t
|
||||
for i := range t { // t = A ◦ s + e
|
||||
t[i] = e[i]
|
||||
for j := range s {
|
||||
t[i] = polyAdd(t[i], nttMul(A[i*k+j], s[j]))
|
||||
}
|
||||
}
|
||||
|
||||
ek = make([]byte, 0, encryptionKeySize)
|
||||
for i := range t {
|
||||
ek = polyByteEncode(ek, t[i])
|
||||
}
|
||||
ek = append(ek, ρ...)
|
||||
// dkPKE ← ByteEncode₁₂(s)
|
||||
// ekPKE ← ByteEncode₁₂(t) || ρ
|
||||
// ek ← ekPKE
|
||||
// dk ← dkPKE || ek || H(ek) || z
|
||||
dkB := dk.dk[:0]
|
||||
|
||||
dk = make([]byte, 0, decryptionKeySize)
|
||||
for i := range s {
|
||||
dk = polyByteEncode(dk, s[i])
|
||||
dkB = polyByteEncode(dkB, s[i])
|
||||
}
|
||||
|
||||
return ek, dk
|
||||
for i := range t {
|
||||
dkB = polyByteEncode(dkB, t[i])
|
||||
}
|
||||
dkB = append(dkB, ρ...)
|
||||
|
||||
H := sha3.New256()
|
||||
H.Write(dkB[decryptionKeySize:])
|
||||
dkB = H.Sum(dkB)
|
||||
|
||||
dkB = append(dkB, z[:]...)
|
||||
|
||||
if len(dkB) != len(dk.dk) {
|
||||
panic("mlkem768: internal error: invalid decapsulation key size")
|
||||
}
|
||||
|
||||
return dk
|
||||
}
|
||||
|
||||
// Encapsulate generates a shared key and an associated ciphertext from an
|
||||
|
|
@ -167,65 +248,79 @@ func pkeKeyGen(d []byte) (ek, dk []byte) {
|
|||
//
|
||||
// The shared key must be kept secret.
|
||||
func Encapsulate(encapsulationKey []byte) (ciphertext, sharedKey []byte, err error) {
|
||||
// The actual logic is in a separate function to outline this allocation.
|
||||
var cc [CiphertextSize]byte
|
||||
return encapsulate(&cc, encapsulationKey)
|
||||
}
|
||||
|
||||
func encapsulate(cc *[CiphertextSize]byte, encapsulationKey []byte) (ciphertext, sharedKey []byte, err error) {
|
||||
if len(encapsulationKey) != EncapsulationKeySize {
|
||||
return nil, nil, errors.New("mlkem768: invalid encapsulation key length")
|
||||
}
|
||||
m := make([]byte, messageSize)
|
||||
if _, err := rand.Read(m); err != nil {
|
||||
var m [messageSize]byte
|
||||
if _, err := rand.Read(m[:]); err != nil {
|
||||
return nil, nil, errors.New("mlkem768: crypto/rand Read failed: " + err.Error())
|
||||
}
|
||||
ciphertext, sharedKey, err = kemEncaps(encapsulationKey, m)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return ciphertext, sharedKey, nil
|
||||
return kemEncaps(cc, encapsulationKey, &m)
|
||||
}
|
||||
|
||||
// kemEncaps generates a shared key and an associated ciphertext.
|
||||
//
|
||||
// It implements ML-KEM.Encaps according to FIPS 203 (DRAFT), Algorithm 16.
|
||||
func kemEncaps(ek, m []byte) (c, K []byte, err error) {
|
||||
H := sha3.Sum256(ek)
|
||||
func kemEncaps(cc *[CiphertextSize]byte, ek []byte, m *[messageSize]byte) (c, K []byte, err error) {
|
||||
if cc == nil {
|
||||
cc = &[CiphertextSize]byte{}
|
||||
}
|
||||
|
||||
H := sha3.Sum256(ek[:])
|
||||
g := sha3.New512()
|
||||
g.Write(m)
|
||||
g.Write(m[:])
|
||||
g.Write(H[:])
|
||||
G := g.Sum(nil)
|
||||
K, r := G[:SharedKeySize], G[SharedKeySize:]
|
||||
c, err = pkeEncrypt(ek, m, r)
|
||||
return c, K, err
|
||||
var ex encryptionKey
|
||||
if err := parseEK(&ex, ek[:]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
c = pkeEncrypt(cc, &ex, m, r)
|
||||
return c, K, nil
|
||||
}
|
||||
|
||||
// pkeEncrypt encrypt a plaintext message. It expects ek (the encryption key) to
|
||||
// be 1184 bytes, and m (the message) and rnd (the randomness) to be 32 bytes.
|
||||
// parseEK parses an encryption key from its encoded form.
|
||||
//
|
||||
// It implements K-PKE.Encrypt according to FIPS 203 (DRAFT), Algorithm 13.
|
||||
func pkeEncrypt(ek, m, rnd []byte) ([]byte, error) {
|
||||
if len(ek) != encryptionKeySize {
|
||||
return nil, errors.New("mlkem768: invalid encryption key length")
|
||||
}
|
||||
if len(m) != messageSize {
|
||||
return nil, errors.New("mlkem768: invalid messages length")
|
||||
// It implements the initial stages of K-PKE.Encrypt according to FIPS 203
|
||||
// (DRAFT), Algorithm 13.
|
||||
func parseEK(ex *encryptionKey, ekPKE []byte) error {
|
||||
if len(ekPKE) != encryptionKeySize {
|
||||
return errors.New("mlkem768: invalid encryption key length")
|
||||
}
|
||||
|
||||
t := make([]nttElement, k)
|
||||
for i := range t {
|
||||
for i := range ex.t {
|
||||
var err error
|
||||
t[i], err = polyByteDecode[nttElement](ek[:encodingSize12])
|
||||
ex.t[i], err = polyByteDecode[nttElement](ekPKE[:encodingSize12])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
ek = ek[encodingSize12:]
|
||||
ekPKE = ekPKE[encodingSize12:]
|
||||
}
|
||||
ρ := ek
|
||||
ρ := ekPKE
|
||||
|
||||
AT := make([]nttElement, k*k)
|
||||
for i := byte(0); i < k; i++ {
|
||||
for j := byte(0); j < k; j++ {
|
||||
// Note that i and j are inverted, as we need the transposed of A.
|
||||
AT[i*k+j] = sampleNTT(ρ, i, j)
|
||||
// See the note in pkeKeyGen about the order of the indices being
|
||||
// consistent with Kyber round 3.
|
||||
ex.A[i*k+j] = sampleNTT(ρ, j, i)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pkeEncrypt encrypt a plaintext message.
|
||||
//
|
||||
// It implements K-PKE.Encrypt according to FIPS 203 (DRAFT), Algorithm 13,
|
||||
// although the computation of t and AT is done in parseEK.
|
||||
func pkeEncrypt(cc *[CiphertextSize]byte, ex *encryptionKey, m *[messageSize]byte, rnd []byte) []byte {
|
||||
var N byte
|
||||
r, e1 := make([]nttElement, k), make([]ringElement, k)
|
||||
for i := range r {
|
||||
|
|
@ -242,125 +337,107 @@ func pkeEncrypt(ek, m, rnd []byte) ([]byte, error) {
|
|||
for i := range u {
|
||||
u[i] = e1[i]
|
||||
for j := range r {
|
||||
u[i] = polyAdd(u[i], inverseNTT(nttMul(AT[i*k+j], r[j])))
|
||||
// Note that i and j are inverted, as we need the transposed of A.
|
||||
u[i] = polyAdd(u[i], inverseNTT(nttMul(ex.A[j*k+i], r[j])))
|
||||
}
|
||||
}
|
||||
|
||||
μ, err := ringDecodeAndDecompress1(m)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
μ := ringDecodeAndDecompress1(m)
|
||||
|
||||
var vNTT nttElement // t⊺ ◦ r
|
||||
for i := range t {
|
||||
vNTT = polyAdd(vNTT, nttMul(t[i], r[i]))
|
||||
for i := range ex.t {
|
||||
vNTT = polyAdd(vNTT, nttMul(ex.t[i], r[i]))
|
||||
}
|
||||
v := polyAdd(polyAdd(inverseNTT(vNTT), e2), μ)
|
||||
|
||||
c := make([]byte, 0, CiphertextSize)
|
||||
c := cc[:0]
|
||||
for _, f := range u {
|
||||
c = ringCompressAndEncode10(c, f)
|
||||
}
|
||||
c = ringCompressAndEncode4(c, v)
|
||||
|
||||
return c, nil
|
||||
return c
|
||||
}
|
||||
|
||||
// Decapsulate generates a shared key from a ciphertext and a decapsulation key.
|
||||
// If the decapsulation key or the ciphertext are not valid, Decapsulate returns
|
||||
// an error.
|
||||
// If the ciphertext is not valid, Decapsulate returns an error.
|
||||
//
|
||||
// The shared key must be kept secret.
|
||||
func Decapsulate(decapsulationKey, ciphertext []byte) (sharedKey []byte, err error) {
|
||||
if len(decapsulationKey) != DecapsulationKeySize {
|
||||
return nil, errors.New("mlkem768: invalid decapsulation key length")
|
||||
}
|
||||
func Decapsulate(dk *DecapsulationKey, ciphertext []byte) (sharedKey []byte, err error) {
|
||||
if len(ciphertext) != CiphertextSize {
|
||||
return nil, errors.New("mlkem768: invalid ciphertext length")
|
||||
}
|
||||
return kemDecaps(decapsulationKey, ciphertext)
|
||||
c := (*[CiphertextSize]byte)(ciphertext)
|
||||
return kemDecaps(dk, c), nil
|
||||
}
|
||||
|
||||
// kemDecaps produces a shared key from a ciphertext.
|
||||
//
|
||||
// It implements ML-KEM.Decaps according to FIPS 203 (DRAFT), Algorithm 17.
|
||||
func kemDecaps(dk, c []byte) (K []byte, err error) {
|
||||
dkPKE := dk[:decryptionKeySize]
|
||||
ekPKE := dk[decryptionKeySize : decryptionKeySize+encryptionKeySize]
|
||||
h := dk[decryptionKeySize+encryptionKeySize : decryptionKeySize+encryptionKeySize+32]
|
||||
z := dk[decryptionKeySize+encryptionKeySize+32:]
|
||||
func kemDecaps(dk *DecapsulationKey, c *[CiphertextSize]byte) (K []byte) {
|
||||
h := dk.dk[decryptionKeySize+encryptionKeySize : decryptionKeySize+encryptionKeySize+32]
|
||||
z := dk.dk[decryptionKeySize+encryptionKeySize+32:]
|
||||
|
||||
m, err := pkeDecrypt(dkPKE, c)
|
||||
if err != nil {
|
||||
// This is only reachable if the ciphertext or the decryption key are
|
||||
// encoded incorrectly, so it leaks no information about the message.
|
||||
return nil, err
|
||||
}
|
||||
m := pkeDecrypt(&dk.decryptionKey, c)
|
||||
g := sha3.New512()
|
||||
g.Write(m)
|
||||
g.Write(m[:])
|
||||
g.Write(h)
|
||||
G := g.Sum(nil)
|
||||
Kprime, r := G[:SharedKeySize], G[SharedKeySize:]
|
||||
J := sha3.NewShake256()
|
||||
J.Write(z)
|
||||
J.Write(c)
|
||||
J.Write(c[:])
|
||||
Kout := make([]byte, SharedKeySize)
|
||||
J.Read(Kout)
|
||||
c1, err := pkeEncrypt(ekPKE, m, r)
|
||||
if err != nil {
|
||||
// Likewise, this is only reachable if the encryption key is encoded
|
||||
// incorrectly, so it leaks no secret information through timing.
|
||||
return nil, err
|
||||
}
|
||||
var cc [CiphertextSize]byte
|
||||
c1 := pkeEncrypt(&cc, &dk.encryptionKey, (*[32]byte)(m), r)
|
||||
|
||||
subtle.ConstantTimeCopy(subtle.ConstantTimeCompare(c, c1), Kout, Kprime)
|
||||
return Kout, nil
|
||||
subtle.ConstantTimeCopy(subtle.ConstantTimeCompare(c[:], c1), Kout, Kprime)
|
||||
return Kout
|
||||
}
|
||||
|
||||
// pkeDecrypt decrypts a ciphertext. It expects dk (the decryption key) to
|
||||
// be 1152 bytes, and c (the ciphertext) to be 1088 bytes.
|
||||
// parseDK parses a decryption key from its encoded form.
|
||||
//
|
||||
// It implements K-PKE.Decrypt according to FIPS 203 (DRAFT), Algorithm 14.
|
||||
func pkeDecrypt(dk, c []byte) ([]byte, error) {
|
||||
if len(dk) != decryptionKeySize {
|
||||
return nil, errors.New("mlkem768: invalid decryption key length")
|
||||
}
|
||||
if len(c) != CiphertextSize {
|
||||
return nil, errors.New("mlkem768: invalid ciphertext length")
|
||||
// It implements the computation of s from K-PKE.Decrypt according to FIPS 203
|
||||
// (DRAFT), Algorithm 14.
|
||||
func parseDK(dx *decryptionKey, dkPKE []byte) error {
|
||||
if len(dkPKE) != decryptionKeySize {
|
||||
return errors.New("mlkem768: invalid decryption key length")
|
||||
}
|
||||
|
||||
for i := range dx.s {
|
||||
f, err := polyByteDecode[nttElement](dkPKE[:encodingSize12])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
dx.s[i] = f
|
||||
dkPKE = dkPKE[encodingSize12:]
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// pkeDecrypt decrypts a ciphertext.
|
||||
//
|
||||
// It implements K-PKE.Decrypt according to FIPS 203 (DRAFT), Algorithm 14,
|
||||
// although the computation of s is done in parseDK.
|
||||
func pkeDecrypt(dx *decryptionKey, c *[CiphertextSize]byte) []byte {
|
||||
u := make([]ringElement, k)
|
||||
for i := range u {
|
||||
f, err := ringDecodeAndDecompress10(c[:encodingSize10])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
u[i] = f
|
||||
c = c[encodingSize10:]
|
||||
b := (*[encodingSize10]byte)(c[encodingSize10*i : encodingSize10*(i+1)])
|
||||
u[i] = ringDecodeAndDecompress10(b)
|
||||
}
|
||||
|
||||
v, err := ringDecodeAndDecompress4(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := make([]nttElement, k)
|
||||
for i := range s {
|
||||
f, err := polyByteDecode[nttElement](dk[:encodingSize12])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s[i] = f
|
||||
dk = dk[encodingSize12:]
|
||||
}
|
||||
b := (*[encodingSize4]byte)(c[encodingSize10*k:])
|
||||
v := ringDecodeAndDecompress4(b)
|
||||
|
||||
var mask nttElement // s⊺ ◦ NTT(u)
|
||||
for i := range s {
|
||||
mask = polyAdd(mask, nttMul(s[i], ntt(u[i])))
|
||||
for i := range dx.s {
|
||||
mask = polyAdd(mask, nttMul(dx.s[i], ntt(u[i])))
|
||||
}
|
||||
w := polySub(v, inverseNTT(mask))
|
||||
|
||||
return ringCompressAndEncode1(nil, w), nil
|
||||
return ringCompressAndEncode1(nil, w)
|
||||
}
|
||||
|
||||
// fieldElement is an integer modulo q, an element of ℤ_q. It is always reduced.
|
||||
|
|
@@ -397,7 +474,7 @@ const (
	barrettShift = 24 // log₂(2¹² * 2¹²)
)

// fieldReduce reduces a value a < q² using Barrett reduction, to avoid
// fieldReduce reduces a value a < 2q² using Barrett reduction, to avoid
// potentially variable-time division.
func fieldReduce(a uint32) fieldElement {
	quotient := uint32((uint64(a) * barrettMultiplier) >> barrettShift)
@@ -409,6 +486,21 @@ func fieldMul(a, b fieldElement) fieldElement {
	return fieldReduce(x)
}

// fieldMulSub returns a * (b - c). This operation is fused to save a
// fieldReduceOnce after the subtraction.
func fieldMulSub(a, b, c fieldElement) fieldElement {
	x := uint32(a) * uint32(b-c+q)
	return fieldReduce(x)
}

// fieldAddMul returns a * b + c * d. This operation is fused to save a
// fieldReduceOnce and a fieldReduce.
func fieldAddMul(a, b, c, d fieldElement) fieldElement {
	x := uint32(a) * uint32(b)
	x += uint32(c) * uint32(d)
	return fieldReduce(x)
}

// compress maps a field element uniformly to the range 0 to 2ᵈ-1, according to
// FIPS 203 (DRAFT), Definition 4.5.
func compress(x fieldElement, d uint8) uint16 {
@ -558,17 +650,14 @@ func ringCompressAndEncode1(s []byte, f ringElement) []byte {
|
|||
//
|
||||
// It implements ByteDecode₁, according to FIPS 203 (DRAFT), Algorithm 5,
|
||||
// followed by Decompress₁, according to FIPS 203 (DRAFT), Definition 4.6.
|
||||
func ringDecodeAndDecompress1(b []byte) (ringElement, error) {
|
||||
if len(b) != encodingSize1 {
|
||||
return ringElement{}, errors.New("mlkem768: invalid message length")
|
||||
}
|
||||
func ringDecodeAndDecompress1(b *[encodingSize1]byte) ringElement {
|
||||
var f ringElement
|
||||
for i := range f {
|
||||
b_i := b[i/8] >> (i % 8) & 1
|
||||
const halfQ = (q + 1) / 2 // ⌈q/2⌋, rounded up per FIPS 203 (DRAFT), Section 2.3
|
||||
f[i] = fieldElement(b_i) * halfQ // 0 decompresses to 0, and 1 to ⌈q/2⌋
|
||||
}
|
||||
return f, nil
|
||||
return f
|
||||
}
|
||||
|
||||
// ringCompressAndEncode4 appends a 128-byte encoding of a ring element to s,
|
||||
|
|
@ -589,16 +678,13 @@ func ringCompressAndEncode4(s []byte, f ringElement) []byte {
|
|||
//
|
||||
// It implements ByteDecode₄, according to FIPS 203 (DRAFT), Algorithm 5,
|
||||
// followed by Decompress₄, according to FIPS 203 (DRAFT), Definition 4.6.
|
||||
func ringDecodeAndDecompress4(b []byte) (ringElement, error) {
|
||||
if len(b) != encodingSize4 {
|
||||
return ringElement{}, errors.New("mlkem768: invalid encoding length")
|
||||
}
|
||||
func ringDecodeAndDecompress4(b *[encodingSize4]byte) ringElement {
|
||||
var f ringElement
|
||||
for i := 0; i < n; i += 2 {
|
||||
f[i] = fieldElement(decompress(uint16(b[i/2]&0b1111), 4))
|
||||
f[i+1] = fieldElement(decompress(uint16(b[i/2]>>4), 4))
|
||||
}
|
||||
return f, nil
|
||||
return f
|
||||
}
|
||||
|
||||
// ringCompressAndEncode10 appends a 320-byte encoding of a ring element to s,
|
||||
|
|
@ -629,10 +715,8 @@ func ringCompressAndEncode10(s []byte, f ringElement) []byte {
|
|||
//
|
||||
// It implements ByteDecode₁₀, according to FIPS 203 (DRAFT), Algorithm 5,
|
||||
// followed by Decompress₁₀, according to FIPS 203 (DRAFT), Definition 4.6.
|
||||
func ringDecodeAndDecompress10(b []byte) (ringElement, error) {
|
||||
if len(b) != encodingSize10 {
|
||||
return ringElement{}, errors.New("mlkem768: invalid encoding length")
|
||||
}
|
||||
func ringDecodeAndDecompress10(bb *[encodingSize10]byte) ringElement {
|
||||
b := bb[:]
|
||||
var f ringElement
|
||||
for i := 0; i < n; i += 4 {
|
||||
x := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32
|
||||
|
|
@ -642,7 +726,7 @@ func ringDecodeAndDecompress10(b []byte) (ringElement, error) {
|
|||
f[i+2] = fieldElement(decompress(uint16(x>>20&0b11_1111_1111), 10))
|
||||
f[i+3] = fieldElement(decompress(uint16(x>>30&0b11_1111_1111), 10))
|
||||
}
|
||||
return f, nil
|
||||
return f
|
||||
}
|
||||
|
||||
// samplePolyCBD draws a ringElement from the special Dη distribution given a
|
||||
|
|
@ -681,11 +765,12 @@ var gammas = [128]fieldElement{17, 3312, 2761, 568, 583, 2746, 2649, 680, 1637,
|
|||
// It implements MultiplyNTTs, according to FIPS 203 (DRAFT), Algorithm 10.
|
||||
func nttMul(f, g nttElement) nttElement {
|
||||
var h nttElement
|
||||
for i := 0; i < 128; i++ {
|
||||
a0, a1 := f[2*i], f[2*i+1]
|
||||
b0, b1 := g[2*i], g[2*i+1]
|
||||
h[2*i] = fieldAdd(fieldMul(a0, b0), fieldMul(fieldMul(a1, b1), gammas[i]))
|
||||
h[2*i+1] = fieldAdd(fieldMul(a0, b1), fieldMul(a1, b0))
|
||||
// We use i += 2 for bounds check elimination. See https://go.dev/issue/66826.
|
||||
for i := 0; i < 256; i += 2 {
|
||||
a0, a1 := f[i], f[i+1]
|
||||
b0, b1 := g[i], g[i+1]
|
||||
h[i] = fieldAddMul(a0, b0, fieldMul(a1, b1), gammas[i/2])
|
||||
h[i+1] = fieldAddMul(a0, b1, a1, b0)
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
|
@ -702,18 +787,12 @@ func ntt(f ringElement) nttElement {
|
|||
for start := 0; start < 256; start += 2 * len {
|
||||
zeta := zetas[k]
|
||||
k++
|
||||
for j := start; j < start+len; j += 2 {
|
||||
// Loop 2x unrolled for performance.
|
||||
{
|
||||
t := fieldMul(zeta, f[j+len])
|
||||
f[j+len] = fieldSub(f[j], t)
|
||||
f[j] = fieldAdd(f[j], t)
|
||||
}
|
||||
{
|
||||
t := fieldMul(zeta, f[j+1+len])
|
||||
f[j+1+len] = fieldSub(f[j+1], t)
|
||||
f[j+1] = fieldAdd(f[j+1], t)
|
||||
}
|
||||
// Bounds check elimination hint.
|
||||
f, flen := f[start:start+len], f[start+len:start+len+len]
|
||||
for j := 0; j < len; j++ {
|
||||
t := fieldMul(zeta, flen[j])
|
||||
flen[j] = fieldSub(f[j], t)
|
||||
f[j] = fieldAdd(f[j], t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -729,18 +808,12 @@ func inverseNTT(f nttElement) ringElement {
|
|||
for start := 0; start < 256; start += 2 * len {
|
||||
zeta := zetas[k]
|
||||
k--
|
||||
for j := start; j < start+len; j += 2 {
|
||||
// Loop 2x unrolled for performance.
|
||||
{
|
||||
t := f[j]
|
||||
f[j] = fieldAdd(t, f[j+len])
|
||||
f[j+len] = fieldMul(zeta, fieldSub(f[j+len], t))
|
||||
}
|
||||
{
|
||||
t := f[j+1]
|
||||
f[j+1] = fieldAdd(t, f[j+1+len])
|
||||
f[j+1+len] = fieldMul(zeta, fieldSub(f[j+1+len], t))
|
||||
}
|
||||
// Bounds check elimination hint.
|
||||
f, flen := f[start:start+len], f[start+len:start+len+len]
|
||||
for j := 0; j < len; j++ {
|
||||
t := f[j]
|
||||
f[j] = fieldAdd(t, flen[j])
|
||||
flen[j] = fieldMulSub(zeta, flen[j], t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
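Before the test changes below, a minimal sketch of how the reworked ML-KEM-768 API introduced above fits together; the package lives under crypto/internal, so the import path is illustrative and the snippet only builds inside the Go tree:

```go
package main

import (
	"bytes"
	"crypto/internal/mlkem768" // internal package; importable only within the Go tree
	"log"
)

func main() {
	// GenerateKey now returns a *DecapsulationKey instead of raw byte slices.
	dk, err := mlkem768.GenerateKey()
	if err != nil {
		log.Fatal(err)
	}

	// The encapsulation key is derived from the decapsulation key on demand.
	ct, k1, err := mlkem768.Encapsulate(dk.EncapsulationKey())
	if err != nil {
		log.Fatal(err)
	}

	// Decapsulate takes the *DecapsulationKey directly.
	k2, err := mlkem768.Decapsulate(dk, ct)
	if err != nil {
		log.Fatal(err)
	}
	if !bytes.Equal(k1, k2) {
		log.Fatal("shared keys do not match")
	}
}
```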
@@ -9,6 +9,7 @@ import (
	"crypto/rand"
	_ "embed"
	"encoding/hex"
	"errors"
	"flag"
	"math/big"
	"strconv"
@@ -17,6 +18,16 @@ import (
	"golang.org/x/crypto/sha3"
)

func TestFieldReduce(t *testing.T) {
	for a := uint32(0); a < 2*q*q; a++ {
		got := fieldReduce(a)
		exp := fieldElement(a % q)
		if got != exp {
			t.Fatalf("reduce(%d) = %d, expected %d", a, got, exp)
		}
	}
}

func TestFieldAdd(t *testing.T) {
	for a := fieldElement(0); a < q; a++ {
		for b := fieldElement(0); b < q; b++ {
@ -188,11 +199,11 @@ func TestGammas(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRoundTrip(t *testing.T) {
|
||||
ek, dk, err := GenerateKey()
|
||||
dk, err := GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
c, Ke, err := Encapsulate(ek)
|
||||
c, Ke, err := Encapsulate(dk.EncapsulationKey())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
@ -204,21 +215,21 @@ func TestRoundTrip(t *testing.T) {
|
|||
t.Fail()
|
||||
}
|
||||
|
||||
ek1, dk1, err := GenerateKey()
|
||||
dk1, err := GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if bytes.Equal(ek, ek1) {
|
||||
if bytes.Equal(dk.EncapsulationKey(), dk1.EncapsulationKey()) {
|
||||
t.Fail()
|
||||
}
|
||||
if bytes.Equal(dk, dk1) {
|
||||
if bytes.Equal(dk.Bytes(), dk1.Bytes()) {
|
||||
t.Fail()
|
||||
}
|
||||
if bytes.Equal(dk[len(dk)-32:], dk1[len(dk)-32:]) {
|
||||
if bytes.Equal(dk.Bytes()[EncapsulationKeySize-32:], dk1.Bytes()[EncapsulationKeySize-32:]) {
|
||||
t.Fail()
|
||||
}
|
||||
|
||||
c1, Ke1, err := Encapsulate(ek)
|
||||
c1, Ke1, err := Encapsulate(dk.EncapsulationKey())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
@ -231,10 +242,11 @@ func TestRoundTrip(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestBadLengths(t *testing.T) {
|
||||
ek, dk, err := GenerateKey()
|
||||
dk, err := GenerateKey()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ek := dk.EncapsulationKey()
|
||||
|
||||
for i := 0; i < len(ek)-1; i++ {
|
||||
if _, _, err := Encapsulate(ek[:i]); err == nil {
|
||||
|
|
@ -254,15 +266,15 @@ func TestBadLengths(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
for i := 0; i < len(dk)-1; i++ {
|
||||
if _, err := Decapsulate(dk[:i], c); err == nil {
|
||||
for i := 0; i < len(dk.Bytes())-1; i++ {
|
||||
if _, err := NewKeyFromExtendedEncoding(dk.Bytes()[:i]); err == nil {
|
||||
t.Errorf("expected error for dk length %d", i)
|
||||
}
|
||||
}
|
||||
dkLong := dk
|
||||
dkLong := dk.Bytes()
|
||||
for i := 0; i < 100; i++ {
|
||||
dkLong = append(dkLong, 0)
|
||||
if _, err := Decapsulate(dkLong, c); err == nil {
|
||||
if _, err := NewKeyFromExtendedEncoding(dkLong); err == nil {
|
||||
t.Errorf("expected error for dk length %d", len(dkLong))
|
||||
}
|
||||
}
|
||||
|
|
@@ -281,6 +293,29 @@ func TestBadLengths(t *testing.T) {
	}
}

func EncapsulateDerand(ek, m []byte) (c, K []byte, err error) {
	if len(m) != messageSize {
		return nil, nil, errors.New("bad message length")
	}
	return kemEncaps(nil, ek, (*[messageSize]byte)(m))
}

func DecapsulateFromBytes(dkBytes []byte, c []byte) ([]byte, error) {
	dk, err := NewKeyFromExtendedEncoding(dkBytes)
	if err != nil {
		return nil, err
	}
	return Decapsulate(dk, c)
}

func GenerateKeyDerand(t testing.TB, d, z []byte) ([]byte, *DecapsulationKey) {
	if len(d) != 32 || len(z) != 32 {
		t.Fatal("bad length")
	}
	dk := kemKeyGen(nil, (*[32]byte)(d), (*[32]byte)(z))
	return dk.EncapsulationKey(), dk
}

var millionFlag = flag.Bool("million", false, "run the million vector test")

// TestPQCrystalsAccumulated accumulates the 10k vectors generated by the
@ -308,19 +343,19 @@ func TestPQCrystalsAccumulated(t *testing.T) {
|
|||
for i := 0; i < n; i++ {
|
||||
s.Read(d)
|
||||
s.Read(z)
|
||||
ek, dk := kemKeyGen(d, z)
|
||||
ek, dk := GenerateKeyDerand(t, d, z)
|
||||
o.Write(ek)
|
||||
o.Write(dk)
|
||||
o.Write(dk.Bytes())
|
||||
|
||||
s.Read(msg)
|
||||
ct, k, err := kemEncaps(ek, msg)
|
||||
ct, k, err := EncapsulateDerand(ek, msg)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
o.Write(ct)
|
||||
o.Write(k)
|
||||
|
||||
kk, err := kemDecaps(dk, ct)
|
||||
kk, err := Decapsulate(dk, ct)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
@ -329,7 +364,7 @@ func TestPQCrystalsAccumulated(t *testing.T) {
|
|||
}
|
||||
|
||||
s.Read(ct1)
|
||||
k1, err := kemDecaps(dk, ct1)
|
||||
k1, err := Decapsulate(dk, ct1)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
|
@ -342,25 +377,17 @@ func TestPQCrystalsAccumulated(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
var sinkElement fieldElement
|
||||
|
||||
func BenchmarkSampleNTT(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
sinkElement ^= sampleNTT(bytes.Repeat([]byte("A"), 32), '4', '2')[0]
|
||||
}
|
||||
}
|
||||
|
||||
var sink byte
|
||||
|
||||
func BenchmarkKeyGen(b *testing.B) {
|
||||
d := make([]byte, 32)
|
||||
rand.Read(d)
|
||||
z := make([]byte, 32)
|
||||
rand.Read(z)
|
||||
var dk DecapsulationKey
|
||||
var d, z [32]byte
|
||||
rand.Read(d[:])
|
||||
rand.Read(z[:])
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
ek, dk := kemKeyGen(d, z)
|
||||
sink ^= ek[0] ^ dk[0]
|
||||
dk := kemKeyGen(&dk, &d, &z)
|
||||
sink ^= dk.EncapsulationKey()[0]
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -369,12 +396,13 @@ func BenchmarkEncaps(b *testing.B) {
|
|||
rand.Read(d)
|
||||
z := make([]byte, 32)
|
||||
rand.Read(z)
|
||||
m := make([]byte, 32)
|
||||
rand.Read(m)
|
||||
ek, _ := kemKeyGen(d, z)
|
||||
var m [messageSize]byte
|
||||
rand.Read(m[:])
|
||||
ek, _ := GenerateKeyDerand(b, d, z)
|
||||
var c [CiphertextSize]byte
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
c, K, err := kemEncaps(ek, m)
|
||||
c, K, err := kemEncaps(&c, ek, &m)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
|
|
@ -389,41 +417,42 @@ func BenchmarkDecaps(b *testing.B) {
|
|||
rand.Read(z)
|
||||
m := make([]byte, 32)
|
||||
rand.Read(m)
|
||||
ek, dk := kemKeyGen(d, z)
|
||||
c, _, err := kemEncaps(ek, m)
|
||||
ek, dk := GenerateKeyDerand(b, d, z)
|
||||
c, _, err := EncapsulateDerand(ek, m)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
K, err := kemDecaps(dk, c)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
K := kemDecaps(dk, (*[CiphertextSize]byte)(c))
|
||||
sink ^= K[0]
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkRoundTrip(b *testing.B) {
|
||||
ek, dk, err := GenerateKey()
|
||||
dk, err := GenerateKey()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
ek := dk.EncapsulationKey()
|
||||
c, _, err := Encapsulate(ek)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
b.Run("Alice", func(b *testing.B) {
|
||||
for i := 0; i < b.N; i++ {
|
||||
ekS, dkS, err := GenerateKey()
|
||||
dkS, err := GenerateKey()
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
ekS := dkS.EncapsulationKey()
|
||||
sink ^= ekS[0]
|
||||
|
||||
Ks, err := Decapsulate(dk, c)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
sink ^= ekS[0] ^ dkS[0] ^ Ks[0]
|
||||
sink ^= Ks[0]
|
||||
}
|
||||
})
|
||||
b.Run("Bob", func(b *testing.B) {
|
||||
|
|
|
|||
|
|
@@ -964,7 +964,7 @@ func parseCertificate(der []byte) (*Certificate, error) {
		}
		oidStr := ext.Id.String()
		if seenExts[oidStr] {
			return nil, errors.New("x509: certificate contains duplicate extensions")
			return nil, fmt.Errorf("x509: certificate contains duplicate extension with OID %q", oidStr)
		}
		seenExts[oidStr] = true
		cert.Extensions = append(cert.Extensions, ext)
@@ -85,11 +85,9 @@ var depsRules = `
	< internal/reflectlite
	< errors
	< internal/oserror, math/bits
	< iter
	< RUNTIME;

	internal/race
	< iter;

	# slices depends on unsafe for overlapping check, cmp for comparison
	# semantics, and math/bits for calculating bitlength of numbers.
	unsafe, cmp, math/bits
@@ -389,7 +387,6 @@ var depsRules = `
	internal/nettrace,
	internal/poll,
	internal/singleflight,
	internal/race,
	net/netip,
	os
	< net;
@@ -23,6 +23,7 @@ var stdPkgs = []string{
	"html",
	"image",
	"io",
	"iter",
	"log",
	"maps",
	"math",
@ -63,67 +63,56 @@ loop:
|
|||
RLDICL $40,R9,$56,R17 // p[7]
|
||||
SLD $2,R17,R17 // p[7]*4
|
||||
RLDICL $40,R7,$56,R8 // crc>>24
|
||||
ADD R17,R10,R17 // &tab[0][p[7]]
|
||||
SLD $2,R8,R8 // crc>>24*4
|
||||
RLDICL $48,R9,$56,R18 // p[6]
|
||||
SLD $2,R18,R18 // p[6]*4
|
||||
MOVWZ (R10)(R17),R21 // tab[0][p[7]]
|
||||
ADD $1024,R10,R10 // tab[1]
|
||||
MOVWZ 0(R17),R21 // tab[0][p[7]]
|
||||
RLDICL $56,R9,$56,R19 // p[5]
|
||||
ADD R10,R18,R18 // &tab[1][p[6]]
|
||||
SLD $2,R19,R19 // p[5]*4:1
|
||||
MOVWZ 0(R18),R22 // tab[1][p[6]]
|
||||
MOVWZ (R10)(R18),R22 // tab[1][p[6]]
|
||||
ADD $1024,R10,R10 // tab[2]
|
||||
XOR R21,R22,R21 // xor done R22
|
||||
ADD R19,R10,R19 // &tab[2][p[5]]
|
||||
ANDCC $255,R9,R20 // p[4] ??
|
||||
SLD $2,R20,R20 // p[4]*4
|
||||
MOVWZ 0(R19),R23 // tab[2][p[5]]
|
||||
CLRLSLDI $56,R9,$2,R20
|
||||
MOVWZ (R10)(R19),R23 // tab[2][p[5]]
|
||||
ADD $1024,R10,R10 // &tab[3]
|
||||
ADD R20,R10,R20 // tab[3][p[4]]
|
||||
XOR R21,R23,R21 // xor done R23
|
||||
ADD $1024,R10,R10 // &tab[4]
|
||||
MOVWZ 0(R20),R24 // tab[3][p[4]]
|
||||
ADD R10,R8,R23 // &tab[4][crc>>24]
|
||||
MOVWZ (R10)(R20),R24 // tab[3][p[4]]
|
||||
ADD $1024,R10,R10 // &tab[4]
|
||||
XOR R21,R24,R21 // xor done R24
|
||||
MOVWZ 0(R23),R25 // tab[4][crc>>24]
|
||||
MOVWZ (R10)(R8),R25 // tab[4][crc>>24]
|
||||
RLDICL $48,R7,$56,R24 // crc>>16&0xFF
|
||||
XOR R21,R25,R21 // xor done R25
|
||||
ADD $1024,R10,R10 // &tab[5]
|
||||
SLD $2,R24,R24 // crc>>16&0xFF*4
|
||||
ADD R24,R10,R24 // &tab[5][crc>>16&0xFF]
|
||||
MOVWZ 0(R24),R26 // tab[5][crc>>16&0xFF]
|
||||
MOVWZ (R10)(R24),R26 // tab[5][crc>>16&0xFF]
|
||||
XOR R21,R26,R21 // xor done R26
|
||||
RLDICL $56,R7,$56,R25 // crc>>8
|
||||
ADD $1024,R10,R10 // &tab[6]
|
||||
SLD $2,R25,R25 // crc>>8&FF*2
|
||||
ADD R25,R10,R25 // &tab[6][crc>>8&0xFF]
|
||||
MOVBZ R7,R26 // crc&0xFF
|
||||
ADD $1024,R10,R10 // &tab[7]
|
||||
MOVWZ 0(R25),R27 // tab[6][crc>>8&0xFF]
|
||||
MOVWZ (R10)(R25),R27 // tab[6][crc>>8&0xFF]
|
||||
ADD $1024,R10,R10 // &tab[7]
|
||||
SLD $2,R26,R26 // crc&0xFF*2
|
||||
XOR R21,R27,R21 // xor done R27
|
||||
ADD R26,R10,R26 // &tab[7][crc&0xFF]
|
||||
ADD $8,R5 // p = p[8:]
|
||||
MOVWZ 0(R26),R28 // tab[7][crc&0xFF]
|
||||
MOVWZ (R10)(R26),R28 // tab[7][crc&0xFF]
|
||||
XOR R21,R28,R21 // xor done R28
|
||||
MOVWZ R21,R7 // crc for next round
|
||||
BC 16,0,loop // next 8 bytes
|
||||
BDNZ loop
|
||||
ANDCC $7,R6,R8 // any leftover bytes
|
||||
BEQ done // none --> done
|
||||
MOVD R8,CTR // byte count
|
||||
PCALIGN $16 // align short loop
|
||||
short:
|
||||
MOVBZ 0(R5),R8 // get v
|
||||
MOVBZ R7,R9 // byte(crc) -> R8 BE vs LE?
|
||||
SRD $8,R7,R14 // crc>>8
|
||||
XOR R8,R9,R8 // byte(crc)^v -> R8
|
||||
ADD $1,R5 // ptr to next v
|
||||
SLD $2,R8 // convert index-> bytes
|
||||
ADD R8,R4,R9 // &tab[byte(crc)^v]
|
||||
MOVWZ 0(R9),R10 // tab[byte(crc)^v]
|
||||
XOR R10,R14,R7 // loop crc in R7
|
||||
BC 16,0,short
|
||||
MOVBZ 0(R5),R8 // get v
|
||||
XOR R8,R7,R8 // byte(crc)^v -> R8
|
||||
RLDIC $2,R8,$54,R8 // rldicl r8,r8,2,22
|
||||
SRD $8,R7,R14 // crc>>8
|
||||
MOVWZ (R4)(R8),R10
|
||||
ADD $1,R5
|
||||
XOR R10,R14,R7 // loop crc in R7
|
||||
BDNZ short
|
||||
done:
|
||||
NOR R7,R7,R7 // ^crc
|
||||
MOVW R7,ret+40(FP) // return crc
|
||||
|
|
@ -333,7 +322,7 @@ cool_top:
|
|||
LVX (R4+off112),V23 // next in buffer
|
||||
|
||||
ADD $128,R4 // bump up buffer pointer
|
||||
BC 16,0,cool_top // are we done?
|
||||
BDNZ cool_top // are we done?
|
||||
|
||||
first_cool_down:
|
||||
|
||||
|
|
|
|||
|
|
@ -1,52 +0,0 @@
|
|||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build gofuzz
|
||||
|
||||
package png
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
func Fuzz(data []byte) int {
|
||||
cfg, err := DecodeConfig(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
if cfg.Width*cfg.Height > 1e6 {
|
||||
return 0
|
||||
}
|
||||
img, err := Decode(bytes.NewReader(data))
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
levels := []CompressionLevel{
|
||||
DefaultCompression,
|
||||
NoCompression,
|
||||
BestSpeed,
|
||||
BestCompression,
|
||||
}
|
||||
for _, l := range levels {
|
||||
var w bytes.Buffer
|
||||
e := &Encoder{CompressionLevel: l}
|
||||
err = e.Encode(&w, img)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
img1, err := Decode(&w)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
got := img1.Bounds()
|
||||
want := img.Bounds()
|
||||
if !got.Eq(want) {
|
||||
fmt.Printf("bounds0: %#v\n", want)
|
||||
fmt.Printf("bounds1: %#v\n", got)
|
||||
panic("bounds have changed")
|
||||
}
|
||||
}
|
||||
return 1
|
||||
}
|
||||
|
|
@@ -6,6 +6,7 @@ package poll

import (
	"errors"
	"internal/stringslite"
	"io"
	"sync"
	"syscall"
@@ -203,11 +204,11 @@ func (fd *FD) ReadUnlock() {
}

func isHangup(err error) bool {
	return err != nil && stringsHasSuffix(err.Error(), "Hangup")
	return err != nil && stringslite.HasSuffix(err.Error(), "Hangup")
}

func isInterrupted(err error) bool {
	return err != nil && stringsHasSuffix(err.Error(), "interrupted")
	return err != nil && stringslite.HasSuffix(err.Error(), "interrupted")
}

// IsPollDescriptor reports whether fd is the descriptor being used by the poller.
@@ -1,13 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build plan9

package poll

// stringsHasSuffix is strings.HasSuffix. It reports whether s ends in
// suffix.
func stringsHasSuffix(s, suffix string) bool {
	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}
@@ -2,13 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.rangefunc

// Package iter provides basic definitions and operations
// related to iteration in Go.
//
// This package is experimental and can only be imported
// when building with GOEXPERIMENT=rangefunc.
package iter

import (
@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build goexperiment.rangefunc

package iter
package iter_test

import (
	"fmt"
	. "iter"
	"runtime"
	"testing"
)
@@ -33,7 +32,6 @@ func squares(n int) Seq2[int, int64] {
}

func TestPull(t *testing.T) {

	for end := 0; end <= 3; end++ {
		t.Run(fmt.Sprint(end), func(t *testing.T) {
			ng := runtime.NumGoroutine()
@@ -8,6 +8,7 @@ import (
	"errors"
	"internal/bytealg"
	"internal/godebug"
	"internal/stringslite"
	"io/fs"
	"os"
	"runtime"
@@ -335,7 +336,7 @@ func (c *conf) lookupOrder(r *Resolver, hostname string) (ret hostLookupOrder, d
	}

	// Canonicalize the hostname by removing any trailing dot.
	if stringsHasSuffix(hostname, ".") {
	if stringslite.HasSuffix(hostname, ".") {
		hostname = hostname[:len(hostname)-1]
	}

@@ -396,7 +397,7 @@ func (c *conf) lookupOrder(r *Resolver, hostname string) (ret hostLookupOrder, d
				return hostLookupCgo, dnsConf
			}
			continue
		case hostname != "" && stringsHasPrefix(src.source, "mdns"):
		case hostname != "" && stringslite.HasPrefix(src.source, "mdns"):
			if stringsHasSuffixFold(hostname, ".local") {
				// Per RFC 6762, the ".local" TLD is special. And
				// because Go's native resolver doesn't do mDNS or
@@ -7,6 +7,7 @@ package net
import (
	"errors"
	"internal/itoa"
	"internal/stringslite"
	"os"
)

@@ -70,7 +71,7 @@ func readInterface(i int) (*Interface, error) {
	ifc.MTU = mtu

	// Not a loopback device ("/dev/null") or packet interface (e.g. "pkt2")
	if stringsHasPrefix(device, netdir+"/") {
	if stringslite.HasPrefix(device, netdir+"/") {
		deviceaddrf, err := open(device + "/addr")
		if err != nil {
			return nil, err
@ -9,6 +9,7 @@ import (
|
|||
"errors"
|
||||
"internal/bytealg"
|
||||
"internal/itoa"
|
||||
"internal/stringslite"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
|
@ -107,10 +108,10 @@ func queryDNS(ctx context.Context, addr string, typ string) (res []string, err e
|
|||
}
|
||||
|
||||
func handlePlan9DNSError(err error, name string) error {
|
||||
if stringsHasSuffix(err.Error(), "dns: name does not exist") ||
|
||||
stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode 0") ||
|
||||
stringsHasSuffix(err.Error(), "dns: resource does not exist; negrcode") ||
|
||||
stringsHasSuffix(err.Error(), "dns failure") {
|
||||
if stringslite.HasSuffix(err.Error(), "dns: name does not exist") ||
|
||||
stringslite.HasSuffix(err.Error(), "dns: resource does not exist; negrcode 0") ||
|
||||
stringslite.HasSuffix(err.Error(), "dns: resource does not exist; negrcode") ||
|
||||
stringslite.HasSuffix(err.Error(), "dns failure") {
|
||||
err = errNoSuchHost
|
||||
}
|
||||
return newDNSError(err, name, "")
|
||||
|
|
@ -227,7 +228,7 @@ func (r *Resolver) lookupPort(ctx context.Context, network, service string) (por
|
|||
func (*Resolver) lookupPortWithNetwork(ctx context.Context, network, errNetwork, service string) (port int, err error) {
|
||||
lines, err := queryCS(ctx, network, "127.0.0.1", toLower(service))
|
||||
if err != nil {
|
||||
if stringsHasSuffix(err.Error(), "can't translate service") {
|
||||
if stringslite.HasSuffix(err.Error(), "can't translate service") {
|
||||
return 0, &DNSError{Err: "unknown port", Name: errNetwork + "/" + service, IsNotFound: true}
|
||||
}
|
||||
return
|
||||
|
|
@ -256,7 +257,7 @@ func (r *Resolver) lookupCNAME(ctx context.Context, name string) (cname string,
|
|||
|
||||
lines, err := queryDNS(ctx, name, "cname")
|
||||
if err != nil {
|
||||
if stringsHasSuffix(err.Error(), "dns failure") || stringsHasSuffix(err.Error(), "resource does not exist; negrcode 0") {
|
||||
if stringslite.HasSuffix(err.Error(), "dns failure") || stringslite.HasSuffix(err.Error(), "resource does not exist; negrcode 0") {
|
||||
return absDomainName(name), nil
|
||||
}
|
||||
return "", handlePlan9DNSError(err, cname)
|
||||
|
|
|
|||
|
|
@@ -251,23 +251,12 @@ func foreachField(x string, fn func(field string) error) error {
	return nil
}

// stringsHasSuffix is strings.HasSuffix. It reports whether s ends in
// suffix.
func stringsHasSuffix(s, suffix string) bool {
	return len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix
}

// stringsHasSuffixFold reports whether s ends in suffix,
// ASCII-case-insensitively.
func stringsHasSuffixFold(s, suffix string) bool {
	return len(s) >= len(suffix) && stringsEqualFold(s[len(s)-len(suffix):], suffix)
}

// stringsHasPrefix is strings.HasPrefix. It reports whether s begins with prefix.
func stringsHasPrefix(s, prefix string) bool {
	return len(s) >= len(prefix) && s[:len(prefix)] == prefix
}

// stringsEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func stringsEqualFold(s, t string) bool {
@@ -8,6 +8,7 @@ package os

import (
	"errors"
	"internal/stringslite"
	"runtime"
)

@@ -25,13 +26,5 @@ func executable() (string, error) {

	// When the executable has been deleted then Readlink returns a
	// path appended with " (deleted)".
	return stringsTrimSuffix(path, " (deleted)"), err
}

// stringsTrimSuffix is the same as strings.TrimSuffix.
func stringsTrimSuffix(s, suffix string) string {
	if len(s) >= len(suffix) && s[len(s)-len(suffix):] == suffix {
		return s[:len(s)-len(suffix)]
	}
	return s
	return stringslite.TrimSuffix(path, " (deleted)"), err
}
@@ -18,6 +18,8 @@ TEXT ·mapinitnoop<ABIInternal>(SB),NOSPLIT,$0-0
#ifndef GOARCH_amd64
#ifndef GOARCH_arm64
#ifndef GOARCH_loong64
#ifndef GOARCH_mips
#ifndef GOARCH_mipsle
#ifndef GOARCH_mips64
#ifndef GOARCH_mips64le
#ifndef GOARCH_ppc64
@@ -40,3 +42,5 @@ TEXT ·switchToCrashStack0<ABIInternal>(SB),NOSPLIT,$0-0
#endif
#endif
#endif
#endif
#endif
|||
|
|
@ -204,6 +204,29 @@ noswitch:
|
|||
ADD $4, R29
|
||||
JMP (R4)
|
||||
|
||||
// func switchToCrashStack0(fn func())
|
||||
TEXT runtime·switchToCrashStack0(SB), NOSPLIT, $0-4
|
||||
MOVW fn+0(FP), REGCTXT // context register
|
||||
MOVW g_m(g), R2 // curm
|
||||
|
||||
// set g to gcrash
|
||||
MOVW $runtime·gcrash(SB), g // g = &gcrash
|
||||
CALL runtime·save_g(SB)
|
||||
MOVW R2, g_m(g) // g.m = curm
|
||||
MOVW g, m_g0(R2) // curm.g0 = g
|
||||
|
||||
// switch to crashstack
|
||||
MOVW (g_stack+stack_hi)(g), R2
|
||||
ADDU $(-4*8), R2, R29
|
||||
|
||||
// call target function
|
||||
MOVW 0(REGCTXT), R25
|
||||
JAL (R25)
|
||||
|
||||
// should never return
|
||||
CALL runtime·abort(SB)
|
||||
UNDEF
|
||||
|
||||
/*
|
||||
* support for morestack
|
||||
*/
|
||||
|
|
@ -217,6 +240,13 @@ noswitch:
|
|||
// calling the scheduler calling newm calling gc), so we must
|
||||
// record an argument size. For that purpose, it has no arguments.
|
||||
TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
|
||||
// Called from f.
|
||||
// Set g->sched to context in f.
|
||||
MOVW R29, (g_sched+gobuf_sp)(g)
|
||||
MOVW R31, (g_sched+gobuf_pc)(g)
|
||||
MOVW R3, (g_sched+gobuf_lr)(g)
|
||||
MOVW REGCTXT, (g_sched+gobuf_ctxt)(g)
|
||||
|
||||
// Cannot grow scheduler stack (m->g0).
|
||||
MOVW g_m(g), R7
|
||||
MOVW m_g0(R7), R8
|
||||
|
|
@ -230,13 +260,6 @@ TEXT runtime·morestack(SB),NOSPLIT|NOFRAME,$0-0
|
|||
JAL runtime·badmorestackgsignal(SB)
|
||||
JAL runtime·abort(SB)
|
||||
|
||||
// Called from f.
|
||||
// Set g->sched to context in f.
|
||||
MOVW R29, (g_sched+gobuf_sp)(g)
|
||||
MOVW R31, (g_sched+gobuf_pc)(g)
|
||||
MOVW R3, (g_sched+gobuf_lr)(g)
|
||||
MOVW REGCTXT, (g_sched+gobuf_ctxt)(g)
|
||||
|
||||
// Called from f.
|
||||
// Set m->morebuf to f's caller.
|
||||
MOVW R3, (m_morebuf+gobuf_pc)(R7) // f's caller's PC
|
||||
|
|
|
|||
|
|
@ -45,16 +45,17 @@ func doRequest(useSelect bool) (*response, error) {
|
|||
}
|
||||
|
||||
func TestChanSendSelectBarrier(t *testing.T) {
|
||||
t.Parallel()
|
||||
testChanSendBarrier(true)
|
||||
}
|
||||
|
||||
func TestChanSendBarrier(t *testing.T) {
|
||||
t.Parallel()
|
||||
testChanSendBarrier(false)
|
||||
}
|
||||
|
||||
func testChanSendBarrier(useSelect bool) {
|
||||
var wg sync.WaitGroup
|
||||
var globalMu sync.Mutex
|
||||
outer := 100
|
||||
inner := 100000
|
||||
if testing.Short() || runtime.GOARCH == "wasm" {
|
||||
|
|
@ -72,12 +73,15 @@ func testChanSendBarrier(useSelect bool) {
|
|||
if !ok {
|
||||
panic(1)
|
||||
}
|
||||
garbage = make([]byte, 1<<10)
|
||||
garbage = makeByte()
|
||||
}
|
||||
globalMu.Lock()
|
||||
global = garbage
|
||||
globalMu.Unlock()
|
||||
_ = garbage
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
//go:noinline
|
||||
func makeByte() []byte {
|
||||
return make([]byte, 1<<10)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -143,6 +143,7 @@ func TestSmhasherSmallKeys(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	testenv.ParallelOn64Bit(t)
	h := newHashSet()
	var b [3]byte
	for i := 0; i < 256; i++ {
@ -164,6 +165,7 @@ func TestSmhasherSmallKeys(t *testing.T) {

// Different length strings of all zeros have distinct hashes.
func TestSmhasherZeros(t *testing.T) {
	t.Parallel()
	N := 256 * 1024
	if testing.Short() {
		N = 1024
@ -187,6 +189,7 @@ func TestSmhasherTwoNonzero(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	testenv.ParallelOn64Bit(t)
	h := newHashSet()
	for n := 2; n <= 16; n++ {
		twoNonZero(h, n)
@ -232,6 +235,7 @@ func TestSmhasherCyclic(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	t.Parallel()
	r := rand.New(rand.NewSource(1234))
	const REPEAT = 8
	const N = 1000000
@ -261,6 +265,7 @@ func TestSmhasherSparse(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	t.Parallel()
	h := newHashSet()
	sparse(t, h, 32, 6)
	sparse(t, h, 40, 6)
@ -302,6 +307,7 @@ func TestSmhasherPermutation(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	testenv.ParallelOn64Bit(t)
	h := newHashSet()
	permutation(t, h, []uint32{0, 1, 2, 3, 4, 5, 6, 7}, 8)
	permutation(t, h, []uint32{0, 1 << 29, 2 << 29, 3 << 29, 4 << 29, 5 << 29, 6 << 29, 7 << 29}, 8)
@ -475,6 +481,7 @@ func TestSmhasherAvalanche(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	t.Parallel()
	avalancheTest1(t, &BytesKey{make([]byte, 2)})
	avalancheTest1(t, &BytesKey{make([]byte, 4)})
	avalancheTest1(t, &BytesKey{make([]byte, 8)})
@ -545,6 +552,7 @@ func TestSmhasherWindowed(t *testing.T) {
	if race.Enabled {
		t.Skip("Too long for race mode")
	}
	t.Parallel()
	h := newHashSet()
	t.Logf("32 bit keys")
	windowed(t, h, &Int32Key{})
@ -588,6 +596,7 @@ func TestSmhasherText(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	t.Parallel()
	h := newHashSet()
	text(t, h, "Foo", "Bar")
	text(t, h, "FooBar", "")
@ -798,6 +807,7 @@ func TestCollisions(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	t.Parallel()
	for i := 0; i < 16; i++ {
		for j := 0; j < 16; j++ {
			if j == i {

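Several of the heaviest Smhasher tests now call testenv.ParallelOn64Bit rather than t.Parallel directly. A plausible reading, sketched below with assumed behavior rather than the helper's real source, is that parallelism is only enabled where a 64-bit platform gives the memory-hungry hash sets room to run side by side.

```go
// Hypothetical sketch of a ParallelOn64Bit-style helper; the actual
// internal/testenv implementation may differ.
package testenvsketch

import (
	"math/bits"
	"testing"
)

// ParallelOn64Bit marks the test as parallel only on 64-bit platforms,
// where several large test working sets can coexist in memory.
func ParallelOn64Bit(t *testing.T) {
	if bits.UintSize == 64 {
		t.Parallel()
	}
}
```
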
@ -578,7 +578,7 @@ func switchToCrashStack(fn func()) {
// Disable crash stack on Windows for now. Apparently, throwing an exception
// on a non-system-allocated crash stack causes EXCEPTION_STACK_OVERFLOW and
// hangs the process (see issue 63938).
const crashStackImplemented = (GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x" || GOARCH == "wasm") && GOOS != "windows"
const crashStackImplemented = (GOARCH == "386" || GOARCH == "amd64" || GOARCH == "arm" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "mips" || GOARCH == "mipsle" || GOARCH == "mips64" || GOARCH == "mips64le" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64" || GOARCH == "s390x" || GOARCH == "wasm") && GOOS != "windows"

//go:noescape
func switchToCrashStack0(fn func()) // in assembly

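The constant now lists GOARCH values mips and mipsle, matching the new 32-bit MIPS assembly above. As a rough illustration only (with stand-in declarations, not the runtime's actual switchToCrashStack), a caller of such a constant falls back to the current stack when the dedicated crash stack is unavailable:

```go
package main

import "fmt"

// Stand-ins for the runtime's declarations; the real ones live in package
// runtime and in the per-architecture assembly files.
const crashStackImplemented = true

func switchToCrashStack0(fn func()) { fn() } // assembly in the real runtime

// crash illustrates the guard: run fn on the dedicated crash stack only on
// architectures that implement it, otherwise run it on the current stack.
func crash(fn func()) {
	if crashStackImplemented {
		switchToCrashStack0(fn)
		return
	}
	fn()
}

func main() {
	crash(func() { fmt.Println("printing crash report") })
}
```
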
@ -2343,11 +2343,6 @@ func oneNewExtraM() {
	if raceenabled {
		gp.racectx = racegostart(abi.FuncPCABIInternal(newextram) + sys.PCQuantum)
	}
	trace := traceAcquire()
	if trace.ok() {
		trace.OneNewExtraM(gp)
		traceRelease(trace)
	}
	// put on allg for garbage collector
	allgadd(gp)

@ -5489,7 +5484,6 @@ func (pp *p) destroy() {
	freemcache(pp.mcache)
	pp.mcache = nil
	gfpurge(pp)
	traceProcFree(pp)
	if raceenabled {
		if pp.timers.raceCtx != 0 {
			// The race detector code uses a callback to fetch

@ -953,10 +953,17 @@ func raisebadsignal(sig uint32, c *sigctxt) {
	}

	var handler uintptr
	var flags int32
	if sig >= _NSIG {
		handler = _SIG_DFL
	} else {
		handler = atomic.Loaduintptr(&fwdSig[sig])
		flags = sigtable[sig].flags
	}

	// If the signal is ignored, raising the signal is no-op.
	if handler == _SIG_IGN || (handler == _SIG_DFL && flags&_SigIgn != 0) {
		return
	}

	// Reset the signal handler and raise the signal.

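The added check makes raisebadsignal return early when re-raising the signal would have no effect: the forwarded handler is SIG_IGN, or it is SIG_DFL and the signal's default disposition is to ignore it. The condition can be exercised in isolation; the constant names below are stand-ins, not the runtime's declarations.

```go
package main

import "fmt"

// Stand-ins for the runtime's _SIG_DFL/_SIG_IGN markers and _SigIgn flag.
const (
	sigDFL uintptr = 0
	sigIGN uintptr = 1
)

const flagSigIgn int32 = 1 << 0

// wouldBeIgnored reports whether raising the signal again would be a no-op:
// either the handler is SIG_IGN, or it is SIG_DFL and the default
// disposition of the signal is to ignore it.
func wouldBeIgnored(handler uintptr, flags int32) bool {
	return handler == sigIGN || (handler == sigDFL && flags&flagSigIgn != 0)
}

func main() {
	fmt.Println(wouldBeIgnored(sigIGN, 0))          // true
	fmt.Println(wouldBeIgnored(sigDFL, flagSigIgn)) // true
	fmt.Println(wouldBeIgnored(sigDFL, 0))          // false: reset and re-raise
}
```
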
@ -560,11 +560,6 @@ func (tl traceLocker) HeapGoal() {
	tl.eventWriter(traceGoRunning, traceProcRunning).commit(traceEvHeapGoal, traceArg(heapGoal))
}

// OneNewExtraM is a no-op in the new tracer. This is worth keeping around though because
// it's a good place to insert a thread-level event about the new extra M.
func (tl traceLocker) OneNewExtraM(_ *g) {
}

// GoCreateSyscall indicates that a goroutine has transitioned from dead to GoSyscall.
//
// Unlike GoCreate, the caller must be running on gp.
@ -657,14 +652,6 @@ func trace_userLog(id uint64, category, message string) {
	traceRelease(tl)
}

// traceProcFree is called when a P is destroyed.
//
// This must run on the system stack to match the old tracer.
//
//go:systemstack
func traceProcFree(_ *p) {
}

// traceThreadDestroy is called when a thread is removed from
// sched.freem.
//
@ -703,10 +690,3 @@ func traceThreadDestroy(mp *m) {
		throw("bad use of trace.seqlock")
	}
}

// Not used in the new tracer; solely for compatibility with the old tracer.
// nosplit because it's called from exitsyscall without a P.
//
//go:nosplit
func (_ traceLocker) RecordSyscallExitedTime(_ *g, _ *p) {
}

@ -4,6 +4,8 @@

package strconv

import "internal/stringslite"

const fnParseComplex = "ParseComplex"

// convErr splits an error returned by parseFloatPrefix
@ -11,7 +13,7 @@ const fnParseComplex = "ParseComplex"
func convErr(err error, s string) (syntax, range_ error) {
	if x, ok := err.(*NumError); ok {
		x.Func = fnParseComplex
		x.Num = cloneString(s)
		x.Num = stringslite.Clone(s)
		if x.Err == ErrRange {
			return nil, x
		}
@ -4,7 +4,10 @@

package strconv

import "errors"
import (
	"errors"
	"internal/stringslite"
)

// lower(c) is a lower-case letter if and only if
// c is either that lower-case letter or the equivalent upper-case letter.
@ -33,8 +36,6 @@ func (e *NumError) Error() string {

func (e *NumError) Unwrap() error { return e.Err }

// cloneString returns a string copy of x.
//
// All ParseXXX functions allow the input string to escape to the error value.
// This hurts strconv.ParseXXX(string(b)) calls where b is []byte since
// the conversion from []byte must allocate a string on the heap.
@ -42,27 +43,21 @@ func (e *NumError) Unwrap() error { return e.Err }
// back to the output by copying it first. This allows the compiler to call
// strconv.ParseXXX without a heap allocation for most []byte to string
// conversions, since it can now prove that the string cannot escape Parse.
//
// TODO: Use strings.Clone instead? However, we cannot depend on "strings"
// since it incurs a transitive dependency on "unicode".
// Either move strings.Clone to an internal/bytealg or make the
// "strings" to "unicode" dependency lighter (see https://go.dev/issue/54098).
func cloneString(x string) string { return string([]byte(x)) }

func syntaxError(fn, str string) *NumError {
	return &NumError{fn, cloneString(str), ErrSyntax}
	return &NumError{fn, stringslite.Clone(str), ErrSyntax}
}

func rangeError(fn, str string) *NumError {
	return &NumError{fn, cloneString(str), ErrRange}
	return &NumError{fn, stringslite.Clone(str), ErrRange}
}

func baseError(fn, str string, base int) *NumError {
	return &NumError{fn, cloneString(str), errors.New("invalid base " + Itoa(base))}
	return &NumError{fn, stringslite.Clone(str), errors.New("invalid base " + Itoa(base))}
}

func bitSizeError(fn, str string, bitSize int) *NumError {
	return &NumError{fn, cloneString(str), errors.New("invalid bit size " + Itoa(bitSize))}
	return &NumError{fn, stringslite.Clone(str), errors.New("invalid bit size " + Itoa(bitSize))}
}

const intSize = 32 << (^uint(0) >> 63)
@ -221,7 +216,7 @@ func ParseInt(s string, base int, bitSize int) (i int64, err error) {
		un, err = ParseUint(s, base, bitSize)
		if err != nil && err.(*NumError).Err != ErrRange {
			err.(*NumError).Func = fnParseInt
			err.(*NumError).Num = cloneString(s0)
			err.(*NumError).Num = stringslite.Clone(s0)
			return 0, err
		}

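As the surviving comments explain, the error constructors copy the offending input so that strconv.ParseXXX(string(b)) does not force the converted string onto the heap; stringslite.Clone now does that copying. A small user-level example of where the cloned value ends up, using only the exported strconv API:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

func main() {
	b := []byte("123x")
	// On failure, the returned *NumError carries a copy of the input in Num,
	// so the string(b) conversion itself does not have to escape.
	_, err := strconv.ParseInt(string(b), 10, 64)

	var ne *strconv.NumError
	if errors.As(err, &ne) {
		fmt.Println(ne.Func, ne.Num, ne.Err) // ParseInt 123x invalid syntax
	}
}
```
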
@ -4,7 +4,10 @@

package time

import "errors"
import (
	"errors"
	"internal/stringslite"
)

// These are predefined layouts for use in [Time.Format] and [time.Parse].
// The reference time used in these layouts is the specific time stamp:
@ -827,17 +830,11 @@ type ParseError struct {
// newParseError creates a new ParseError.
// The provided value and valueElem are cloned to avoid escaping their values.
func newParseError(layout, value, layoutElem, valueElem, message string) *ParseError {
	valueCopy := cloneString(value)
	valueElemCopy := cloneString(valueElem)
	valueCopy := stringslite.Clone(value)
	valueElemCopy := stringslite.Clone(valueElem)
	return &ParseError{layout, valueCopy, layoutElem, valueElemCopy, message}
}

// cloneString returns a string copy of s.
// Do not use strings.Clone to avoid dependency on strings package.
func cloneString(s string) string {
	return string([]byte(s))
}

// These are borrowed from unicode/utf8 and strconv and replicate behavior in
// that package, since we can't take a dependency on either.
const (
@ -1368,7 +1365,7 @@ func parse(layout, value string, defaultLocation, local *Location) (Time, error)
		}

		// Otherwise create fake zone to record offset.
		zoneNameCopy := cloneString(zoneName) // avoid leaking the input value
		zoneNameCopy := stringslite.Clone(zoneName) // avoid leaking the input value
		t.setLoc(FixedZone(zoneNameCopy, zoneOffset))
		return t, nil
	}
@ -1389,7 +1386,7 @@ func parse(layout, value string, defaultLocation, local *Location) (Time, error)
			offset, _ = atoi(zoneName[3:]) // Guaranteed OK by parseGMT.
			offset *= 3600
		}
		zoneNameCopy := cloneString(zoneName) // avoid leaking the input value
		zoneNameCopy := stringslite.Clone(zoneName) // avoid leaking the input value
		t.setLoc(FixedZone(zoneNameCopy, offset))
		return t, nil
	}

@ -6,6 +6,7 @@ package unique

import (
	"internal/abi"
	"internal/stringslite"
	"unsafe"
)

@ -20,7 +21,7 @@ import (
func clone[T comparable](value T, seq *cloneSeq) T {
	for _, offset := range seq.stringOffsets {
		ps := (*string)(unsafe.Pointer(uintptr(unsafe.Pointer(&value)) + offset))
		*ps = cloneString(*ps)
		*ps = stringslite.Clone(*ps)
	}
	return value
}
@ -86,15 +87,3 @@ func buildArrayCloneSeq(typ *abi.Type, seq *cloneSeq, baseOffset uintptr) {
		offset = (offset + align - 1) &^ (align - 1)
	}
}

// cloneString is a copy of strings.Clone, because we can't depend on the strings
// package here. Several packages that might make use of unique, like net, explicitly
// forbid depending on unicode, which strings depends on.
func cloneString(s string) string {
	if len(s) == 0 {
		return ""
	}
	b := make([]byte, len(s))
	copy(b, s)
	return unsafe.String(&b[0], len(b))
}

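The removed cloneString here was a local copy of strings.Clone; the package now routes through internal/stringslite instead. The standalone example below (using the exported strings.Clone) shows why cloning embedded strings matters for interning code: a substring shares its parent's backing array, and storing it uncloned would keep the whole parent alive.

```go
package main

import (
	"fmt"
	"strings"
	"unsafe"
)

func main() {
	big := strings.Repeat("x", 1<<20)
	small := big[:3] // shares big's 1 MiB backing array

	kept := strings.Clone(small) // 3-byte copy, independent of big

	fmt.Println(unsafe.StringData(small) == unsafe.StringData(big)) // true: shared storage
	fmt.Println(unsafe.StringData(kept) == unsafe.StringData(big))  // false: cloned
}
```
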
@ -0,0 +1,32 @@
// run

// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Test to make sure that we don't try using larger loads for
// generated equality functions on architectures that can't do
// unaligned loads.

package main

// T has a big field that wants to be compared with larger loads/stores.
// T is "special" because of the unnamed field, so it needs a generated equality function.
// T is an odd number of bytes in size and has alignment 1.
type T struct {
	src [8]byte
	_   byte
}

// U contains 8 copies of T, each at a different %8 alignment.
type U [8]T

//go:noinline
func f(x, y *U) bool {
	return *x == *y
}

func main() {
	var a U
	_ = f(&a, &a)
}

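The point of the test's type layout is that T is 9 bytes with alignment 1, so the elements of U begin at every offset modulo 8 and the 8-byte field may be unaligned. That property can be checked directly; the snippet below just reproduces the type shape from the test above.

```go
package main

import (
	"fmt"
	"unsafe"
)

// Same shape as the test's T: the trailing blank byte makes the size odd
// while the alignment stays 1.
type T struct {
	src [8]byte
	_   byte
}

type U [8]T

func main() {
	fmt.Println(unsafe.Sizeof(T{}), unsafe.Alignof(T{})) // 9 1
	fmt.Println(unsafe.Sizeof(U{}))                      // 72: elements start at every offset mod 8
}
```
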