chore: make deps

2025-11-11 14:18:57 +01:00
parent db7c4042d0
commit 45af67d22d
590 changed files with 22837 additions and 16387 deletions

View File

@ -3,11 +3,14 @@
// license that can be found in the LICENSE file.
// Package curve25519 provides an implementation of the X25519 function, which
// performs scalar multiplication on the elliptic curve known as Curve25519.
// See RFC 7748.
// performs scalar multiplication on the elliptic curve known as Curve25519
// according to [RFC 7748].
//
// This package is a wrapper for the X25519 implementation
// in the crypto/ecdh package.
// The curve25519 package is a wrapper for the X25519 implementation in the
// crypto/ecdh package. It is [frozen] and is not accepting new features.
//
// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748
// [frozen]: https://go.dev/wiki/Frozen
package curve25519
import "crypto/ecdh"

View File

@ -430,8 +430,9 @@ func (c *client) List() ([]*Key, error) {
return keys, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to list keys")
default:
return nil, fmt.Errorf("agent: failed to list keys, unexpected message type %T", msg)
}
panic("unreachable")
}
// Sign has the agent sign the data using a protocol 2 key as defined
@ -462,8 +463,9 @@ func (c *client) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureFl
return &sig, nil
case *failureAgentMsg:
return nil, errors.New("agent: failed to sign challenge")
default:
return nil, fmt.Errorf("agent: failed to sign challenge, unexpected message type %T", msg)
}
panic("unreachable")
}
// unmarshal parses an agent message in packet, returning the parsed

View File

@ -8,6 +8,7 @@ import (
"crypto/aes"
"crypto/cipher"
"crypto/des"
"crypto/fips140"
"crypto/rc4"
"crypto/subtle"
"encoding/binary"
@ -15,6 +16,7 @@ import (
"fmt"
"hash"
"io"
"slices"
"golang.org/x/crypto/chacha20"
"golang.org/x/crypto/internal/poly1305"
@ -93,41 +95,41 @@ func streamCipherMode(skip int, createFunc func(key, iv []byte) (cipher.Stream,
}
// cipherModes documents properties of supported ciphers. Ciphers not included
// are not supported and will not be negotiated, even if explicitly requested in
// ClientConfig.Crypto.Ciphers.
var cipherModes = map[string]*cipherMode{
// Ciphers from RFC 4344, which introduced many CTR-based ciphers. Algorithms
// are defined in the order specified in the RFC.
CipherAES128CTR: {16, aes.BlockSize, streamCipherMode(0, newAESCTR)},
CipherAES192CTR: {24, aes.BlockSize, streamCipherMode(0, newAESCTR)},
CipherAES256CTR: {32, aes.BlockSize, streamCipherMode(0, newAESCTR)},
// are not supported and will not be negotiated, even if explicitly configured.
// When FIPS mode is enabled, only FIPS-approved algorithms are included.
var cipherModes = map[string]*cipherMode{}
// Ciphers from RFC 4345, which introduces security-improved arcfour ciphers.
// They are defined in the order specified in the RFC.
InsecureCipherRC4128: {16, 0, streamCipherMode(1536, newRC4)},
InsecureCipherRC4256: {32, 0, streamCipherMode(1536, newRC4)},
func init() {
cipherModes[CipherAES128CTR] = &cipherMode{16, aes.BlockSize, streamCipherMode(0, newAESCTR)}
cipherModes[CipherAES192CTR] = &cipherMode{24, aes.BlockSize, streamCipherMode(0, newAESCTR)}
cipherModes[CipherAES256CTR] = &cipherMode{32, aes.BlockSize, streamCipherMode(0, newAESCTR)}
// Use of GCM with arbitrary IVs is not allowed in FIPS 140-only mode,
// we'll wire it up to NewGCMForSSH in Go 1.26.
//
// For now it means we'll work with fips140=on but not fips140=only.
cipherModes[CipherAES128GCM] = &cipherMode{16, 12, newGCMCipher}
cipherModes[CipherAES256GCM] = &cipherMode{32, 12, newGCMCipher}
// Cipher defined in RFC 4253, which describes SSH Transport Layer Protocol.
// Note that this cipher is not safe, as stated in RFC 4253: "Arcfour (and
// RC4) has problems with weak keys, and should be used with caution."
// RFC 4345 introduces improved versions of Arcfour.
InsecureCipherRC4: {16, 0, streamCipherMode(0, newRC4)},
// AEAD ciphers
CipherAES128GCM: {16, 12, newGCMCipher},
CipherAES256GCM: {32, 12, newGCMCipher},
CipherChaCha20Poly1305: {64, 0, newChaCha20Cipher},
if fips140.Enabled() {
defaultCiphers = slices.DeleteFunc(defaultCiphers, func(algo string) bool {
_, ok := cipherModes[algo]
return !ok
})
return
}
cipherModes[CipherChaCha20Poly1305] = &cipherMode{64, 0, newChaCha20Cipher}
// Insecure ciphers not included in the default configuration.
cipherModes[InsecureCipherRC4128] = &cipherMode{16, 0, streamCipherMode(1536, newRC4)}
cipherModes[InsecureCipherRC4256] = &cipherMode{32, 0, streamCipherMode(1536, newRC4)}
cipherModes[InsecureCipherRC4] = &cipherMode{16, 0, streamCipherMode(0, newRC4)}
// CBC mode is insecure and so is not included in the default config.
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.
InsecureCipherAES128CBC: {16, aes.BlockSize, newAESCBCCipher},
// 3des-cbc is insecure and is not included in the default
// config.
InsecureCipherTripleDESCBC: {24, des.BlockSize, newTripleDESCBCCipher},
cipherModes[InsecureCipherAES128CBC] = &cipherMode{16, aes.BlockSize, newAESCBCCipher}
cipherModes[InsecureCipherTripleDESCBC] = &cipherMode{24, des.BlockSize, newTripleDESCBCCipher}
}
// prefixLen is the length of the packet prefix that contains the packet length
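The new init above registers the cipher table and, when FIPS mode is enabled, prunes defaultCiphers down to the algorithms that actually have an entry in cipherModes. A self-contained sketch of that slices.DeleteFunc filtering pattern follows; the algorithm names and the implemented map are illustrative stand-ins, not the package's real tables.

package main

import (
	"fmt"
	"slices"
)

func main() {
	// Stand-in for cipherModes: the set of algorithms with an implementation.
	implemented := map[string]bool{
		"aes128-ctr":             true,
		"aes256-gcm@openssh.com": true,
	}
	// Stand-in for defaultCiphers: the preference-ordered default list.
	defaults := []string{
		"chacha20-poly1305@openssh.com",
		"aes128-ctr",
		"aes256-gcm@openssh.com",
	}
	// Same shape as the FIPS branch above: drop anything not implemented.
	defaults = slices.DeleteFunc(defaults, func(algo string) bool {
		return !implemented[algo]
	})
	fmt.Println(defaults) // [aes128-ctr aes256-gcm@openssh.com]
}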

View File

@ -9,6 +9,7 @@ import (
"errors"
"fmt"
"io"
"slices"
"strings"
)
@ -83,7 +84,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
if m := auth.method(); !contains(tried, m) {
if m := auth.method(); !slices.Contains(tried, m) {
tried = append(tried, m)
}
}
@ -97,7 +98,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
if contains(tried, candidateMethod) {
if slices.Contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@ -117,15 +118,6 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}
func contains(list []string, e string) bool {
for _, s := range list {
if s == e {
return true
}
}
return false
}
// An AuthMethod represents an instance of an RFC 4252 authentication method.
type AuthMethod interface {
// auth authenticates user over transport t.
@ -255,7 +247,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Fallback to use if there is no "server-sig-algs" extension or a
// common algorithm cannot be found. We use the public key format if the
// MultiAlgorithmSigner supports it, otherwise we return an error.
if !contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
if !slices.Contains(as.Algorithms(), underlyingAlgo(keyFormat)) {
return "", fmt.Errorf("ssh: no common public key signature algorithm, server only supports %q for key type %q, signer only supports %v",
underlyingAlgo(keyFormat), keyFormat, as.Algorithms())
}
@ -284,7 +276,7 @@ func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (MultiA
// Filter algorithms based on those supported by MultiAlgorithmSigner.
var keyAlgos []string
for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(as.Algorithms(), underlyingAlgo(algo)) {
if slices.Contains(as.Algorithms(), underlyingAlgo(algo)) {
keyAlgos = append(keyAlgos, algo)
}
}
@ -334,7 +326,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// the key try to use the obtained algorithm as if "server-sig-algs" had
// not been implemented if supported from the algorithm signer.
if !ok && idx < origSignersLen && isRSACert(algo) && algo != CertAlgoRSAv01 {
if contains(as.Algorithms(), KeyAlgoRSA) {
if slices.Contains(as.Algorithms(), KeyAlgoRSA) {
// We retry using the compat algorithm after all signers have
// been tried normally.
signers = append(signers, &multiAlgorithmSigner{
@ -385,7 +377,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
// contain the "publickey" method, do not attempt to authenticate with any
// other keys. According to RFC 4252 Section 7, the latter can occur when
// additional authentication methods are required.
if success == authSuccess || !contains(methods, cb.method()) {
if success == authSuccess || !slices.Contains(methods, cb.method()) {
return success, methods, err
}
}
@ -434,7 +426,7 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
// servers send the key type instead. OpenSSH allows any algorithm
// that matches the public key, so we do the same.
// https://github.com/openssh/openssh-portable/blob/86bdd385/sshconnect2.c#L709
if !contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
if !slices.Contains(algorithmsForKeyFormat(key.Type()), msg.Algo) {
return false, nil
}
if !bytes.Equal(msg.PubKey, pubKey) {
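The edits in this file replace the package-local contains helper with the standard library's slices.Contains (Go 1.21+). A tiny illustration of the drop-in equivalence, with made-up method names:

package main

import (
	"fmt"
	"slices"
)

func main() {
	tried := []string{"none", "publickey"}
	fmt.Println(slices.Contains(tried, "publickey")) // true
	fmt.Println(slices.Contains(tried, "password"))  // false
}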

View File

@ -6,6 +6,7 @@ package ssh
import (
"crypto"
"crypto/fips140"
"crypto/rand"
"fmt"
"io"
@ -256,6 +257,40 @@ type Algorithms struct {
PublicKeyAuths []string
}
func init() {
if fips140.Enabled() {
defaultHostKeyAlgos = slices.DeleteFunc(defaultHostKeyAlgos, func(algo string) bool {
_, err := hashFunc(underlyingAlgo(algo))
return err != nil
})
defaultPubKeyAuthAlgos = slices.DeleteFunc(defaultPubKeyAuthAlgos, func(algo string) bool {
_, err := hashFunc(underlyingAlgo(algo))
return err != nil
})
}
}
func hashFunc(format string) (crypto.Hash, error) {
switch format {
case KeyAlgoRSASHA256, KeyAlgoECDSA256, KeyAlgoSKED25519, KeyAlgoSKECDSA256:
return crypto.SHA256, nil
case KeyAlgoECDSA384:
return crypto.SHA384, nil
case KeyAlgoRSASHA512, KeyAlgoECDSA521:
return crypto.SHA512, nil
case KeyAlgoED25519:
// KeyAlgoED25519 doesn't pre-hash.
return 0, nil
case KeyAlgoRSA, InsecureKeyAlgoDSA:
if fips140.Enabled() {
return 0, fmt.Errorf("ssh: hash algorithm for format %q not allowed in FIPS 140 mode", format)
}
return crypto.SHA1, nil
default:
return 0, fmt.Errorf("ssh: hash algorithm for format %q not mapped", format)
}
}
// SupportedAlgorithms returns algorithms currently implemented by this package,
// excluding those with security issues, which are returned by
// InsecureAlgorithms. The algorithms listed here are in preference order.
@ -283,21 +318,6 @@ func InsecureAlgorithms() Algorithms {
var supportedCompressions = []string{compressionNone}
// hashFuncs keeps the mapping of supported signature algorithms to their
// respective hashes needed for signing and verification.
var hashFuncs = map[string]crypto.Hash{
KeyAlgoRSA: crypto.SHA1,
KeyAlgoRSASHA256: crypto.SHA256,
KeyAlgoRSASHA512: crypto.SHA512,
InsecureKeyAlgoDSA: crypto.SHA1,
KeyAlgoECDSA256: crypto.SHA256,
KeyAlgoECDSA384: crypto.SHA384,
KeyAlgoECDSA521: crypto.SHA512,
// KeyAlgoED25519 doesn't pre-hash.
KeyAlgoSKECDSA256: crypto.SHA256,
KeyAlgoSKED25519: crypto.SHA256,
}
// algorithmsForKeyFormat returns the supported signature algorithms for a given
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
// Section 2. See also the note in sendKexInit on backwards compatibility.
@ -312,11 +332,40 @@ func algorithmsForKeyFormat(keyFormat string) []string {
}
}
// keyFormatForAlgorithm returns the key format corresponding to the given
// signature algorithm. It returns an empty string if the signature algorithm is
// invalid or unsupported.
func keyFormatForAlgorithm(sigAlgo string) string {
switch sigAlgo {
case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512:
return KeyAlgoRSA
case CertAlgoRSAv01, CertAlgoRSASHA256v01, CertAlgoRSASHA512v01:
return CertAlgoRSAv01
case KeyAlgoED25519,
KeyAlgoSKED25519,
KeyAlgoSKECDSA256,
KeyAlgoECDSA256,
KeyAlgoECDSA384,
KeyAlgoECDSA521,
InsecureKeyAlgoDSA,
InsecureCertAlgoDSAv01,
CertAlgoECDSA256v01,
CertAlgoECDSA384v01,
CertAlgoECDSA521v01,
CertAlgoSKECDSA256v01,
CertAlgoED25519v01,
CertAlgoSKED25519v01:
return sigAlgo
default:
return ""
}
}
// isRSA returns whether algo is a supported RSA algorithm, including certificate
// algorithms.
func isRSA(algo string) bool {
algos := algorithmsForKeyFormat(KeyAlgoRSA)
return contains(algos, underlyingAlgo(algo))
return slices.Contains(algos, underlyingAlgo(algo))
}
func isRSACert(algo string) bool {
@ -515,7 +564,7 @@ func (c *Config) SetDefaults() {
if kexAlgoMap[k] != nil {
// Ignore the KEX if we have no kexAlgoMap definition.
kexs = append(kexs, k)
if k == KeyExchangeCurve25519 && !contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
if k == KeyExchangeCurve25519 && !slices.Contains(c.KeyExchanges, keyExchangeCurve25519LibSSH) {
kexs = append(kexs, keyExchangeCurve25519LibSSH)
}
}
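hashFunc above returns a crypto.Hash value (or an error in FIPS mode for SHA-1-based formats) in place of the removed hashFuncs map. A small sketch of how such a crypto.Hash value is turned into a digest by its callers; the payload is illustrative.

package main

import (
	"crypto"
	_ "crypto/sha256" // registers SHA-256 so crypto.SHA256.New() works
	"fmt"
)

func main() {
	hash := crypto.SHA256 // e.g. what hashFunc(KeyAlgoRSASHA256) would return
	h := hash.New()
	h.Write([]byte("data to be signed"))
	fmt.Printf("digest: %x\n", h.Sum(nil))
}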

View File

@ -17,8 +17,18 @@ References:
[PROTOCOL.certkeys]: http://cvsweb.openbsd.org/cgi-bin/cvsweb/src/usr.bin/ssh/PROTOCOL.certkeys?rev=HEAD
[SSH-PARAMETERS]: http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xml#ssh-parameters-1
[SSH-CERTS]: https://datatracker.ietf.org/doc/html/draft-miller-ssh-cert-01
[FIPS 140-3 mode]: https://go.dev/doc/security/fips140
This package does not fall under the stability promise of the Go language itself,
so its API may be changed when pressing needs arise.
# FIPS 140-3 mode
When the program is in [FIPS 140-3 mode], this package behaves as if only SP
800-140C and SP 800-140D approved cipher suites, signature algorithms,
certificate public key types and sizes, and key exchange and derivation
algorithms were implemented. Others are silently ignored and not negotiated, or
rejected. This set may depend on the algorithms supported by the FIPS 140-3 Go
Cryptographic Module selected with GOFIPS140, and may change across Go versions.
*/
package ssh
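The new FIPS 140-3 section above refers to the mode selected via the FIPS 140-3 Go Cryptographic Module. A minimal runtime check (Go 1.24+), assuming the mode is enabled with GODEBUG=fips140=on or a GOFIPS140 toolchain setting:

package main

import (
	"crypto/fips140"
	"fmt"
)

func main() {
	// When this reports true, the ssh package negotiates only the
	// FIPS-approved algorithms described in the doc comment above.
	fmt.Println("FIPS 140-3 mode:", fips140.Enabled())
}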

View File

@ -10,6 +10,7 @@ import (
"io"
"log"
"net"
"slices"
"strings"
"sync"
)
@ -527,7 +528,7 @@ func (t *handshakeTransport) sendKexInit() error {
switch s := k.(type) {
case MultiAlgorithmSigner:
for _, algo := range algorithmsForKeyFormat(keyFormat) {
if contains(s.Algorithms(), underlyingAlgo(algo)) {
if slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
}
}
@ -679,7 +680,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
return err
}
if t.sessionID == nil && ((isClient && contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && contains(clientInit.KexAlgos, kexStrictClient))) {
if t.sessionID == nil && ((isClient && slices.Contains(serverInit.KexAlgos, kexStrictServer)) || (!isClient && slices.Contains(clientInit.KexAlgos, kexStrictClient))) {
t.strictMode = true
if err := t.conn.setStrictMode(); err != nil {
return err
@ -736,7 +737,7 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
// On the server side, after the first SSH_MSG_NEWKEYS, send a SSH_MSG_EXT_INFO
// message with the server-sig-algs extension if the client supports it. See
// RFC 8308, Sections 2.4 and 3.1, and [PROTOCOL], Section 1.9.
if !isClient && firstKeyExchange && contains(clientInit.KexAlgos, "ext-info-c") {
if !isClient && firstKeyExchange && slices.Contains(clientInit.KexAlgos, "ext-info-c") {
supportedPubKeyAuthAlgosList := strings.Join(t.publicKeyAuthAlgorithms, ",")
extInfo := &extInfoMsg{
NumExtensions: 2,
@ -790,7 +791,7 @@ func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, a
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
for _, k := range hostKeys {
if s, ok := k.(MultiAlgorithmSigner); ok {
if !contains(s.Algorithms(), underlyingAlgo(algo)) {
if !slices.Contains(s.Algorithms(), underlyingAlgo(algo)) {
continue
}
}

View File

@ -8,12 +8,14 @@ import (
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/fips140"
"crypto/rand"
"encoding/binary"
"errors"
"fmt"
"io"
"math/big"
"slices"
"golang.org/x/crypto/curve25519"
)
@ -395,9 +397,27 @@ func ecHash(curve elliptic.Curve) crypto.Hash {
return crypto.SHA512
}
// kexAlgoMap defines the supported KEXs. KEXs not included are not supported
// and will not be negotiated, even if explicitly configured. When FIPS mode is
// enabled, only FIPS-approved algorithms are included.
var kexAlgoMap = map[string]kexAlgorithm{}
func init() {
// mlkem768x25519-sha256 will work with fips140=on but not fips140=only
// until Go 1.26.
kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
if fips140.Enabled() {
defaultKexAlgos = slices.DeleteFunc(defaultKexAlgos, func(algo string) bool {
_, ok := kexAlgoMap[algo]
return !ok
})
return
}
p, _ := new(big.Int).SetString(oakleyGroup2, 16)
kexAlgoMap[InsecureKeyExchangeDH1SHA1] = &dhGroup{
g: new(big.Int).SetInt64(2),
@ -431,14 +451,10 @@ func init() {
hashFunc: crypto.SHA512,
}
kexAlgoMap[KeyExchangeECDHP521] = &ecdh{elliptic.P521()}
kexAlgoMap[KeyExchangeECDHP384] = &ecdh{elliptic.P384()}
kexAlgoMap[KeyExchangeECDHP256] = &ecdh{elliptic.P256()}
kexAlgoMap[KeyExchangeCurve25519] = &curve25519sha256{}
kexAlgoMap[keyExchangeCurve25519LibSSH] = &curve25519sha256{}
kexAlgoMap[InsecureKeyExchangeDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
kexAlgoMap[KeyExchangeDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
kexAlgoMap[KeyExchangeMLKEM768X25519] = &mlkem768WithCurve25519sha256{}
}
// curve25519sha256 implements the curve25519-sha256 (formerly known as

View File

@ -27,6 +27,7 @@ import (
"fmt"
"io"
"math/big"
"slices"
"strings"
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
@ -89,6 +90,11 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
}
return cert, nil, nil
}
if keyFormat := keyFormatForAlgorithm(algo); keyFormat != "" {
return nil, nil, fmt.Errorf("ssh: signature algorithm %q isn't a key format; key is malformed and should be re-encoded with type %q",
algo, keyFormat)
}
return nil, nil, fmt.Errorf("ssh: unknown key algorithm: %v", algo)
}
@ -191,9 +197,10 @@ func ParseKnownHosts(in []byte) (marker string, hosts []string, pubKey PublicKey
return "", nil, nil, "", nil, io.EOF
}
// ParseAuthorizedKey parses a public key from an authorized_keys
// file used in OpenSSH according to the sshd(8) manual page.
// ParseAuthorizedKey parses a public key from an authorized_keys file used in
// OpenSSH according to the sshd(8) manual page. Invalid lines are ignored.
func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []string, rest []byte, err error) {
var lastErr error
for len(in) > 0 {
end := bytes.IndexByte(in, '\n')
if end != -1 {
@ -222,6 +229,8 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
return out, comment, options, rest, nil
} else {
lastErr = err
}
// No key type recognised. Maybe there's an options field at
@ -264,12 +273,18 @@ func ParseAuthorizedKey(in []byte) (out PublicKey, comment string, options []str
if out, comment, err = parseAuthorizedKey(in[i:]); err == nil {
options = candidateOptions
return out, comment, options, rest, nil
} else {
lastErr = err
}
in = rest
continue
}
if lastErr != nil {
return nil, "", nil, nil, fmt.Errorf("ssh: no key found; last parsing error for ignored line: %w", lastErr)
}
return nil, "", nil, nil, errors.New("ssh: no key found")
}
@ -395,11 +410,11 @@ func NewSignerWithAlgorithms(signer AlgorithmSigner, algorithms []string) (Multi
}
for _, algo := range algorithms {
if !contains(supportedAlgos, algo) {
if !slices.Contains(supportedAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is not supported for key type %q",
algo, signer.PublicKey().Type())
}
if !contains(signerAlgos, algo) {
if !slices.Contains(signerAlgos, algo) {
return nil, fmt.Errorf("ssh: algorithm %q is restricted for the provided signer", algo)
}
}
@ -486,10 +501,13 @@ func (r *rsaPublicKey) Marshal() []byte {
func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
supportedAlgos := algorithmsForKeyFormat(r.Type())
if !contains(supportedAlgos, sig.Format) {
if !slices.Contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
hash := hashFuncs[sig.Format]
hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@ -606,7 +624,11 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := hashFuncs[sig.Format].New()
hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@ -651,7 +673,11 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
h := hashFuncs[k.PublicKey().Type()].New()
hash, err := hashFunc(k.PublicKey().Type())
if err != nil {
return nil, err
}
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@ -801,8 +827,11 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := hashFuncs[sig.Format].New()
hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@ -905,8 +934,11 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := hashFuncs[sig.Format].New()
hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@ -1009,7 +1041,11 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}
h := hashFuncs[sig.Format].New()
hash, err := hashFunc(sig.Format)
if err != nil {
return err
}
h := hash.New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)
@ -1112,11 +1148,14 @@ func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
algorithm = s.pubKey.Type()
}
if !contains(s.Algorithms(), algorithm) {
if !slices.Contains(s.Algorithms(), algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}
hashFunc := hashFuncs[algorithm]
hashFunc, err := hashFunc(algorithm)
if err != nil {
return nil, err
}
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
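ParseAuthorizedKey's doc comment now states that invalid lines are ignored, and the parser remembers the last per-line error for its final message. A hedged round-trip sketch: generate a key, marshal it in authorized_keys format, and parse it back. The key material here is freshly generated, not taken from this change.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	pub, _, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}
	sshPub, err := ssh.NewPublicKey(pub)
	if err != nil {
		panic(err)
	}
	line := ssh.MarshalAuthorizedKey(sshPub) // "ssh-ed25519 AAAA...\n"

	parsed, comment, options, rest, err := ssh.ParseAuthorizedKey(line)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Type(), comment, options, len(rest))
}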

View File

@ -7,11 +7,13 @@ package ssh
// Message authentication support
import (
"crypto/fips140"
"crypto/hmac"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"hash"
"slices"
)
type macMode struct {
@ -46,23 +48,37 @@ func (t truncatingMAC) Size() int {
func (t truncatingMAC) BlockSize() int { return t.hmac.BlockSize() }
var macModes = map[string]*macMode{
HMACSHA512ETM: {64, true, func(key []byte) hash.Hash {
// macModes defines the supported MACs. MACs not included are not supported
// and will not be negotiated, even if explicitly configured. When FIPS mode is
// enabled, only FIPS-approved algorithms are included.
var macModes = map[string]*macMode{}
func init() {
macModes[HMACSHA512ETM] = &macMode{64, true, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
}},
HMACSHA256ETM: {32, true, func(key []byte) hash.Hash {
}}
macModes[HMACSHA256ETM] = &macMode{32, true, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
}},
HMACSHA512: {64, false, func(key []byte) hash.Hash {
}}
macModes[HMACSHA512] = &macMode{64, false, func(key []byte) hash.Hash {
return hmac.New(sha512.New, key)
}},
HMACSHA256: {32, false, func(key []byte) hash.Hash {
}}
macModes[HMACSHA256] = &macMode{32, false, func(key []byte) hash.Hash {
return hmac.New(sha256.New, key)
}},
HMACSHA1: {20, false, func(key []byte) hash.Hash {
}}
if fips140.Enabled() {
defaultMACs = slices.DeleteFunc(defaultMACs, func(algo string) bool {
_, ok := macModes[algo]
return !ok
})
return
}
macModes[HMACSHA1] = &macMode{20, false, func(key []byte) hash.Hash {
return hmac.New(sha1.New, key)
}},
InsecureHMACSHA196: {20, false, func(key []byte) hash.Hash {
}}
macModes[InsecureHMACSHA196] = &macMode{20, false, func(key []byte) hash.Hash {
return truncatingMAC{12, hmac.New(sha1.New, key)}
}},
}}
}

View File

@ -10,6 +10,7 @@ import (
"fmt"
"io"
"net"
"slices"
"strings"
)
@ -43,6 +44,9 @@ type Permissions struct {
// pass data from the authentication callbacks to the server
// application layer.
Extensions map[string]string
// ExtraData allows storing user-defined data.
ExtraData map[any]any
}
type GSSAPIWithMICConfig struct {
@ -126,6 +130,21 @@ type ServerConfig struct {
// Permissions.Extensions entry.
PublicKeyCallback func(conn ConnMetadata, key PublicKey) (*Permissions, error)
// VerifiedPublicKeyCallback, if non-nil, is called after a client
// successfully confirms having control over a key that was previously
// approved by PublicKeyCallback. The permissions object passed to the
// callback is the one returned by PublicKeyCallback for the given public
// key and its ownership is transferred to the callback. The returned
// Permissions object can be the same object, optionally modified, or a
// completely new object. If VerifiedPublicKeyCallback is non-nil,
// PublicKeyCallback is not allowed to return a PartialSuccessError, which
// can instead be returned by VerifiedPublicKeyCallback.
//
// VerifiedPublicKeyCallback does not affect which authentication methods
// are included in the list of methods that can be attempted by the client.
VerifiedPublicKeyCallback func(conn ConnMetadata, key PublicKey, permissions *Permissions,
signatureAlgorithm string) (*Permissions, error)
// KeyboardInteractiveCallback, if non-nil, is called when
// keyboard-interactive authentication is selected (RFC
// 4256). The client object's Challenge function should be
@ -246,7 +265,7 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
fullConf.PublicKeyAuthAlgorithms = defaultPubKeyAuthAlgos
} else {
for _, algo := range fullConf.PublicKeyAuthAlgorithms {
if !contains(SupportedAlgorithms().PublicKeyAuths, algo) && !contains(InsecureAlgorithms().PublicKeyAuths, algo) {
if !slices.Contains(SupportedAlgorithms().PublicKeyAuths, algo) && !slices.Contains(InsecureAlgorithms().PublicKeyAuths, algo) {
c.Close()
return nil, nil, nil, fmt.Errorf("ssh: unsupported public key authentication algorithm %s", algo)
}
@ -631,7 +650,7 @@ userAuthLoop:
return nil, parseError(msgUserAuthRequest)
}
algo := string(algoBytes)
if !contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
if !slices.Contains(config.PublicKeyAuthAlgorithms, underlyingAlgo(algo)) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", algo)
break
}
@ -652,6 +671,9 @@ userAuthLoop:
candidate.pubKeyData = pubKeyData
candidate.perms, candidate.result = authConfig.PublicKeyCallback(s, pubKey)
_, isPartialSuccessError := candidate.result.(*PartialSuccessError)
if isPartialSuccessError && config.VerifiedPublicKeyCallback != nil {
return nil, errors.New("ssh: invalid library usage: PublicKeyCallback must not return partial success when VerifiedPublicKeyCallback is defined")
}
if (candidate.result == nil || isPartialSuccessError) &&
candidate.perms != nil &&
@ -695,7 +717,7 @@ userAuthLoop:
// ssh-rsa-cert-v01@openssh.com algorithm with ssh-rsa public
// key type. The algorithm and public key type must be
// consistent: both must be certificate algorithms, or neither.
if !contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
if !slices.Contains(algorithmsForKeyFormat(pubKey.Type()), algo) {
authErr = fmt.Errorf("ssh: public key type %q not compatible with selected algorithm %q",
pubKey.Type(), algo)
break
@ -705,7 +727,7 @@ userAuthLoop:
// algorithm name that corresponds to algo with
// sig.Format. This is usually the same, but
// for certs, the names differ.
if !contains(config.PublicKeyAuthAlgorithms, sig.Format) {
if !slices.Contains(config.PublicKeyAuthAlgorithms, sig.Format) {
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
@ -722,6 +744,12 @@ userAuthLoop:
authErr = candidate.result
perms = candidate.perms
if authErr == nil && config.VerifiedPublicKeyCallback != nil {
// Only call VerifiedPublicKeyCallback after the key has been accepted
// and successfully verified. If authErr is non-nil, the key is not
// considered verified and the callback must not run.
perms, authErr = config.VerifiedPublicKeyCallback(s, pubKey, perms, algo)
}
}
case "gssapi-with-mic":
if authConfig.GSSAPIWithMICConfig == nil {
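The new VerifiedPublicKeyCallback runs only after the client has proven possession of a key that PublicKeyCallback already approved, as the doc comment and the userAuthLoop change above describe. A hedged sketch of wiring both callbacks together; the allow-list and the logging are illustrative, not part of the library.

package main

import (
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	authorized := map[string]bool{} // illustrative allow-list keyed by marshaled public key
	config := &ssh.ServerConfig{
		// Called first, for any key the client offers (possession not yet proven).
		PublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey) (*ssh.Permissions, error) {
			if !authorized[string(key.Marshal())] {
				return nil, fmt.Errorf("unknown key for user %q", conn.User())
			}
			return &ssh.Permissions{
				Extensions: map[string]string{"pubkey-fp": ssh.FingerprintSHA256(key)},
			}, nil
		},
		// Called only once the signature has verified; may adjust or replace perms.
		VerifiedPublicKeyCallback: func(conn ssh.ConnMetadata, key ssh.PublicKey, perms *ssh.Permissions, sigAlgo string) (*ssh.Permissions, error) {
			log.Printf("user %s verified a key using %s", conn.User(), sigAlgo)
			return perms, nil
		},
	}
	_ = config
}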

View File

@ -8,6 +8,7 @@ import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"log"
)
@ -254,6 +255,9 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs DirectionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
if cipherMode == nil {
return nil, fmt.Errorf("ssh: unsupported cipher %v", algs.Cipher)
}
iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)

View File

@ -27,6 +27,7 @@ import (
// - If the resulting value is zero or out of range, use a default.
type http2Config struct {
MaxConcurrentStreams uint32
StrictMaxConcurrentRequests bool
MaxDecoderHeaderTableSize uint32
MaxEncoderHeaderTableSize uint32
MaxReadFrameSize uint32
@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams,
MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize,
MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize,
MaxReadFrameSize: h2.MaxReadFrameSize,
SendPingTimeout: h2.ReadIdleTimeout,
PingTimeout: h2.PingTimeout,
WriteByteTimeout: h2.WriteByteTimeout,
}
// Unlike most config fields, where out-of-range values revert to the default,
@ -128,6 +130,9 @@ func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
if h2.MaxConcurrentStreams != 0 {
conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
}
if http2ConfigStrictMaxConcurrentRequests(h2) {
conf.StrictMaxConcurrentRequests = true
}
if h2.MaxEncoderHeaderTableSize != 0 {
conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
}
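configFromTransport above now carries the strict-concurrency setting into the per-connection config as StrictMaxConcurrentRequests. A small sketch of the transport-level knob that feeds it; the client construction is illustrative.

package main

import (
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	t := &http2.Transport{
		// With strict mode on, the client waits for stream capacity instead of
		// dialing an extra TCP connection when the server's
		// SETTINGS_MAX_CONCURRENT_STREAMS limit is reached.
		StrictMaxConcurrentStreams: true,
	}
	client := &http.Client{Transport: t}
	_ = client
}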

vendor/golang.org/x/net/http2/config_go125.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return false
}

vendor/golang.org/x/net/http2/config_go126.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.26
package http2
import (
"net/http"
)
func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool {
return h2.StrictMaxConcurrentRequests
}

View File

@ -347,7 +347,7 @@ func (fr *Framer) maxHeaderListSize() uint32 {
func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) {
// Write the FrameHeader.
f.wbuf = append(f.wbuf[:0],
0, // 3 bytes of length, filled in in endWrite
0, // 3 bytes of length, filled in endWrite
0,
0,
byte(ftype),
@ -1152,6 +1152,15 @@ type PriorityFrame struct {
PriorityParam
}
var defaultRFC9218Priority = PriorityParam{
incremental: 0,
urgency: 3,
}
// Note that HTTP/2 has had two different prioritization schemes, and the
// PriorityParam struct below is a superset of both schemes. The exported
// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
// PriorityParam holds the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
@ -1167,6 +1176,20 @@ type PriorityParam struct {
// the spec, "Add one to the value to obtain a weight between
// 1 and 256."
Weight uint8
// "The urgency (u) parameter value is Integer (see Section 3.3.1 of
// [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of
// priority. The default is 3."
urgency uint8
// "The incremental (i) parameter value is Boolean (see Section 3.3.6 of
// [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed
// incrementally, i.e., provide some meaningful output as chunks of the
// response arrive."
//
// We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can
// avoid unnecessary type conversions and because either type takes 1 byte.
incremental uint8
}
func (p PriorityParam) IsZero() bool {

View File

@ -34,7 +34,6 @@ var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
// Enabling extended CONNECT causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket

View File

@ -181,6 +181,10 @@ type Server struct {
type serverInternalState struct {
mu sync.Mutex
activeConns map[*serverConn]struct{}
// Pool of error channels. This is per-Server rather than global
// because channels can't be reused across synctest bubbles.
errChanPool sync.Pool
}
func (s *serverInternalState) registerConn(sc *serverConn) {
@ -212,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() {
s.mu.Unlock()
}
// Global error channel pool used for uninitialized Servers.
// We use a per-Server pool when possible to avoid using channels across synctest bubbles.
var errChanPool = sync.Pool{
New: func() any { return make(chan error, 1) },
}
func (s *serverInternalState) getErrChan() chan error {
if s == nil {
return errChanPool.Get().(chan error) // Server used without calling ConfigureServer
}
return s.errChanPool.Get().(chan error)
}
func (s *serverInternalState) putErrChan(ch chan error) {
if s == nil {
errChanPool.Put(ch) // Server used without calling ConfigureServer
return
}
s.errChanPool.Put(ch)
}
// ConfigureServer adds HTTP/2 support to a net/http Server.
//
// The configuration conf may be nil.
@ -224,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if conf == nil {
conf = new(Server)
}
conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})}
conf.state = &serverInternalState{
activeConns: make(map[*serverConn]struct{}),
errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }},
}
if h1, h2 := s, conf; h2.IdleTimeout == 0 {
if h1.IdleTimeout != 0 {
h2.IdleTimeout = h1.IdleTimeout
@ -1124,25 +1152,6 @@ func (sc *serverConn) readPreface() error {
}
}
var errChanPool = sync.Pool{
New: func() interface{} { return make(chan error, 1) },
}
func getErrChan() chan error {
if inTests {
// Channels cannot be reused across synctest tests.
return make(chan error, 1)
} else {
return errChanPool.Get().(chan error)
}
}
func putErrChan(ch chan error) {
if !inTests {
errChanPool.Put(ch)
}
}
var writeDataPool = sync.Pool{
New: func() interface{} { return new(writeData) },
}
@ -1150,7 +1159,7 @@ var writeDataPool = sync.Pool{
// writeDataFromHandler writes DATA response frames from a handler on
// the given stream.
func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
ch := getErrChan()
ch := sc.srv.state.getErrChan()
writeArg := writeDataPool.Get().(*writeData)
*writeArg = writeData{stream.id, data, endStream}
err := sc.writeFrameFromHandler(FrameWriteRequest{
@ -1182,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea
return errStreamClosed
}
}
putErrChan(ch)
sc.srv.state.putErrChan(ch)
if frameWriteDone {
writeDataPool.Put(writeArg)
}
@ -2436,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
// waiting for this frame to be written, so an http.Flush mid-handler
// writes out the correct value of keys, before a handler later potentially
// mutates it.
errc = getErrChan()
errc = sc.srv.state.getErrChan()
}
if err := sc.writeFrameFromHandler(FrameWriteRequest{
write: headerData,
@ -2448,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro
if errc != nil {
select {
case err := <-errc:
putErrChan(errc)
sc.srv.state.putErrChan(errc)
return err
case <-sc.doneServing:
return errClientDisconnected
@ -3129,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
method: opts.Method,
url: u,
header: cloneHeader(opts.Header),
done: getErrChan(),
done: sc.srv.state.getErrChan(),
}
select {
@ -3146,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
case <-st.cw:
return errStreamClosed
case err := <-msg.done:
putErrChan(msg.done)
sc.srv.state.putErrChan(msg.done)
return err
}
}
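The per-Server errChanPool above replaces the package-level getErrChan/putErrChan helpers so channels are never reused across synctest bubbles. A self-contained sketch of the underlying pool-of-buffered-channels pattern, independent of the server wiring:

package main

import (
	"fmt"
	"sync"
)

// One-element error channels are pooled so each handler write does not
// allocate a fresh channel.
var errChanPool = sync.Pool{
	New: func() any { return make(chan error, 1) },
}

func main() {
	ch := errChanPool.Get().(chan error)
	ch <- nil           // the write loop reports its result
	fmt.Println(<-ch)   // the caller waits for it
	errChanPool.Put(ch) // return the drained channel for reuse
}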

View File

@ -355,6 +355,7 @@ type ClientConn struct {
readIdleTimeout time.Duration
pingTimeout time.Duration
extendedConnectAllowed bool
strictMaxConcurrentStreams bool
// rstStreamPingsBlocked works around an unfortunate gRPC behavior.
// gRPC strictly limits the number of PING frames that it will receive.
@ -784,7 +785,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
initialWindowSize: 65535, // spec default
initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream,
maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings.
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests,
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
seenSettingsChan: make(chan struct{}),
@ -1018,7 +1020,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
var maxConcurrentOkay bool
if cc.t.StrictMaxConcurrentStreams {
if cc.strictMaxConcurrentStreams {
// We'll tell the caller we can take a new request to
// prevent the caller from dialing a new TCP
// connection, but then we'll block later before

View File

@ -42,6 +42,8 @@ type OpenStreamOptions struct {
// PusherID is zero if the stream was initiated by the client. Otherwise,
// PusherID names the stream that pushed the newly opened stream.
PusherID uint32
// priority is used to set the priority of the newly opened stream.
priority PriorityParam
}
// FrameWriteRequest is a request to write a frame.

View File

@ -11,7 +11,7 @@ import (
)
// RFC 7540, Section 5.3.5: the default weight is 16.
const priorityDefaultWeight = 15 // 16 = 15 + 1
const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1
// PriorityWriteSchedulerConfig configures a priorityWriteScheduler.
type PriorityWriteSchedulerConfig struct {
@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
}
}
ws := &priorityWriteScheduler{
nodes: make(map[uint32]*priorityNode),
ws := &priorityWriteSchedulerRFC7540{
nodes: make(map[uint32]*priorityNodeRFC7540),
maxClosedNodesInTree: cfg.MaxClosedNodesInTree,
maxIdleNodesInTree: cfg.MaxIdleNodesInTree,
enableWriteThrottle: cfg.ThrottleOutOfOrderWrites,
@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler
return ws
}
type priorityNodeState int
type priorityNodeStateRFC7540 int
const (
priorityNodeOpen priorityNodeState = iota
priorityNodeClosed
priorityNodeIdle
priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota
priorityNodeClosedRFC7540
priorityNodeIdleRFC7540
)
// priorityNode is a node in an HTTP/2 priority tree.
// priorityNodeRFC7540 is a node in an HTTP/2 priority tree.
// Each node is associated with a single stream ID.
// See RFC 7540, Section 5.3.
type priorityNode struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeState // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
type priorityNodeRFC7540 struct {
q writeQueue // queue of pending frames to write
id uint32 // id of the stream, or 0 for the root of the tree
weight uint8 // the actual weight is weight+1, so the value is in [1,256]
state priorityNodeStateRFC7540 // open | closed | idle
bytes int64 // number of bytes written by this node, or 0 if closed
subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree
// These links form the priority tree.
parent *priorityNode
kids *priorityNode // start of the kids list
prev, next *priorityNode // doubly-linked list of siblings
parent *priorityNodeRFC7540
kids *priorityNodeRFC7540 // start of the kids list
prev, next *priorityNodeRFC7540 // doubly-linked list of siblings
}
func (n *priorityNode) setParent(parent *priorityNode) {
func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) {
if n == parent {
panic("setParent to self")
}
@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) {
}
}
func (n *priorityNode) addBytes(b int64) {
func (n *priorityNodeRFC7540) addBytes(b int64) {
n.bytes += b
for ; n != nil; n = n.parent {
n.subtreeBytes += b
@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) {
//
// f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true
// if any ancestor p of n is still open (ignoring the root node).
func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool {
func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool {
if !n.q.empty() && f(n, openParent) {
return true
}
@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
// Don't consider the root "open" when updating openParent since
// we can't send data frames on the root stream (only control frames).
if n.id != 0 {
openParent = openParent || (n.state == priorityNodeOpen)
openParent = openParent || (n.state == priorityNodeOpenRFC7540)
}
// Common case: only one kid or all kids have the same weight.
@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
*tmp = append(*tmp, n.kids)
n.kids.setParent(nil)
}
sort.Sort(sortPriorityNodeSiblings(*tmp))
sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp))
for i := len(*tmp) - 1; i >= 0; i-- {
(*tmp)[i].setParent(n) // setParent inserts at the head of n.kids
}
@ -207,11 +207,11 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f
return false
}
type sortPriorityNodeSiblings []*priorityNode
type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540
func (z sortPriorityNodeSiblings) Len() int { return len(z) }
func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblings) Less(i, k int) bool {
func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) }
func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] }
func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool {
return bi/bk <= wi/wk
}
type priorityWriteScheduler struct {
type priorityWriteSchedulerRFC7540 struct {
// root is the root of the priority tree, where root.id = 0.
// The root queues control frames that are not associated with any stream.
root priorityNode
root priorityNodeRFC7540
// nodes maps stream ids to priority tree nodes.
nodes map[uint32]*priorityNode
nodes map[uint32]*priorityNodeRFC7540
// maxID is the maximum stream id in nodes.
maxID uint32
@ -239,7 +239,7 @@ type priorityWriteScheduler struct {
// lists of nodes that have been closed or are idle, but are kept in
// the tree for improved prioritization. When the lengths exceed either
// maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded.
closedNodes, idleNodes []*priorityNode
closedNodes, idleNodes []*priorityNodeRFC7540
// From the config.
maxClosedNodesInTree int
@ -248,19 +248,19 @@ type priorityWriteScheduler struct {
enableWriteThrottle bool
// tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations.
tmp []*priorityNode
tmp []*priorityNodeRFC7540
// pool of empty queues for reuse.
queuePool writeQueuePool
}
func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) {
func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) {
// The stream may be currently idle but cannot be opened or closed.
if curr := ws.nodes[streamID]; curr != nil {
if curr.state != priorityNodeIdle {
if curr.state != priorityNodeIdleRFC7540 {
panic(fmt.Sprintf("stream %d already opened", streamID))
}
curr.state = priorityNodeOpen
curr.state = priorityNodeOpenRFC7540
return
}
@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
if parent == nil {
parent = &ws.root
}
n := &priorityNode{
n := &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeOpen,
weight: priorityDefaultWeightRFC7540,
state: priorityNodeOpenRFC7540,
}
n.setParent(parent)
ws.nodes[streamID] = n
@ -285,19 +285,19 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream
}
}
func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
if streamID == 0 {
panic("violation of WriteScheduler interface: cannot close stream 0")
}
if ws.nodes[streamID] == nil {
panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID))
}
if ws.nodes[streamID].state != priorityNodeOpen {
if ws.nodes[streamID].state != priorityNodeOpenRFC7540 {
panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID))
}
n := ws.nodes[streamID]
n.state = priorityNodeClosed
n.state = priorityNodeClosedRFC7540
n.addBytes(-n.bytes)
q := n.q
@ -310,7 +310,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) {
}
}
func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) {
func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) {
if streamID == 0 {
panic("adjustPriority on root")
}
@ -324,11 +324,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
return
}
ws.maxID = streamID
n = &priorityNode{
n = &priorityNodeRFC7540{
q: *ws.queuePool.get(),
id: streamID,
weight: priorityDefaultWeight,
state: priorityNodeIdle,
weight: priorityDefaultWeightRFC7540,
state: priorityNodeIdleRFC7540,
}
n.setParent(&ws.root)
ws.nodes[streamID] = n
@ -340,7 +340,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
parent := ws.nodes[priority.StreamDep]
if parent == nil {
n.setParent(&ws.root)
n.weight = priorityDefaultWeight
n.weight = priorityDefaultWeightRFC7540
return
}
@ -381,8 +381,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit
n.weight = priority.Weight
}
func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
var n *priorityNode
func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) {
var n *priorityNodeRFC7540
if wr.isControl() {
n = &ws.root
} else {
@ -401,8 +401,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) {
n.q.push(wr)
}
func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool {
func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) {
ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool {
limit := int32(math.MaxInt32)
if openParent {
limit = ws.writeThrottleLimit
@ -428,7 +428,7 @@ func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) {
return wr, ok
}
func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) {
func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) {
if maxSize == 0 {
return
}
@ -442,7 +442,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max
*list = append(*list, n)
}
func (ws *priorityWriteScheduler) removeNode(n *priorityNode) {
func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) {
for n.kids != nil {
n.kids.setParent(n.parent)
}

View File

@ -0,0 +1,209 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
import (
"fmt"
"math"
)
type streamMetadata struct {
location *writeQueue
priority PriorityParam
}
type priorityWriteSchedulerRFC9218 struct {
// control contains control frames (SETTINGS, PING, etc.).
control writeQueue
// heads contain the head of a circular list of streams.
// We put these heads within a nested array that represents urgency and
// incremental, as defined in
// https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters.
// 8 represents u=0 up to u=7, and 2 represents i=false and i=true.
heads [8][2]*writeQueue
// streams contains a mapping between each stream ID and their metadata, so
// we can quickly locate them when needing to, for example, adjust their
// priority.
streams map[uint32]streamMetadata
// queuePool are empty queues for reuse.
queuePool writeQueuePool
// prioritizeIncremental is used to determine whether we should prioritize
// incremental streams or not, when urgency is the same in a given Pop()
// call.
prioritizeIncremental bool
}
func newPriorityWriteSchedulerRFC9128() WriteScheduler {
ws := &priorityWriteSchedulerRFC9218{
streams: make(map[uint32]streamMetadata),
}
return ws
}
func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) {
if ws.streams[streamID].location != nil {
panic(fmt.Errorf("stream %d already opened", streamID))
}
q := ws.queuePool.get()
ws.streams[streamID] = streamMetadata{
location: q,
priority: opt.priority,
}
u, i := opt.priority.urgency, opt.priority.incremental
if ws.heads[u][i] == nil {
ws.heads[u][i] = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.heads[u][i].prev
q.next = ws.heads[u][i]
q.prev.next = q
q.next.prev = q
}
}
func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) {
metadata := ws.streams[streamID]
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
if q == nil {
return
}
if q.next == q {
// This was the only open stream.
ws.heads[u][i] = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.heads[u][i] == q {
ws.heads[u][i] = q.next
}
}
delete(ws.streams, streamID)
ws.queuePool.put(q)
}
func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) {
metadata := ws.streams[streamID]
q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental
if q == nil {
return
}
// Remove stream from current location.
if q.next == q {
// This was the only open stream.
ws.heads[u][i] = nil
} else {
q.prev.next = q.next
q.next.prev = q.prev
if ws.heads[u][i] == q {
ws.heads[u][i] = q.next
}
}
// Insert stream to the new queue.
u, i = priority.urgency, priority.incremental
if ws.heads[u][i] == nil {
ws.heads[u][i] = q
q.next = q
q.prev = q
} else {
// Queues are stored in a ring.
// Insert the new stream before ws.head, putting it at the end of the list.
q.prev = ws.heads[u][i].prev
q.next = ws.heads[u][i]
q.prev.next = q
q.next.prev = q
}
// Update the metadata.
ws.streams[streamID] = streamMetadata{
location: q,
priority: priority,
}
}
func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) {
if wr.isControl() {
ws.control.push(wr)
return
}
q := ws.streams[wr.StreamID()].location
if q == nil {
// This is a closed stream.
// wr should not be a HEADERS or DATA frame.
// We push the request onto the control queue.
if wr.DataSize() > 0 {
panic("add DATA on non-open stream")
}
ws.control.push(wr)
return
}
q.push(wr)
}
func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) {
// Control and RST_STREAM frames first.
if !ws.control.empty() {
return ws.control.shift(), true
}
// On the next Pop(), we want to prioritize incremental if we prioritized
// non-incremental request of the same urgency this time. Vice-versa.
// i.e. when there are incremental and non-incremental requests at the same
// priority, we give 50% of our bandwidth to the incremental ones in
// aggregate and 50% to the first non-incremental one (since
// non-incremental streams do not use round-robin writes).
ws.prioritizeIncremental = !ws.prioritizeIncremental
// Always prioritize lowest u (i.e. highest urgency level).
for u := range ws.heads {
for i := range ws.heads[u] {
// When we want to prioritize incremental, we try to pop i=true
// first before i=false when u is the same.
if ws.prioritizeIncremental {
i = (i + 1) % 2
}
q := ws.heads[u][i]
if q == nil {
continue
}
for {
if wr, ok := q.consume(math.MaxInt32); ok {
if i == 1 {
// For incremental streams, we update head to q.next so
// we can round-robin between multiple streams that can
// immediately benefit from partial writes.
ws.heads[u][i] = q.next
} else {
// For non-incremental streams, we try to finish one to
// completion rather than doing round-robin. However,
// we update head here so that if q.consume() is !ok
// (e.g. the stream has no more frame to consume), head
// is updated to the next q that has frames to consume
// on future iterations. This way, we do not prioritize
// writing to unavailable stream on next Pop() calls,
// preventing head-of-line blocking.
ws.heads[u][i] = q
}
return wr, true
}
q = q.next
if q == ws.heads[u][i] {
break
}
}
}
}
return FrameWriteRequest{}, false
}
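OpenStream and AdjustStream above keep each (urgency, incremental) bucket as a circular doubly linked list of write queues, inserting new streams just before the head. A stripped-down sketch of that ring insertion with a plain node type (illustrative, not the scheduler's writeQueue):

package main

import "fmt"

type node struct {
	id         uint32
	prev, next *node
}

// insert places n at the tail of the ring whose head is head and returns the
// (possibly new) head, mirroring the insertion in OpenStream above.
func insert(head, n *node) *node {
	if head == nil {
		n.next, n.prev = n, n
		return n
	}
	n.prev = head.prev
	n.next = head
	n.prev.next = n
	n.next.prev = n
	return head
}

func main() {
	var head *node
	for id := uint32(1); id <= 3; id++ {
		head = insert(head, &node{id: id})
	}
	for n, first := head, true; first || n != head; n, first = n.next, false {
		fmt.Println(n.id) // prints 1, 2, 3
	}
}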

View File

@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct {
}
// newRoundRobinWriteScheduler constructs a new write scheduler.
// The round robin scheduler priorizes control frames
// The round robin scheduler prioritizes control frames
// like SETTINGS and PING over DATA frames.
// When there are no control frames to send, it performs a round-robin
// selection from the ready streams.

View File

@ -51,7 +51,7 @@ type EncodeHeadersParam struct {
DefaultUserAgent string
}
// EncodeHeadersParam is the result of EncodeHeaders.
// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
@ -399,7 +399,7 @@ type ServerRequestResult struct {
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
// It might be a bit odd to return errors this way rather than returing an error,
// It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}

View File

@ -297,7 +297,7 @@ func (up *UsernamePassword) Authenticate(ctx context.Context, rw io.ReadWriter,
b = append(b, up.Username...)
b = append(b, byte(len(up.Password)))
b = append(b, up.Password...)
// TODO(mikio): handle IO deadlines and cancelation if
// TODO(mikio): handle IO deadlines and cancellation if
// necessary
if _, err := rw.Write(b); err != nil {
return err

vendor/golang.org/x/sys/cpu/cpu.go generated vendored
View File

@ -92,6 +92,9 @@ var ARM64 struct {
HasSHA2 bool // SHA2 hardware implementation
HasCRC32 bool // CRC32 hardware implementation
HasATOMICS bool // Atomic memory operation instruction set
HasHPDS bool // Hierarchical permission disables in translations tables
HasLOR bool // Limited ordering regions
HasPAN bool // Privileged access never
HasFPHP bool // Half precision floating-point instruction set
HasASIMDHP bool // Advanced SIMD half precision instruction set
HasCPUID bool // CPUID identification scheme registers
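The struct above gains HasHPDS, HasLOR, and HasPAN, which the updated parseARM64SystemRegisters fills from ID_AA64MMFR1_EL1. A minimal way to read them from application code; on non-ARM64 builds these fields simply stay false.

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	fmt.Println("HPDS:", cpu.ARM64.HasHPDS) // hierarchical permission disables
	fmt.Println("LOR: ", cpu.ARM64.HasLOR)  // limited ordering regions
	fmt.Println("PAN: ", cpu.ARM64.HasPAN)  // privileged access never
}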


@ -65,10 +65,10 @@ func setMinimalFeatures() {
func readARM64Registers() {
Initialized = true
parseARM64SystemRegisters(getisar0(), getisar1(), getpfr0())
parseARM64SystemRegisters(getisar0(), getisar1(), getmmfr1(), getpfr0())
}
func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
func parseARM64SystemRegisters(isar0, isar1, mmfr1, pfr0 uint64) {
// ID_AA64ISAR0_EL1
switch extractBits(isar0, 4, 7) {
case 1:
@ -152,6 +152,22 @@ func parseARM64SystemRegisters(isar0, isar1, pfr0 uint64) {
ARM64.HasI8MM = true
}
// ID_AA64MMFR1_EL1
switch extractBits(mmfr1, 12, 15) {
case 1, 2:
ARM64.HasHPDS = true
}
switch extractBits(mmfr1, 16, 19) {
case 1:
ARM64.HasLOR = true
}
switch extractBits(mmfr1, 20, 23) {
case 1, 2, 3:
ARM64.HasPAN = true
}
// ID_AA64PFR0_EL1
switch extractBits(pfr0, 16, 19) {
case 0:

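The new mmfr1 argument is decoded the same way as the other ID registers: each feature is a 4-bit field pulled out with extractBits and matched against architecturally defined values. A rough sketch of that kind of field extraction, assuming the HasPAN layout shown above (the field helper below is illustrative, not the package's extractBits):

package main

import "fmt"

// field returns the 4-bit feature field of reg spanning bits start..end
// inclusive, mirroring how the parse function reads ID_AA64MMFR1_EL1.
// Illustrative only; x/sys/cpu has its own extractBits.
func field(reg uint64, start, end uint) uint64 {
    return (reg >> start) & ((1 << (end - start + 1)) - 1)
}

func main() {
    // Hypothetical register value with the PAN field (bits 20..23) set to 2.
    var mmfr1 uint64 = 0x2 << 20
    hasPAN := false
    switch field(mmfr1, 20, 23) {
    case 1, 2, 3:
        hasPAN = true
    }
    fmt.Println("HasPAN:", hasPAN) // callers normally just read cpu.ARM64.HasPAN
}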

@ -9,31 +9,34 @@
// func getisar0() uint64
TEXT ·getisar0(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 0 into x0
// mrs x0, ID_AA64ISAR0_EL1 = d5380600
WORD $0xd5380600
MRS ID_AA64ISAR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getisar1() uint64
TEXT ·getisar1(SB),NOSPLIT,$0-8
// get Instruction Set Attributes 1 into x0
// mrs x0, ID_AA64ISAR1_EL1 = d5380620
WORD $0xd5380620
MRS ID_AA64ISAR1_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getmmfr1() uint64
TEXT ·getmmfr1(SB),NOSPLIT,$0-8
// get Memory Model Feature Register 1 into x0
MRS ID_AA64MMFR1_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getpfr0() uint64
TEXT ·getpfr0(SB),NOSPLIT,$0-8
// get Processor Feature Register 0 into x0
// mrs x0, ID_AA64PFR0_EL1 = d5380400
WORD $0xd5380400
MRS ID_AA64PFR0_EL1, R0
MOVD R0, ret+0(FP)
RET
// func getzfr0() uint64
TEXT ·getzfr0(SB),NOSPLIT,$0-8
// get SVE Feature Register 0 into x0
// mrs x0, ID_AA64ZFR0_EL1 = d5380480
WORD $0xd5380480
MRS ID_AA64ZFR0_EL1, R0
MOVD R0, ret+0(FP)
RET


@ -8,5 +8,6 @@ package cpu
func getisar0() uint64
func getisar1() uint64
func getmmfr1() uint64
func getpfr0() uint64
func getzfr0() uint64


@ -8,4 +8,5 @@ package cpu
func getisar0() uint64 { return 0 }
func getisar1() uint64 { return 0 }
func getmmfr1() uint64 { return 0 }
func getpfr0() uint64 { return 0 }


@ -167,7 +167,7 @@ func doinit() {
setMinimalFeatures()
return
}
parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64pfr0)
parseARM64SystemRegisters(cpuid.aa64isar0, cpuid.aa64isar1, cpuid.aa64mmfr1, cpuid.aa64pfr0)
Initialized = true
}


@ -59,7 +59,7 @@ func doinit() {
if !ok {
return
}
parseARM64SystemRegisters(isar0, isar1, 0)
parseARM64SystemRegisters(isar0, isar1, 0, 0)
Initialized = true
}


@ -41,6 +41,15 @@ func (s *CPUSet) Zero() {
clear(s[:])
}
// Fill adds all possible CPU bits to the set s. On Linux, [SchedSetaffinity]
// will silently ignore any invalid CPU bits in [CPUSet] so this is an
// efficient way of resetting the CPU affinity of a process.
func (s *CPUSet) Fill() {
for i := range s {
s[i] = ^cpuMask(0)
}
}
func cpuBitsIndex(cpu int) int {
return cpu / _NCPUBITS
}
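A usage sketch for the new Fill method under the use case its comment describes, assuming Linux and the existing unix.SchedSetaffinity signature (pid 0 meaning the calling thread):

//go:build linux

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    var set unix.CPUSet
    set.Fill() // every possible CPU bit; the kernel ignores bits for CPUs that do not exist

    // pid 0 means the calling thread.
    if err := unix.SchedSetaffinity(0, &set); err != nil {
        fmt.Println("SchedSetaffinity:", err)
        return
    }
    fmt.Println("affinity reset to all CPUs")
}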


@ -23,7 +23,5 @@ func (fds *FdSet) IsSet(fd int) bool {
// Zero clears the set fds.
func (fds *FdSet) Zero() {
for i := range fds.Bits {
fds.Bits[i] = 0
}
clear(fds.Bits[:])
}
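Several hunks in this update replace explicit zeroing loops with Go 1.21's clear built-in, which zeroes every element of a slice (and deletes every entry of a map). For example:

package main

import "fmt"

func main() {
    bits := []int64{1, 2, 3}
    clear(bits)       // equivalent to the removed loop that assigned 0 to each element
    fmt.Println(bits) // [0 0 0]
}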


@ -111,9 +111,7 @@ func (ifr *Ifreq) SetUint32(v uint32) {
// clear zeroes the ifreq's union field to prevent trailing garbage data from
// being sent to the kernel if an ifreq is reused.
func (ifr *Ifreq) clear() {
for i := range ifr.raw.Ifru {
ifr.raw.Ifru[i] = 0
}
clear(ifr.raw.Ifru[:])
}
// TODO(mdlayher): export as IfreqData? For now we can provide helpers such as


@ -49,6 +49,7 @@ esac
if [[ "$GOOS" = "linux" ]]; then
# Use the Docker-based build system
# Files generated through docker (use $cmd so you can Ctl-C the build or run)
set -e
$cmd docker build --tag generate:$GOOS $GOOS
$cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && pwd):/build generate:$GOOS
exit


@ -226,6 +226,7 @@ struct ltchars {
#include <linux/cryptouser.h>
#include <linux/devlink.h>
#include <linux/dm-ioctl.h>
#include <linux/elf.h>
#include <linux/errqueue.h>
#include <linux/ethtool_netlink.h>
#include <linux/falloc.h>
@ -529,6 +530,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
$2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||


@ -801,9 +801,7 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) {
// one. The kernel expects SID to be in network byte order.
binary.BigEndian.PutUint16(sa.raw[6:8], sa.SID)
copy(sa.raw[8:14], sa.Remote)
for i := 14; i < 14+IFNAMSIZ; i++ {
sa.raw[i] = 0
}
clear(sa.raw[14 : 14+IFNAMSIZ])
copy(sa.raw[14:], sa.Dev)
return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil
}
@ -2645,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
func SetMemPolicy(mode int, mask *CPUSet) error {
return setMemPolicy(mode, mask, _CPU_SETSIZE)
}
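A usage sketch for the new SetMemPolicy wrapper, assuming the MPOL_* constants added elsewhere in this diff and that the CPUSet argument doubles as the kernel's NUMA node mask (bit 0 = node 0); illustrative only:

//go:build linux

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    // Bind future allocations of the calling thread to NUMA node 0.
    var nodes unix.CPUSet
    nodes.Set(0) // bit 0 = node 0; the CPUSet is reused as a node mask here (assumption)

    if err := unix.SetMemPolicy(unix.MPOL_BIND, &nodes); err != nil {
        fmt.Println("SetMemPolicy:", err)
        return
    }
    fmt.Println("memory policy set to MPOL_BIND on node 0")
}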


@ -248,6 +248,23 @@ func Statvfs(path string, buf *Statvfs_t) (err error) {
return Statvfs1(path, buf, ST_WAIT)
}
func Getvfsstat(buf []Statvfs_t, flags int) (n int, err error) {
var (
_p0 unsafe.Pointer
bufsize uintptr
)
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
bufsize = unsafe.Sizeof(Statvfs_t{}) * uintptr(len(buf))
}
r0, _, e1 := Syscall(SYS_GETVFSSTAT, uintptr(_p0), bufsize, uintptr(flags))
n = int(r0)
if e1 != 0 {
err = e1
}
return
}
/*
* Exposed directly
*/
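The new Getvfsstat wrapper is normally used with the two-call pattern from getvfsstat(2): a nil slice first to learn how many filesystems are mounted, then a second call with an adequately sized buffer. A sketch, assuming NetBSD:

//go:build netbsd

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    // With a nil buffer, getvfsstat(2) only reports how many filesystems are mounted.
    n, err := unix.Getvfsstat(nil, unix.ST_WAIT)
    if err != nil {
        fmt.Println("Getvfsstat:", err)
        return
    }

    buf := make([]unix.Statvfs_t, n)
    n, err = unix.Getvfsstat(buf, unix.ST_WAIT)
    if err != nil {
        fmt.Println("Getvfsstat:", err)
        return
    }
    fmt.Printf("%d mounted filesystems\n", n)
}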


@ -853,20 +853,86 @@ const (
DM_VERSION_MAJOR = 0x4
DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
DT_ADDRRNGHI = 0x6ffffeff
DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6
DT_CHR = 0x2
DT_DEBUG = 0x15
DT_DIR = 0x4
DT_ENCODING = 0x20
DT_FIFO = 0x1
DT_FINI = 0xd
DT_FLAGS_1 = 0x6ffffffb
DT_GNU_HASH = 0x6ffffef5
DT_HASH = 0x4
DT_HIOS = 0x6ffff000
DT_HIPROC = 0x7fffffff
DT_INIT = 0xc
DT_JMPREL = 0x17
DT_LNK = 0xa
DT_LOOS = 0x6000000d
DT_LOPROC = 0x70000000
DT_NEEDED = 0x1
DT_NULL = 0x0
DT_PLTGOT = 0x3
DT_PLTREL = 0x14
DT_PLTRELSZ = 0x2
DT_REG = 0x8
DT_REL = 0x11
DT_RELA = 0x7
DT_RELACOUNT = 0x6ffffff9
DT_RELAENT = 0x9
DT_RELASZ = 0x8
DT_RELCOUNT = 0x6ffffffa
DT_RELENT = 0x13
DT_RELSZ = 0x12
DT_RPATH = 0xf
DT_SOCK = 0xc
DT_SONAME = 0xe
DT_STRSZ = 0xa
DT_STRTAB = 0x5
DT_SYMBOLIC = 0x10
DT_SYMENT = 0xb
DT_SYMTAB = 0x6
DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0
DT_VALRNGHI = 0x6ffffdff
DT_VALRNGLO = 0x6ffffd00
DT_VERDEF = 0x6ffffffc
DT_VERDEFNUM = 0x6ffffffd
DT_VERNEED = 0x6ffffffe
DT_VERNEEDNUM = 0x6fffffff
DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe
ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53
EI_CLASS = 0x4
EI_DATA = 0x5
EI_MAG0 = 0x0
EI_MAG1 = 0x1
EI_MAG2 = 0x2
EI_MAG3 = 0x3
EI_NIDENT = 0x10
EI_OSABI = 0x7
EI_PAD = 0x8
EI_VERSION = 0x6
ELFCLASS32 = 0x1
ELFCLASS64 = 0x2
ELFCLASSNONE = 0x0
ELFCLASSNUM = 0x3
ELFDATA2LSB = 0x1
ELFDATA2MSB = 0x2
ELFDATANONE = 0x0
ELFMAG = "\177ELF"
ELFMAG0 = 0x7f
ELFMAG1 = 'E'
ELFMAG2 = 'L'
ELFMAG3 = 'F'
ELFOSABI_LINUX = 0x3
ELFOSABI_NONE = 0x0
EM_386 = 0x3
EM_486 = 0x6
EM_68K = 0x4
@ -1152,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
ET_CORE = 0x4
ET_DYN = 0x3
ET_EXEC = 0x2
ET_HIPROC = 0xffff
ET_LOPROC = 0xff00
ET_NONE = 0x0
ET_REL = 0x1
EV_ABS = 0x3
EV_CNT = 0x20
EV_CURRENT = 0x1
EV_FF = 0x15
EV_FF_STATUS = 0x17
EV_KEY = 0x1
EV_LED = 0x11
EV_MAX = 0x1f
EV_MSC = 0x4
EV_NONE = 0x0
EV_NUM = 0x2
EV_PWR = 0x16
EV_REL = 0x2
EV_REP = 0x14
@ -2276,7 +2352,167 @@ const (
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
NN_386_IOPERM = "LINUX"
NN_386_TLS = "LINUX"
NN_ARC_V2 = "LINUX"
NN_ARM_FPMR = "LINUX"
NN_ARM_GCS = "LINUX"
NN_ARM_HW_BREAK = "LINUX"
NN_ARM_HW_WATCH = "LINUX"
NN_ARM_PACA_KEYS = "LINUX"
NN_ARM_PACG_KEYS = "LINUX"
NN_ARM_PAC_ENABLED_KEYS = "LINUX"
NN_ARM_PAC_MASK = "LINUX"
NN_ARM_POE = "LINUX"
NN_ARM_SSVE = "LINUX"
NN_ARM_SVE = "LINUX"
NN_ARM_SYSTEM_CALL = "LINUX"
NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
NN_ARM_TLS = "LINUX"
NN_ARM_VFP = "LINUX"
NN_ARM_ZA = "LINUX"
NN_ARM_ZT = "LINUX"
NN_AUXV = "CORE"
NN_FILE = "CORE"
NN_GNU_PROPERTY_TYPE_0 = "GNU"
NN_LOONGARCH_CPUCFG = "LINUX"
NN_LOONGARCH_CSR = "LINUX"
NN_LOONGARCH_HW_BREAK = "LINUX"
NN_LOONGARCH_HW_WATCH = "LINUX"
NN_LOONGARCH_LASX = "LINUX"
NN_LOONGARCH_LBT = "LINUX"
NN_LOONGARCH_LSX = "LINUX"
NN_MIPS_DSP = "LINUX"
NN_MIPS_FP_MODE = "LINUX"
NN_MIPS_MSA = "LINUX"
NN_PPC_DEXCR = "LINUX"
NN_PPC_DSCR = "LINUX"
NN_PPC_EBB = "LINUX"
NN_PPC_HASHKEYR = "LINUX"
NN_PPC_PKEY = "LINUX"
NN_PPC_PMU = "LINUX"
NN_PPC_PPR = "LINUX"
NN_PPC_SPE = "LINUX"
NN_PPC_TAR = "LINUX"
NN_PPC_TM_CDSCR = "LINUX"
NN_PPC_TM_CFPR = "LINUX"
NN_PPC_TM_CGPR = "LINUX"
NN_PPC_TM_CPPR = "LINUX"
NN_PPC_TM_CTAR = "LINUX"
NN_PPC_TM_CVMX = "LINUX"
NN_PPC_TM_CVSX = "LINUX"
NN_PPC_TM_SPR = "LINUX"
NN_PPC_VMX = "LINUX"
NN_PPC_VSX = "LINUX"
NN_PRFPREG = "CORE"
NN_PRPSINFO = "CORE"
NN_PRSTATUS = "CORE"
NN_PRXFPREG = "LINUX"
NN_RISCV_CSR = "LINUX"
NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
NN_RISCV_VECTOR = "LINUX"
NN_S390_CTRS = "LINUX"
NN_S390_GS_BC = "LINUX"
NN_S390_GS_CB = "LINUX"
NN_S390_HIGH_GPRS = "LINUX"
NN_S390_LAST_BREAK = "LINUX"
NN_S390_PREFIX = "LINUX"
NN_S390_PV_CPU_DATA = "LINUX"
NN_S390_RI_CB = "LINUX"
NN_S390_SYSTEM_CALL = "LINUX"
NN_S390_TDB = "LINUX"
NN_S390_TIMER = "LINUX"
NN_S390_TODCMP = "LINUX"
NN_S390_TODPREG = "LINUX"
NN_S390_VXRS_HIGH = "LINUX"
NN_S390_VXRS_LOW = "LINUX"
NN_SIGINFO = "CORE"
NN_TASKSTRUCT = "CORE"
NN_VMCOREDD = "LINUX"
NN_X86_SHSTK = "LINUX"
NN_X86_XSAVE_LAYOUT = "LINUX"
NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673
NT_386_IOPERM = 0x201
NT_386_TLS = 0x200
NT_ARC_V2 = 0x600
NT_ARM_FPMR = 0x40e
NT_ARM_GCS = 0x410
NT_ARM_HW_BREAK = 0x402
NT_ARM_HW_WATCH = 0x403
NT_ARM_PACA_KEYS = 0x407
NT_ARM_PACG_KEYS = 0x408
NT_ARM_PAC_ENABLED_KEYS = 0x40a
NT_ARM_PAC_MASK = 0x406
NT_ARM_POE = 0x40f
NT_ARM_SSVE = 0x40b
NT_ARM_SVE = 0x405
NT_ARM_SYSTEM_CALL = 0x404
NT_ARM_TAGGED_ADDR_CTRL = 0x409
NT_ARM_TLS = 0x401
NT_ARM_VFP = 0x400
NT_ARM_ZA = 0x40c
NT_ARM_ZT = 0x40d
NT_AUXV = 0x6
NT_FILE = 0x46494c45
NT_GNU_PROPERTY_TYPE_0 = 0x5
NT_LOONGARCH_CPUCFG = 0xa00
NT_LOONGARCH_CSR = 0xa01
NT_LOONGARCH_HW_BREAK = 0xa05
NT_LOONGARCH_HW_WATCH = 0xa06
NT_LOONGARCH_LASX = 0xa03
NT_LOONGARCH_LBT = 0xa04
NT_LOONGARCH_LSX = 0xa02
NT_MIPS_DSP = 0x800
NT_MIPS_FP_MODE = 0x801
NT_MIPS_MSA = 0x802
NT_PPC_DEXCR = 0x111
NT_PPC_DSCR = 0x105
NT_PPC_EBB = 0x106
NT_PPC_HASHKEYR = 0x112
NT_PPC_PKEY = 0x110
NT_PPC_PMU = 0x107
NT_PPC_PPR = 0x104
NT_PPC_SPE = 0x101
NT_PPC_TAR = 0x103
NT_PPC_TM_CDSCR = 0x10f
NT_PPC_TM_CFPR = 0x109
NT_PPC_TM_CGPR = 0x108
NT_PPC_TM_CPPR = 0x10e
NT_PPC_TM_CTAR = 0x10d
NT_PPC_TM_CVMX = 0x10a
NT_PPC_TM_CVSX = 0x10b
NT_PPC_TM_SPR = 0x10c
NT_PPC_VMX = 0x100
NT_PPC_VSX = 0x102
NT_PRFPREG = 0x2
NT_PRPSINFO = 0x3
NT_PRSTATUS = 0x1
NT_PRXFPREG = 0x46e62b7f
NT_RISCV_CSR = 0x900
NT_RISCV_TAGGED_ADDR_CTRL = 0x902
NT_RISCV_VECTOR = 0x901
NT_S390_CTRS = 0x304
NT_S390_GS_BC = 0x30c
NT_S390_GS_CB = 0x30b
NT_S390_HIGH_GPRS = 0x300
NT_S390_LAST_BREAK = 0x306
NT_S390_PREFIX = 0x305
NT_S390_PV_CPU_DATA = 0x30e
NT_S390_RI_CB = 0x30d
NT_S390_SYSTEM_CALL = 0x307
NT_S390_TDB = 0x308
NT_S390_TIMER = 0x301
NT_S390_TODCMP = 0x302
NT_S390_TODPREG = 0x303
NT_S390_VXRS_HIGH = 0x30a
NT_S390_VXRS_LOW = 0x309
NT_SIGINFO = 0x53494749
NT_TASKSTRUCT = 0x4
NT_VMCOREDD = 0x700
NT_X86_SHSTK = 0x204
NT_X86_XSAVE_LAYOUT = 0x205
NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8
OFDEL = 0x80
@ -2463,6 +2699,59 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
PF_ALG = 0x26
PF_APPLETALK = 0x5
PF_ASH = 0x12
PF_ATMPVC = 0x8
PF_ATMSVC = 0x14
PF_AX25 = 0x3
PF_BLUETOOTH = 0x1f
PF_BRIDGE = 0x7
PF_CAIF = 0x25
PF_CAN = 0x1d
PF_DECnet = 0xc
PF_ECONET = 0x13
PF_FILE = 0x1
PF_IB = 0x1b
PF_IEEE802154 = 0x24
PF_INET = 0x2
PF_INET6 = 0xa
PF_IPX = 0x4
PF_IRDA = 0x17
PF_ISDN = 0x22
PF_IUCV = 0x20
PF_KCM = 0x29
PF_KEY = 0xf
PF_LLC = 0x1a
PF_LOCAL = 0x1
PF_MAX = 0x2e
PF_MCTP = 0x2d
PF_MPLS = 0x1c
PF_NETBEUI = 0xd
PF_NETLINK = 0x10
PF_NETROM = 0x6
PF_NFC = 0x27
PF_PACKET = 0x11
PF_PHONET = 0x23
PF_PPPOX = 0x18
PF_QIPCRTR = 0x2a
PF_R = 0x4
PF_RDS = 0x15
PF_ROSE = 0xb
PF_ROUTE = 0x10
PF_RXRPC = 0x21
PF_SECURITY = 0xe
PF_SMC = 0x2b
PF_SNA = 0x16
PF_TIPC = 0x1e
PF_UNIX = 0x1
PF_UNSPEC = 0x0
PF_VSOCK = 0x28
PF_W = 0x2
PF_WANPIPE = 0x19
PF_X = 0x1
PF_X25 = 0x9
PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
@ -2758,6 +3047,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0
PT_AARCH64_MEMTAG_MTE = 0x70000002
PT_DYNAMIC = 0x2
PT_GNU_EH_FRAME = 0x6474e550
PT_GNU_PROPERTY = 0x6474e553
PT_GNU_RELRO = 0x6474e552
PT_GNU_STACK = 0x6474e551
PT_HIOS = 0x6fffffff
PT_HIPROC = 0x7fffffff
PT_INTERP = 0x3
PT_LOAD = 0x1
PT_LOOS = 0x60000000
PT_LOPROC = 0x70000000
PT_NOTE = 0x4
PT_NULL = 0x0
PT_PHDR = 0x6
PT_SHLIB = 0x5
PT_TLS = 0x7
P_ALL = 0x0
P_PGID = 0x2
P_PID = 0x1
@ -3091,6 +3397,47 @@ const (
SEEK_MAX = 0x4
SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c
SHF_ALLOC = 0x2
SHF_EXCLUDE = 0x8000000
SHF_EXECINSTR = 0x4
SHF_GROUP = 0x200
SHF_INFO_LINK = 0x40
SHF_LINK_ORDER = 0x80
SHF_MASKOS = 0xff00000
SHF_MASKPROC = 0xf0000000
SHF_MERGE = 0x10
SHF_ORDERED = 0x4000000
SHF_OS_NONCONFORMING = 0x100
SHF_RELA_LIVEPATCH = 0x100000
SHF_RO_AFTER_INIT = 0x200000
SHF_STRINGS = 0x20
SHF_TLS = 0x400
SHF_WRITE = 0x1
SHN_ABS = 0xfff1
SHN_COMMON = 0xfff2
SHN_HIPROC = 0xff1f
SHN_HIRESERVE = 0xffff
SHN_LIVEPATCH = 0xff20
SHN_LOPROC = 0xff00
SHN_LORESERVE = 0xff00
SHN_UNDEF = 0x0
SHT_DYNAMIC = 0x6
SHT_DYNSYM = 0xb
SHT_HASH = 0x5
SHT_HIPROC = 0x7fffffff
SHT_HIUSER = 0xffffffff
SHT_LOPROC = 0x70000000
SHT_LOUSER = 0x80000000
SHT_NOBITS = 0x8
SHT_NOTE = 0x7
SHT_NULL = 0x0
SHT_NUM = 0xc
SHT_PROGBITS = 0x1
SHT_REL = 0x9
SHT_RELA = 0x4
SHT_SHLIB = 0xa
SHT_STRTAB = 0x3
SHT_SYMTAB = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@ -3317,6 +3664,16 @@ const (
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
STB_GLOBAL = 0x1
STB_LOCAL = 0x0
STB_WEAK = 0x2
STT_COMMON = 0x5
STT_FILE = 0x4
STT_FUNC = 0x2
STT_NOTYPE = 0x0
STT_OBJECT = 0x1
STT_SECTION = 0x3
STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2
@ -3553,6 +3910,8 @@ const (
UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997
VERASE = 0x2
VER_FLG_BASE = 0x1
VER_FLG_WEAK = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf


@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
_, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
if e1 != 0 {
err = errnoErr(e1)
}
return
}


@ -3590,6 +3590,8 @@ type Nhmsg struct {
Flags uint32
}
const SizeofNhmsg = 0x8
type NexthopGrp struct {
Id uint32
Weight uint8
@ -3597,6 +3599,8 @@ type NexthopGrp struct {
Resvd2 uint16
}
const SizeofNexthopGrp = 0x8
const (
NHA_UNSPEC = 0x0
NHA_ID = 0x1
@ -6332,3 +6336,30 @@ type SockDiagReq struct {
}
const RTM_NEWNVLAN = 0x70
const (
MPOL_BIND = 0x2
MPOL_DEFAULT = 0x0
MPOL_F_ADDR = 0x2
MPOL_F_MEMS_ALLOWED = 0x4
MPOL_F_MOF = 0x8
MPOL_F_MORON = 0x10
MPOL_F_NODE = 0x1
MPOL_F_NUMA_BALANCING = 0x2000
MPOL_F_RELATIVE_NODES = 0x4000
MPOL_F_SHARED = 0x1
MPOL_F_STATIC_NODES = 0x8000
MPOL_INTERLEAVE = 0x3
MPOL_LOCAL = 0x4
MPOL_MAX = 0x7
MPOL_MF_INTERNAL = 0x10
MPOL_MF_LAZY = 0x8
MPOL_MF_MOVE_ALL = 0x4
MPOL_MF_MOVE = 0x2
MPOL_MF_STRICT = 0x1
MPOL_MF_VALID = 0x7
MPOL_MODE_FLAGS = 0xe000
MPOL_PREFERRED = 0x1
MPOL_PREFERRED_MANY = 0x5
MPOL_WEIGHTED_INTERLEAVE = 0x6
)


@ -321,6 +321,8 @@ func NewCallbackCDecl(fn interface{}) uintptr {
//sys SetConsoleOutputCP(cp uint32) (err error) = kernel32.SetConsoleOutputCP
//sys WriteConsole(console Handle, buf *uint16, towrite uint32, written *uint32, reserved *byte) (err error) = kernel32.WriteConsoleW
//sys ReadConsole(console Handle, buf *uint16, toread uint32, read *uint32, inputControl *byte) (err error) = kernel32.ReadConsoleW
//sys GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) = kernel32.GetNumberOfConsoleInputEvents
//sys FlushConsoleInputBuffer(console Handle) (err error) = kernel32.FlushConsoleInputBuffer
//sys resizePseudoConsole(pconsole Handle, size uint32) (hr error) = kernel32.ResizePseudoConsole
//sys CreateToolhelp32Snapshot(flags uint32, processId uint32) (handle Handle, err error) [failretval==InvalidHandle] = kernel32.CreateToolhelp32Snapshot
//sys Module32First(snapshot Handle, moduleEntry *ModuleEntry32) (err error) = kernel32.Module32FirstW
@ -890,8 +892,12 @@ const socket_error = uintptr(^uint32(0))
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2
//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
@ -914,6 +920,17 @@ type RawSockaddrInet6 struct {
Scope_id uint32
}
// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See
// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet.
//
// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using
// unsafe, depending on the address family.
type RawSockaddrInet struct {
Family uint16
Port uint16
Data [6]uint32
}
type RawSockaddr struct {
Family uint16
Data [14]int8

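As the new comment notes, RawSockaddrInet is a union interpreted according to its Family field; a minimal sketch of that unsafe conversion (Windows-only, illustrative):

//go:build windows

package main

import (
    "fmt"
    "net/netip"
    "unsafe"

    "golang.org/x/sys/windows"
)

// addrOf interprets the SOCKADDR_INET union according to its address family.
func addrOf(sa *windows.RawSockaddrInet) (netip.Addr, bool) {
    switch sa.Family {
    case windows.AF_INET:
        sa4 := (*windows.RawSockaddrInet4)(unsafe.Pointer(sa))
        return netip.AddrFrom4(sa4.Addr), true
    case windows.AF_INET6:
        sa6 := (*windows.RawSockaddrInet6)(unsafe.Pointer(sa))
        return netip.AddrFrom16(sa6.Addr), true
    }
    return netip.Addr{}, false
}

func main() {
    var sa windows.RawSockaddrInet
    sa.Family = windows.AF_INET
    sa4 := (*windows.RawSockaddrInet4)(unsafe.Pointer(&sa))
    sa4.Addr = [4]byte{127, 0, 0, 1}

    if a, ok := addrOf(&sa); ok {
        fmt.Println(a) // 127.0.0.1
    }
}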

@ -65,6 +65,22 @@ var signals = [...]string{
15: "terminated",
}
// File flags for [os.OpenFile]. The O_ prefix is used to indicate
// that these flags are specific to the OpenFile function.
const (
O_FILE_FLAG_OPEN_NO_RECALL = FILE_FLAG_OPEN_NO_RECALL
O_FILE_FLAG_OPEN_REPARSE_POINT = FILE_FLAG_OPEN_REPARSE_POINT
O_FILE_FLAG_SESSION_AWARE = FILE_FLAG_SESSION_AWARE
O_FILE_FLAG_POSIX_SEMANTICS = FILE_FLAG_POSIX_SEMANTICS
O_FILE_FLAG_BACKUP_SEMANTICS = FILE_FLAG_BACKUP_SEMANTICS
O_FILE_FLAG_DELETE_ON_CLOSE = FILE_FLAG_DELETE_ON_CLOSE
O_FILE_FLAG_SEQUENTIAL_SCAN = FILE_FLAG_SEQUENTIAL_SCAN
O_FILE_FLAG_RANDOM_ACCESS = FILE_FLAG_RANDOM_ACCESS
O_FILE_FLAG_NO_BUFFERING = FILE_FLAG_NO_BUFFERING
O_FILE_FLAG_OVERLAPPED = FILE_FLAG_OVERLAPPED
O_FILE_FLAG_WRITE_THROUGH = FILE_FLAG_WRITE_THROUGH
)
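A usage sketch for these aliases, assuming (as the comment indicates) that they can be OR'ed into the flag argument of os.OpenFile on Windows; backup semantics is the flag classically required to open a directory handle:

//go:build windows

package main

import (
    "fmt"
    "os"

    "golang.org/x/sys/windows"
)

func main() {
    // Assumption: the O_FILE_FLAG_* aliases are OR'ed into os.OpenFile's flag argument.
    // FILE_FLAG_BACKUP_SEMANTICS is what a directory handle traditionally needs.
    f, err := os.OpenFile(`C:\Windows`, os.O_RDONLY|windows.O_FILE_FLAG_BACKUP_SEMANTICS, 0)
    if err != nil {
        fmt.Println("OpenFile:", err)
        return
    }
    defer f.Close()
    fmt.Println("opened:", f.Name())
}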
const (
FILE_READ_DATA = 0x00000001
FILE_READ_ATTRIBUTES = 0x00000080
@ -2304,6 +2320,82 @@ type MibIfRow2 struct {
OutQLen uint64
}
// IP_ADDRESS_PREFIX stores an IP address prefix. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix.
type IpAddressPrefix struct {
Prefix RawSockaddrInet
PrefixLength uint8
}
// NL_ROUTE_ORIGIN enumeration from nldef.h or
// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin.
const (
NlroManual = 0
NlroWellKnown = 1
NlroDHCP = 2
NlroRouterAdvertisement = 3
Nlro6to4 = 4
)
// NL_ROUTE_PROTOCOL enumeration from nldef.h or
// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol.
const (
MIB_IPPROTO_OTHER = 1
MIB_IPPROTO_LOCAL = 2
MIB_IPPROTO_NETMGMT = 3
MIB_IPPROTO_ICMP = 4
MIB_IPPROTO_EGP = 5
MIB_IPPROTO_GGP = 6
MIB_IPPROTO_HELLO = 7
MIB_IPPROTO_RIP = 8
MIB_IPPROTO_IS_IS = 9
MIB_IPPROTO_ES_IS = 10
MIB_IPPROTO_CISCO = 11
MIB_IPPROTO_BBN = 12
MIB_IPPROTO_OSPF = 13
MIB_IPPROTO_BGP = 14
MIB_IPPROTO_IDPR = 15
MIB_IPPROTO_EIGRP = 16
MIB_IPPROTO_DVMRP = 17
MIB_IPPROTO_RPL = 18
MIB_IPPROTO_DHCP = 19
MIB_IPPROTO_NT_AUTOSTATIC = 10002
MIB_IPPROTO_NT_STATIC = 10006
MIB_IPPROTO_NT_STATIC_NON_DOD = 10007
)
// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2.
type MibIpForwardRow2 struct {
InterfaceLuid uint64
InterfaceIndex uint32
DestinationPrefix IpAddressPrefix
NextHop RawSockaddrInet
SitePrefixLength uint8
ValidLifetime uint32
PreferredLifetime uint32
Metric uint32
Protocol uint32
Loopback uint8
AutoconfigureAddress uint8
Publish uint8
Immortal uint8
Age uint32
Origin uint32
}
// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2.
type MibIpForwardTable2 struct {
NumEntries uint32
Table [1]MibIpForwardRow2
}
// Rows returns the IP route entries in the table.
func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 {
return unsafe.Slice(&t.Table[0], t.NumEntries)
}
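A usage sketch for the route-table bindings added here, assuming the signatures shown in this diff (Windows-only): the table is allocated by iphlpapi, iterated via Rows, and must be released with FreeMibTable.

//go:build windows

package main

import (
    "fmt"
    "unsafe"

    "golang.org/x/sys/windows"
)

func main() {
    var table *windows.MibIpForwardTable2
    // AF_UNSPEC requests both IPv4 and IPv6 routes.
    if err := windows.GetIpForwardTable2(windows.AF_UNSPEC, &table); err != nil {
        fmt.Println("GetIpForwardTable2:", err)
        return
    }
    // The table is allocated by iphlpapi and must be released with FreeMibTable.
    defer windows.FreeMibTable(unsafe.Pointer(table))

    for _, row := range table.Rows() {
        fmt.Printf("ifindex %d metric %d\n", row.InterfaceIndex, row.Metric)
    }
}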
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {


@ -182,13 +182,17 @@ var (
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
procFreeMibTable = modiphlpapi.NewProc("FreeMibTable")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2")
procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
@ -238,6 +242,7 @@ var (
procFindResourceW = modkernel32.NewProc("FindResourceW")
procFindVolumeClose = modkernel32.NewProc("FindVolumeClose")
procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose")
procFlushConsoleInputBuffer = modkernel32.NewProc("FlushConsoleInputBuffer")
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers")
procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile")
procFormatMessageW = modkernel32.NewProc("FormatMessageW")
@ -284,6 +289,7 @@ var (
procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
procGetNamedPipeServerProcessId = modkernel32.NewProc("GetNamedPipeServerProcessId")
procGetNumberOfConsoleInputEvents = modkernel32.NewProc("GetNumberOfConsoleInputEvents")
procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult")
procGetPriorityClass = modkernel32.NewProc("GetPriorityClass")
procGetProcAddress = modkernel32.NewProc("GetProcAddress")
@ -1622,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
return
}
func FreeMibTable(memory unsafe.Pointer) {
syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
return
}
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
@ -1662,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
return
}
func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
@ -1682,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
return
}
func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
_p0 = 1
}
r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
if r0 != 0 {
errcode = syscall.Errno(r0)
}
return
}
func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
@ -2111,6 +2150,14 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) {
return
}
func FlushConsoleInputBuffer(console Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushConsoleInputBuffer.Addr(), uintptr(console))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func FlushFileBuffers(handle Handle) (err error) {
r1, _, e1 := syscall.SyscallN(procFlushFileBuffers.Addr(), uintptr(handle))
if r1 == 0 {
@ -2481,6 +2528,14 @@ func GetNamedPipeServerProcessId(pipe Handle, serverProcessID *uint32) (err erro
return
}
func GetNumberOfConsoleInputEvents(console Handle, numevents *uint32) (err error) {
r1, _, e1 := syscall.SyscallN(procGetNumberOfConsoleInputEvents.Addr(), uintptr(console), uintptr(unsafe.Pointer(numevents)))
if r1 == 0 {
err = errnoErr(e1)
}
return
}
func GetOverlappedResult(handle Handle, overlapped *Overlapped, done *uint32, wait bool) (err error) {
var _p0 uint32
if wait {


@ -427,13 +427,6 @@ type isolatingRunSequence struct {
func (i *isolatingRunSequence) Len() int { return len(i.indexes) }
func maxLevel(a, b level) level {
if a > b {
return a
}
return b
}
// Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types,
// either L or R, for each isolating run sequence.
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence {
indexes: indexes,
types: types,
level: level,
sos: typeForLevel(maxLevel(prevLevel, level)),
eos: typeForLevel(maxLevel(succLevel, level)),
sos: typeForLevel(max(prevLevel, level)),
eos: typeForLevel(max(succLevel, level)),
}
}
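The deleted maxLevel helper is redundant since Go 1.21, whose built-in max works for any ordered type, including named integer types like level:

package main

import "fmt"

type level int // stands in for the bidi package's level type

func main() {
    var a, b level = 1, 2
    fmt.Println(max(a, b)) // 2, no per-type helper needed
}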


@ -195,7 +195,7 @@ func (r *Reservation) CancelAt(t time.Time) {
// update state
r.lim.last = t
r.lim.tokens = tokens
if r.timeToAct == r.lim.lastEvent {
if r.timeToAct.Equal(r.lim.lastEvent) {
prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens)))
if !prevEvent.Before(t) {
r.lim.lastEvent = prevEvent
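The switch from == to Equal matters because the == operator on time.Time also compares the monotonic clock reading and the Location, so two values representing the same instant can compare unequal; Equal compares only the instant. A minimal illustration:

package main

import (
    "fmt"
    "time"
)

func main() {
    t := time.Now()                 // carries a monotonic clock reading
    u := t.Round(0)                 // same instant, monotonic reading stripped
    fmt.Println(t == u, t.Equal(u)) // false true
}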