chore: bump deps
All checks were successful (continuous-integration/drone/push: build is passing)

Committed by decentral1se on 2025-08-12 07:04:57 +02:00
parent 157d131b37
commit 56a68dfa91
981 changed files with 36486 additions and 39650 deletions

View File

@@ -180,6 +180,16 @@ func (dke ErrMalformedMessage) Error() string {
	return "openpgp: malformed message " + string(dke)
}

type messageTooLargeError int

func (e messageTooLargeError) Error() string {
	return "openpgp: decompressed message size exceeds provided limit"
}

// ErrMessageTooLarge is returned if the data read from
// a compressed packet exceeds the provided limit.
var ErrMessageTooLarge error = messageTooLargeError(0)

// ErrEncryptionKeySelection is returned if encryption key selection fails (v2 API).
type ErrEncryptionKeySelection struct {
	PrimaryKeyId string
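For callers, the new sentinel behaves like any other exported error value: every messageTooLargeError compares equal, so a direct comparison or errors.Is from the standard library both detect it. A minimal sketch, assuming the ProtonMail go-crypto import path:

package main

import (
	"errors"
	"fmt"

	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" // assumed import path
)

func main() {
	var err error = pgperrors.ErrMessageTooLarge
	// Both checks hold, since messageTooLargeError values compare equal.
	fmt.Println(err == pgperrors.ErrMessageTooLarge, errors.Is(err, pgperrors.ErrMessageTooLarge))
}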

View File

@@ -37,7 +37,7 @@ func (conf *AEADConfig) Mode() AEADMode {
// ChunkSizeByte returns the byte indicating the chunk size. The effective
// chunk size is computed with the formula uint64(1) << (chunkSizeByte + 6)
// limit to 16 = 4 MiB
// limit chunkSizeByte to 16, which yields a chunk size of 2^22 = 4 MiB
// https://www.ietf.org/archive/id/draft-ietf-openpgp-crypto-refresh-07.html#section-5.13.2
func (conf *AEADConfig) ChunkSizeByte() byte {
	if conf == nil || conf.ChunkSize == 0 {
@@ -49,8 +49,8 @@ func (conf *AEADConfig) ChunkSizeByte() byte {
	switch {
	case exponent < 6:
		exponent = 6
	case exponent > 16:
		exponent = 16
	case exponent > 22:
		exponent = 22
	}
	return byte(exponent - 6)
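The raised clamp matters because the stored octet is the exponent minus 6, and the elided code above appears to derive the exponent as the base-2 logarithm of the configured chunk size in bytes. A small sketch of the expected behavior, assuming the ProtonMail go-crypto import path and that AEADConfig.ChunkSize is given in bytes:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet" // assumed import path
)

func main() {
	// 4 MiB chunks: exponent 22 is now allowed, so the stored octet is 22-6 = 16
	// and the effective chunk size round-trips to 4 MiB. With the previous clamp
	// of 16 the same configuration would have produced octet 10, i.e. 64 KiB chunks.
	cfg := &packet.AEADConfig{ChunkSize: 4 * 1024 * 1024}
	b := cfg.ChunkSizeByte()
	fmt.Println(b, uint64(1)<<(b+6)) // expected: 16 4194304
}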

View File

@@ -98,6 +98,16 @@ func (c *Compressed) parse(r io.Reader) error {
	return err
}

// LimitedBodyReader wraps the provided body reader with a limiter that restricts
// the number of bytes read to the specified limit.
// If limit is nil, the reader is unbounded.
func (c *Compressed) LimitedBodyReader(limit *int64) io.Reader {
	if limit == nil {
		return c.Body
	}
	return &LimitReader{R: c.Body, N: *limit}
}

// compressedWriterCloser represents the serialized compression stream
// header and the compressor. Its Close() method ensures that both the
// compressor and serialized stream header are closed. Its Write()
@@ -159,3 +169,24 @@ func SerializeCompressed(w io.WriteCloser, algo CompressionAlgo, cc *Compression
	return
}

// LimitReader is an io.Reader that fails with ErrMessageTooLarge once the
// bytes read from it exceed N.
type LimitReader struct {
	R io.Reader // underlying reader
	N int64     // max bytes allowed
}

func (l *LimitReader) Read(p []byte) (int, error) {
	if l.N <= 0 {
		return 0, errors.ErrMessageTooLarge
	}

	n, err := l.R.Read(p)
	l.N -= int64(n)
	if err == nil && l.N <= 0 {
		err = errors.ErrMessageTooLarge
	}
	return n, err
}
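Since LimitReader is exported and only depends on io.Reader, its behavior can be exercised on its own. A minimal sketch (the 16-byte payload and 10-byte limit are arbitrary; import paths are assumptions):

package main

import (
	"bytes"
	"fmt"
	"io"

	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" // assumed import path
	"github.com/ProtonMail/go-crypto/openpgp/packet"           // assumed import path
)

func main() {
	// Wrap a 16-byte source with a 10-byte limit; reading past the limit
	// surfaces ErrMessageTooLarge instead of more data.
	limited := &packet.LimitReader{R: bytes.NewReader(make([]byte, 16)), N: 10}
	_, err := io.ReadAll(limited)
	fmt.Println(err == pgperrors.ErrMessageTooLarge) // expected: true
}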

View File

@@ -178,6 +178,11 @@ type Config struct {
	// When set to true, a key without flags is treated as if all flags are enabled.
	// This behavior is consistent with GPG.
	InsecureAllowAllKeyFlagsWhenMissing bool

	// MaxDecompressedMessageSize specifies the maximum number of bytes that can be
	// read from a compressed packet. This serves as an upper limit to prevent
	// excessively large decompressed messages.
	MaxDecompressedMessageSize *int64
}

func (c *Config) Random() io.Reader {
@@ -415,6 +420,13 @@ func (c *Config) AllowAllKeyFlagsWhenMissing() bool {
	return c.InsecureAllowAllKeyFlagsWhenMissing
}

func (c *Config) DecompressedMessageSizeLimit() *int64 {
	if c == nil {
		return nil
	}
	return c.MaxDecompressedMessageSize
}

// BoolPointer is a helper function to set a boolean pointer in the Config.
// e.g., config.CheckPacketSequence = BoolPointer(true)
func BoolPointer(value bool) *bool {
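MaxDecompressedMessageSize is a *int64, so (absent an Int64Pointer helper analogous to BoolPointer) callers take the address of a local value. A minimal sketch; the 32 MiB figure and the import path are assumptions:

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp/packet" // assumed import path
)

func main() {
	limit := int64(32 * 1024 * 1024) // cap decompressed data at 32 MiB (arbitrary)
	cfg := &packet.Config{MaxDecompressedMessageSize: &limit}
	fmt.Println(*cfg.DecompressedMessageSizeLimit()) // expected: 33554432
}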

View File

@@ -259,7 +259,7 @@ FindLiteralData:
		}
		switch p := p.(type) {
		case *packet.Compressed:
			if err := packets.Push(p.Body); err != nil {
			if err := packets.Push(p.LimitedBodyReader(config.DecompressedMessageSizeLimit())); err != nil {
				return nil, err
			}
		case *packet.OnePassSignature:
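End to end, the limit set in the Config is what this Push call enforces: once a compressed packet inflates past it, reads of the plaintext fail with ErrMessageTooLarge. A hedged sketch of the caller side (keyring and msg are caller-supplied; import paths and the 1 MiB figure are assumptions):

package example

import (
	"io"

	"github.com/ProtonMail/go-crypto/openpgp"                  // assumed import path
	pgperrors "github.com/ProtonMail/go-crypto/openpgp/errors" // assumed import path
	"github.com/ProtonMail/go-crypto/openpgp/packet"           // assumed import path
)

// readWithLimit reads an OpenPGP message while capping how much data a
// compressed packet may decompress to (1 MiB here, an arbitrary value).
func readWithLimit(msg io.Reader, keyring openpgp.KeyRing) ([]byte, error) {
	limit := int64(1 << 20)
	cfg := &packet.Config{MaxDecompressedMessageSize: &limit}

	md, err := openpgp.ReadMessage(msg, keyring, nil, cfg)
	if err != nil {
		return nil, err
	}
	body, err := io.ReadAll(md.UnverifiedBody)
	if err == pgperrors.ErrMessageTooLarge {
		// the compressed packet expanded past the configured limit
		return nil, err
	}
	return body, err
}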

View File

@@ -253,34 +253,12 @@ func writeAndSign(payload io.WriteCloser, candidateHashes []uint8, signed *Entit
	}

	var hash crypto.Hash
	for _, hashId := range candidateHashes {
		if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
			hash = h
			break
		}
	}

	// If the hash specified by config is a candidate, we'll use that.
	if configuredHash := config.Hash(); configuredHash.Available() {
		for _, hashId := range candidateHashes {
			if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
				hash = h
				break
			}
		}
	}

	if hash == 0 {
		hashId := candidateHashes[0]
		name, ok := algorithm.HashIdToString(hashId)
		if !ok {
			name = "#" + strconv.Itoa(int(hashId))
		}
		return nil, errors.InvalidArgumentError("cannot encrypt because no candidate hash functions are compiled in. (Wanted " + name + " in this case.)")
	}

	var salt []byte
	if signer != nil {
		if hash, err = selectHash(candidateHashes, config.Hash(), signer); err != nil {
			return nil, err
		}

		var opsVersion = 3
		if signer.Version == 6 {
			opsVersion = signer.Version
@@ -558,13 +536,34 @@ func (s signatureWriter) Close() error {
	return s.encryptedData.Close()
}

func selectHashForSigningKey(config *packet.Config, signer *packet.PublicKey) crypto.Hash {
	acceptableHashes := acceptableHashesToWrite(signer)
	hash, ok := algorithm.HashToHashId(config.Hash())
	if !ok {
		return config.Hash()
	}
	for _, acceptableHash := range acceptableHashes {
		if acceptableHash == hash {
			return config.Hash()
		}
	}
	if len(acceptableHashes) > 0 {
		defaultAcceptedHash, ok := algorithm.HashIdToHash(acceptableHashes[0])
		if ok {
			return defaultAcceptedHash
		}
	}
	return config.Hash()
}
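To make the fallback concrete, here is a test-style fragment that would have to live inside the openpgp package itself (ed448Signer is an assumed *packet.PublicKey for an Ed448 key, and DefaultHash is the Config field behind config.Hash()):

cfg := &packet.Config{DefaultHash: crypto.SHA256}

// Ed448 keys only accept SHA-512 and SHA3-512 (see acceptableHashesToWrite
// further down), so the configured SHA-256 is not acceptable and the key's
// first acceptable hash wins.
h := selectHashForSigningKey(cfg, ed448Signer)
fmt.Println(h == crypto.SHA512) // expected: true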
func createSignaturePacket(signer *packet.PublicKey, sigType packet.SignatureType, config *packet.Config) *packet.Signature {
	sigLifetimeSecs := config.SigLifetime()
	hash := selectHashForSigningKey(config, signer)

	return &packet.Signature{
		Version:           signer.Version,
		SigType:           sigType,
		PubKeyAlgo:        signer.PubKeyAlgo,
		Hash:              config.Hash(),
		Hash:              hash,
		CreationTime:      config.Now(),
		IssuerKeyId:       &signer.KeyId,
		IssuerFingerprint: signer.Fingerprint,
@@ -618,3 +617,74 @@ func handleCompression(compressed io.WriteCloser, candidateCompression []uint8,
	}
	return data, nil
}
// selectHash selects the preferred hash given the candidateHashes and the configuredHash
func selectHash(candidateHashes []byte, configuredHash crypto.Hash, signer *packet.PrivateKey) (hash crypto.Hash, err error) {
	acceptableHashes := acceptableHashesToWrite(&signer.PublicKey)
	candidateHashes = intersectPreferences(acceptableHashes, candidateHashes)

	for _, hashId := range candidateHashes {
		if h, ok := algorithm.HashIdToHash(hashId); ok && h.Available() {
			hash = h
			break
		}
	}

	// If the hash specified by config is a candidate, we'll use that.
	if configuredHash.Available() {
		for _, hashId := range candidateHashes {
			if h, ok := algorithm.HashIdToHash(hashId); ok && h == configuredHash {
				hash = h
				break
			}
		}
	}

	if hash == 0 {
		if len(acceptableHashes) > 0 {
			if h, ok := algorithm.HashIdToHash(acceptableHashes[0]); ok {
				hash = h
			} else {
				return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
			}
		} else {
			return 0, errors.UnsupportedError("no candidate hash functions are compiled in.")
		}
	}

	return
}
func acceptableHashesToWrite(signingKey *packet.PublicKey) []uint8 {
	switch signingKey.PubKeyAlgo {
	case packet.PubKeyAlgoEd448:
		return []uint8{
			hashToHashId(crypto.SHA512),
			hashToHashId(crypto.SHA3_512),
		}
	case packet.PubKeyAlgoECDSA, packet.PubKeyAlgoEdDSA:
		if curve, err := signingKey.Curve(); err == nil {
			if curve == packet.Curve448 ||
				curve == packet.CurveNistP521 ||
				curve == packet.CurveBrainpoolP512 {
				return []uint8{
					hashToHashId(crypto.SHA512),
					hashToHashId(crypto.SHA3_512),
				}
			} else if curve == packet.CurveBrainpoolP384 ||
				curve == packet.CurveNistP384 {
				return []uint8{
					hashToHashId(crypto.SHA384),
					hashToHashId(crypto.SHA512),
					hashToHashId(crypto.SHA3_512),
				}
			}
		}
	}
	return []uint8{
		hashToHashId(crypto.SHA256),
		hashToHashId(crypto.SHA384),
		hashToHashId(crypto.SHA512),
		hashToHashId(crypto.SHA3_256),
		hashToHashId(crypto.SHA3_512),
	}
}
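To trace the two helpers together with concrete values (an illustration, not upstream documentation): for an ECDSA signing key on NIST P-384, acceptableHashesToWrite returns SHA-384, SHA-512 and SHA3-512. If the candidate hashes offered for the message are SHA-256 and SHA-512 while the configured hash is SHA-256, intersecting the two lists leaves only SHA-512, so selectHash settles on SHA-512; the configured SHA-256 is ignored because it is not in the intersection. Only when the intersection is empty does selectHash fall back to the key's first acceptable hash, and it returns an UnsupportedError only when that hash cannot be mapped to a compiled-in implementation.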