chore: make deps

2025-11-11 14:18:57 +01:00
parent db7c4042d0
commit 45af67d22d
590 changed files with 22837 additions and 16387 deletions

View File

@ -33,17 +33,21 @@ guidelines, there may be valid reasons to do so, but it should be rare.
## Guidelines for Pull Requests
How to get your contributions merged smoothly and quickly:
Please read the following carefully to ensure your contributions can be merged
smoothly and quickly.
### PR Contents
- Create **small PRs** that are narrowly focused on **addressing a single
concern**. We often receive PRs that attempt to fix several things at the same
time, and if one part of the PR has a problem, that will hold up the entire
PR.
- For **speculative changes**, consider opening an issue and discussing it
first. If you are suggesting a behavioral or API change, consider starting
with a [gRFC proposal](https://github.com/grpc/proposal). Many new features
that are not bug fixes will require cross-language agreement.
- If your change does not address an **open issue** with an **agreed
resolution**, consider opening an issue and discussing it first. If you are
suggesting a behavioral or API change, consider starting with a [gRFC
proposal](https://github.com/grpc/proposal). Many new features that are not
bug fixes will require cross-language agreement.
- If you want to fix **formatting or style**, consider whether your changes are
an obvious improvement or might be considered a personal preference. If a
@ -56,16 +60,6 @@ How to get your contributions merged smoothly and quickly:
often written as "iff". Please do not make spelling correction changes unless
you are certain they are misspellings.
- Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a GitHub issue if it exists.
- Maintain a **clean commit history** and use **meaningful commit messages**.
PRs with messy commit histories are difficult to review and won't be merged.
Before sending your PR, ensure your changes are based on top of the latest
`upstream/master` commits, and avoid rebasing in the middle of a code review.
You should **never use `git push -f`** unless absolutely necessary during a
review, as it can interfere with GitHub's tracking of comments.
- **All tests need to be passing** before your change can be merged. We
recommend you run tests locally before creating your PR to catch breakages
early on:
@ -81,15 +75,80 @@ How to get your contributions merged smoothly and quickly:
GitHub, which will trigger a GitHub Actions run that you can use to verify
everything is passing.
- If you are adding a new file, make sure it has the **copyright message**
- Note that there are two GitHub Actions checks that need not be green:
1. We test the freshness of the generated proto code we maintain via the
`vet-proto` check. If the source proto files are updated, but our repo is
not updated, an optional checker will fail. This will be fixed by our team
in a separate PR and will not prevent the merge of your PR.
2. We run a checker that will fail if there is any change in dependencies of
an exported package via the `dependencies` check. If new dependencies are
added that are not appropriate, we may not accept your PR (see below).
- If you are adding a **new file**, make sure it has the **copyright message**
template at the top as a comment. You can copy the message from an existing
file and update the year.
- The grpc package should only depend on standard Go packages and a small number
of exceptions. **If your contribution introduces new dependencies**, you will
need a discussion with gRPC-Go maintainers. A GitHub action check will run on
every PR, and will flag any transitive dependency changes from any public
package.
need a discussion with gRPC-Go maintainers.
### PR Descriptions
- **PR titles** should start with the name of the component being addressed, or
the type of change. Examples: transport, client, server, round_robin, xds,
cleanup, deps.
- Read and follow the **guidelines for PR titles and descriptions** here:
https://google.github.io/eng-practices/review/developer/cl-descriptions.html
*particularly* the sections "First Line" and "Body is Informative".
Note: your PR description will be used as the git commit message in a
squash-and-merge if your PR is approved. We may make changes to this as
necessary.
- **Does this PR relate to an open issue?** On the first line, please use the
tag `Fixes #<issue>` to ensure the issue is closed when the PR is merged. Or
use `Updates #<issue>` if the PR is related to an open issue, but does not fix
it. Consider filing an issue if one does not already exist.
- PR descriptions *must* conclude with **release notes** as follows:
```
RELEASE NOTES:
* <component>: <summary>
```
This need not match the PR title.
The summary must:
* be something that gRPC users will understand.
* clearly explain the feature being added, the issue being fixed, or the
behavior being changed, etc. If fixing a bug, be clear about how the bug
can be triggered by an end-user.
* begin with a capital letter and use complete sentences.
* be as short as possible to describe the change being made.
If a PR is *not* end-user visible (e.g. a cleanup, testing change, or
something GitHub-related), use `RELEASE NOTES: n/a`.
### PR Process
- Please **self-review** your code changes before sending your PR. This will
prevent simple, obvious errors from causing delays.
- Maintain a **clean commit history** and use **meaningful commit messages**.
PRs with messy commit histories are difficult to review and won't be merged.
Before sending your PR, ensure your changes are based on top of the latest
`upstream/master` commits, and avoid rebasing in the middle of a code review.
You should **never use `git push -f`** unless absolutely necessary during a
review, as it can interfere with GitHub's tracking of comments.
- Unless your PR is trivial, you should **expect reviewer comments** that you
will need to address before merging. We'll label the PR as `Status: Requires
@ -98,5 +157,3 @@ How to get your contributions merged smoothly and quickly:
`stale`, and we will automatically close it after 7 days if we don't hear back
from you. Please feel free to ping issues or bugs if you do not get a response
within a week.
- Exceptions to the rules can be made if there's a compelling reason to do so.

View File

@ -169,7 +169,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
addrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
addrs = append([]resolver.Address{}, addrs...)
rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
internal.RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
}
}
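The hunk above replaces the direct `rand.Shuffle` call with `internal.RandShuffle`, an injectable indirection. A minimal sketch of the pattern, assuming `RandShuffle` is simply a package-level variable defaulting to `rand.Shuffle` so tests can substitute a deterministic shuffle (the real wiring may differ); note the slice is copied first so the resolver-owned addresses are never mutated in place:

```go
package main

import (
	"fmt"
	"math/rand"
)

// RandShuffle pseudo-randomizes element order. Declared as a variable so
// tests can swap in a deterministic implementation (an assumption about
// how internal.RandShuffle is wired).
var RandShuffle = rand.Shuffle

func main() {
	resolverAddrs := []string{"a:443", "b:443", "c:443"}

	// Copy before shuffling so the resolver-owned slice is not mutated.
	addrs := append([]string{}, resolverAddrs...)
	RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })

	fmt.Println(addrs)         // shuffled copy
	fmt.Println(resolverAddrs) // original order preserved
}
```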

View File

@ -283,7 +283,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
newAddrs = append([]resolver.Address{}, newAddrs...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
@ -351,6 +351,13 @@ func (b *pickfirstBalancer) ExitIdle() {
b.mu.Lock()
defer b.mu.Unlock()
if b.state == connectivity.Idle {
// Move the balancer into CONNECTING state immediately. This is done to
// avoid staying in IDLE if a resolver update arrives before the first
// SubConn reports CONNECTING.
b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
b.startFirstPassLocked()
}
}
@ -604,7 +611,7 @@ func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.Sub
if !b.addressList.seekTo(sd.addr) {
// This should not fail as we should have only one SubConn after
// entering READY. The SubConn should be present in the addressList.
b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
b.logger.Errorf("Address %q not found in address list %v", sd.addr, b.addressList.addresses)
return
}
if !b.healthCheckingEnabled {

View File

@ -456,7 +456,7 @@ func (cc *ClientConn) validateTransportCredentials() error {
func (cc *ClientConn) channelzRegistration(target string) {
parentChannel, _ := cc.dopts.channelzParent.(*channelz.Channel)
cc.channelz = channelz.RegisterChannel(parentChannel, target)
cc.addTraceEvent("created")
cc.addTraceEvent(fmt.Sprintf("created for target %q", target))
}
// chainUnaryClientInterceptors chains all unary client interceptors into one.

View File

@ -46,9 +46,25 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
// Important: if we remove this Size call then we cannot use
// UseCachedSize in MarshalOptions below.
size := proto.Size(vv)
// MarshalOptions with UseCachedSize allows reusing the result from the
// previous Size call. This is safe here because:
//
// 1. We just computed the size.
// 2. We assume the message is not being mutated concurrently.
//
// Important: If the proto.Size call above is removed, using UseCachedSize
// becomes unsafe and may lead to incorrect marshaling.
//
// For more details, see the doc of UseCachedSize:
// https://pkg.go.dev/google.golang.org/protobuf/proto#MarshalOptions
marshalOptions := proto.MarshalOptions{UseCachedSize: true}
if mem.IsBelowBufferPoolingThreshold(size) {
buf, err := proto.Marshal(vv)
buf, err := marshalOptions.Marshal(vv)
if err != nil {
return nil, err
}
@ -56,7 +72,7 @@ func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
} else {
pool := mem.DefaultBufferPool()
buf := pool.Get(size)
if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
if _, err := marshalOptions.MarshalAppend((*buf)[:0], vv); err != nil {
pool.Put(buf)
return nil, err
}
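The expanded comment above documents the contract behind `UseCachedSize`: `proto.Size` caches the computed size inside the message, and `MarshalOptions{UseCachedSize: true}` reuses it, skipping a second sizing pass. A minimal sketch of the size-then-marshal pattern, using a `wrapperspb.StringValue` as a stand-in message:

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	msg := wrapperspb.String("hello")

	// proto.Size computes the serialized size and caches it in the message.
	size := proto.Size(msg)

	// UseCachedSize reuses that cached size. This is only safe if the
	// message is not mutated between the Size and Marshal calls.
	opts := proto.MarshalOptions{UseCachedSize: true}

	buf, err := opts.MarshalAppend(make([]byte, 0, size), msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(size, len(buf)) // the two match
}
```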

View File

@ -83,6 +83,7 @@ func (b *Unbounded) Load() {
default:
}
} else if b.closing && !b.closed {
b.closed = true
close(b.c)
}
}
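The one-line fix above sets `b.closed = true` before closing the channel, so a subsequent `Load` cannot `close(b.c)` a second time and panic. The guard in isolation, as a minimal sketch (the real `Unbounded` type also holds a mutex, omitted here):

```go
package main

type unbounded struct {
	c       chan any
	closing bool // Close has been requested
	closed  bool // c has actually been closed
}

func (b *unbounded) maybeClose() {
	// Without the closed flag, a second call after closing was requested
	// would close(b.c) twice and panic.
	if b.closing && !b.closed {
		b.closed = true
		close(b.c)
	}
}

func main() {
	b := &unbounded{c: make(chan any), closing: true}
	b.maybeClose()
	b.maybeClose() // safe: the flag prevents a double close
}
```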

View File

@ -194,7 +194,7 @@ func (r RefChannelType) String() string {
// If channelz is not turned ON, this will simply log the event descriptions.
func AddTraceEvent(l grpclog.DepthLoggerV2, e Entity, depth int, desc *TraceEvent) {
// Log only the trace description associated with the bottom most entity.
d := fmt.Sprintf("[%s]%s", e, desc.Desc)
d := fmt.Sprintf("[%s] %s", e, desc.Desc)
switch desc.Severity {
case CtUnknown, CtInfo:
l.InfoDepth(depth+1, d)

View File

@ -68,4 +68,10 @@ var (
// trust. For more details, see:
// https://github.com/grpc/proposal/blob/master/A87-mtls-spiffe-support.md
XDSSPIFFEEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_MTLS_SPIFFE", false)
// XDSHTTPConnectEnabled is true if gRPC should parse custom Metadata
// configuring use of an HTTP CONNECT proxy via xDS from cluster resources.
// For more details, see:
// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
)
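Both variables above follow the same pattern: an experimental feature toggled by an environment variable with a default. A minimal sketch of a `boolFromEnv`-style helper (an assumption about the helper's semantics; grpc-go's internal/envconfig may differ):

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// boolFromEnv returns def unless the variable is explicitly set to the
// opposite value (a sketch of the assumed helper, not the real one).
func boolFromEnv(name string, def bool) bool {
	if def {
		// Default true: flip to false only on an explicit "false".
		return !strings.EqualFold(os.Getenv(name), "false")
	}
	// Default false: flip to true only on an explicit "true".
	return strings.EqualFold(os.Getenv(name), "true")
}

func main() {
	os.Setenv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", "true")
	fmt.Println(boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)) // true
}
```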

View File

@ -80,25 +80,11 @@ func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure
func (cs *CallbackSerializer) run(ctx context.Context) {
defer close(cs.done)
// TODO: when Go 1.21 is the oldest supported version, this loop and Close
// can be replaced with:
//
// context.AfterFunc(ctx, cs.callbacks.Close)
for ctx.Err() == nil {
select {
case <-ctx.Done():
// Do nothing here. Next iteration of the for loop will not happen,
// since ctx.Err() would be non-nil.
case cb := <-cs.callbacks.Get():
cs.callbacks.Load()
cb.(func(context.Context))(ctx)
}
}
// Close the buffer when the context is canceled
// to prevent new callbacks from being added.
context.AfterFunc(ctx, cs.callbacks.Close)
// Close the buffer to prevent new callbacks from being added.
cs.callbacks.Close()
// Run all pending callbacks.
// Run all callbacks.
for cb := range cs.callbacks.Get() {
cs.callbacks.Load()
cb.(func(context.Context))(ctx)
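With Go 1.21 as the minimum supported version, `context.AfterFunc` replaces the old select loop: it registers a function to run once the context is done, here closing the callback buffer so no new work can be enqueued while the remaining callbacks drain. A minimal sketch of the shutdown pattern, with a plain channel standing in for the `Unbounded` buffer:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	work := make(chan func(), 4)
	work <- func() { fmt.Println("callback 1") }
	work <- func() { fmt.Println("callback 2") }

	// Once ctx is done, stop accepting new work by closing the channel.
	// AfterFunc runs the function in its own goroutine, which is why the
	// drain loop below still runs all callbacks queued before the close.
	context.AfterFunc(ctx, func() { close(work) })

	cancel()
	time.Sleep(10 * time.Millisecond) // demo only: let AfterFunc fire

	// Drain everything already queued; range exits once work is closed.
	for cb := range work {
		cb()
	}
}
```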

View File

@ -556,6 +556,19 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
// Make the slice of a certain predictable size to reduce allocations made by append.
hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te
hfLen += len(authData) + len(callAuthData)
registeredCompressors := t.registeredCompressors
if callHdr.PreviousAttempts > 0 {
hfLen++
}
if callHdr.SendCompress != "" {
hfLen++
}
if registeredCompressors != "" {
hfLen++
}
if _, ok := ctx.Deadline(); ok {
hfLen++
}
headerFields := make([]hpack.HeaderField, 0, hfLen)
headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"})
headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme})
@ -568,7 +581,6 @@ func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr)
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-previous-rpc-attempts", Value: strconv.Itoa(callHdr.PreviousAttempts)})
}
registeredCompressors := t.registeredCompressors
if callHdr.SendCompress != "" {
headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress})
// Include the outgoing compressor name when compressor is not registered
@ -1499,13 +1511,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
case ":status":
if hf.Value == "200" {
httpStatusErr = ""
statusCode := 200
httpStatusCode = &statusCode
break
}
c, err := strconv.ParseInt(hf.Value, 10, 32)
if err != nil {
se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
@ -1513,7 +1518,19 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
statusCode := int(c)
if statusCode >= 100 && statusCode < 200 {
if endStream {
se := status.New(codes.Internal, fmt.Sprintf(
"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
}
return
}
httpStatusCode = &statusCode
if statusCode == 200 {
httpStatusErr = ""
break
}
httpStatusErr = fmt.Sprintf(
"unexpected HTTP status code received from server: %d (%s)",

View File

@ -549,6 +549,8 @@ type clientStream struct {
sentLast bool // sent an end stream
receivedFirstMsg bool // set after the first message is received
methodConfig *MethodConfig
ctx context.Context // the application's context, wrapped by stats/tracing
@ -1144,11 +1146,16 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
// Received no message and status OK for a non-server-streaming RPC.
if !cs.desc.ServerStreams && !cs.receivedFirstMsg {
return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
}
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
cs.receivedFirstMsg = true
if a.trInfo != nil {
a.mu.Lock()
if a.trInfo.tr != nil {
@ -1177,7 +1184,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
} else if err != nil {
return toRPCErr(err)
}
return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (a *csAttempt) finish(err error) {
@ -1359,6 +1366,7 @@ type addrConnStream struct {
transport transport.ClientTransport
ctx context.Context
sentLast bool
receivedFirstMsg bool
desc *StreamDesc
codec baseCodec
sendCompressorV0 Compressor
@ -1484,10 +1492,15 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
}
// Received no message and status OK for a non-server-streaming RPC.
if !as.desc.ServerStreams && !as.receivedFirstMsg {
return status.Error(codes.Internal, "cardinality violation: received no response message from non-server-streaming RPC")
}
return io.EOF // indicates successful end of stream.
}
return toRPCErr(err)
}
as.receivedFirstMsg = true
if as.desc.ServerStreams {
// Subsequent messages should be received by subsequent RecvMsg calls.
@ -1501,7 +1514,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
} else if err != nil {
return toRPCErr(err)
}
return status.Errorf(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
return status.Error(codes.Internal, "cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
}
func (as *addrConnStream) finish(err error) {
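The new `receivedFirstMsg` flag enforces unary-RPC cardinality: a server that ends a non-server-streaming call with status OK but no message, or with more than one message, now produces an Internal-status error instead of a silent success. A minimal sketch of the receive-loop invariant, with the transport abstracted to a `recv` function:

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// recvUnary drains a non-server-streaming RPC and enforces the
// "exactly one response message" rule added by these hunks.
func recvUnary(recv func() (string, error)) (string, error) {
	var msg string
	receivedFirstMsg := false
	for {
		m, err := recv()
		if errors.Is(err, io.EOF) {
			// Status OK but no message at all: cardinality violation.
			if !receivedFirstMsg {
				return "", errors.New("cardinality violation: received no response message from non-server-streaming RPC")
			}
			return msg, nil
		}
		if err != nil {
			return "", err
		}
		if receivedFirstMsg {
			// A second message on a unary RPC is also a violation.
			return "", errors.New("cardinality violation: expected <EOF> for non server-streaming RPCs, but received another message")
		}
		msg, receivedFirstMsg = m, true
	}
}

func main() {
	// Simulated server that ends the stream without sending any message.
	empty := func() (string, error) { return "", io.EOF }
	_, err := recvUnary(empty)
	fmt.Println(err)
}
```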

View File

@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.75.1"
const Version = "1.76.0"

View File

@ -72,9 +72,10 @@ type (
EditionFeatures EditionFeatures
}
FileL2 struct {
Options func() protoreflect.ProtoMessage
Imports FileImports
Locations SourceLocations
Options func() protoreflect.ProtoMessage
Imports FileImports
OptionImports func() protoreflect.FileImports
Locations SourceLocations
}
// EditionFeatures is a frequently-instantiated struct, so please take care
@ -126,12 +127,9 @@ func (fd *File) ParentFile() protoreflect.FileDescriptor { return fd }
func (fd *File) Parent() protoreflect.Descriptor { return nil }
func (fd *File) Index() int { return 0 }
func (fd *File) Syntax() protoreflect.Syntax { return fd.L1.Syntax }
// Not exported and just used to reconstruct the original FileDescriptor proto
func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Name() protoreflect.Name { return fd.L1.Package.Name() }
func (fd *File) FullName() protoreflect.FullName { return fd.L1.Package }
func (fd *File) IsPlaceholder() bool { return false }
func (fd *File) Options() protoreflect.ProtoMessage {
if f := fd.lazyInit().Options; f != nil {
return f()
@ -150,6 +148,16 @@ func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatD
func (fd *File) ProtoType(protoreflect.FileDescriptor) {}
func (fd *File) ProtoInternal(pragma.DoNotImplement) {}
// The next two are not part of the FileDescriptor interface. They are just used to reconstruct
// the original FileDescriptor proto.
func (fd *File) Edition() int32 { return int32(fd.L1.Edition) }
func (fd *File) OptionImports() protoreflect.FileImports {
if f := fd.lazyInit().OptionImports; f != nil {
return f()
}
return emptyFiles
}
func (fd *File) lazyInit() *FileL2 {
if atomic.LoadUint32(&fd.once) == 0 {
fd.lazyInitOnce()
@ -182,9 +190,9 @@ type (
L2 *EnumL2 // protected by fileDesc.once
}
EnumL1 struct {
eagerValues bool // controls whether EnumL2.Values is already populated
EditionFeatures EditionFeatures
Visibility int32
eagerValues bool // controls whether EnumL2.Values is already populated
}
EnumL2 struct {
Options func() protoreflect.ProtoMessage
@ -219,6 +227,11 @@ func (ed *Enum) ReservedNames() protoreflect.Names { return &ed.lazyInit()
func (ed *Enum) ReservedRanges() protoreflect.EnumRanges { return &ed.lazyInit().ReservedRanges }
func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) }
func (ed *Enum) ProtoType(protoreflect.EnumDescriptor) {}
// This is not part of the EnumDescriptor interface. It is just used to reconstruct
// the original FileDescriptor proto.
func (ed *Enum) Visibility() int32 { return ed.L1.Visibility }
func (ed *Enum) lazyInit() *EnumL2 {
ed.L0.ParentFile.lazyInit() // implicitly initializes L2
return ed.L2
@ -244,13 +257,13 @@ type (
L2 *MessageL2 // protected by fileDesc.once
}
MessageL1 struct {
Enums Enums
Messages Messages
Extensions Extensions
IsMapEntry bool // promoted from google.protobuf.MessageOptions
IsMessageSet bool // promoted from google.protobuf.MessageOptions
Enums Enums
Messages Messages
Extensions Extensions
EditionFeatures EditionFeatures
Visibility int32
IsMapEntry bool // promoted from google.protobuf.MessageOptions
IsMessageSet bool // promoted from google.protobuf.MessageOptions
}
MessageL2 struct {
Options func() protoreflect.ProtoMessage
@ -319,6 +332,11 @@ func (md *Message) Messages() protoreflect.MessageDescriptors { return &md.L
func (md *Message) Extensions() protoreflect.ExtensionDescriptors { return &md.L1.Extensions }
func (md *Message) ProtoType(protoreflect.MessageDescriptor) {}
func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) }
// This is not part of the MessageDescriptor interface. It is just used to reconstruct
// the original FileDescriptor proto.
func (md *Message) Visibility() int32 { return md.L1.Visibility }
func (md *Message) lazyInit() *MessageL2 {
md.L0.ParentFile.lazyInit() // implicitly initializes L2
return md.L2

View File

@ -284,6 +284,13 @@ func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protorefl
case genid.EnumDescriptorProto_Value_field_number:
numValues++
}
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.EnumDescriptorProto_Visibility_field_number:
ed.L1.Visibility = int32(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
@ -365,6 +372,13 @@ func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd protor
md.unmarshalSeedOptions(v)
}
prevField = num
case protowire.VarintType:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
switch num {
case genid.DescriptorProto_Visibility_field_number:
md.L1.Visibility = int32(v)
}
default:
m := protowire.ConsumeFieldValue(num, typ, b)
b = b[m:]
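Both hunks add a `protowire.VarintType` branch that decodes the new visibility field during the seed unmarshal. A minimal sketch of the same tag/varint decoding flow with the `protowire` package (field number 11 is a hypothetical stand-in, not the real visibility field number):

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode a hypothetical field 11 as a varint with value 2.
	var b []byte
	b = protowire.AppendTag(b, 11, protowire.VarintType)
	b = protowire.AppendVarint(b, 2)

	// Decode it the way the hunks do: consume the tag, then branch on the
	// wire type and field number.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeTag(b)
		b = b[n:]
		switch typ {
		case protowire.VarintType:
			v, m := protowire.ConsumeVarint(b)
			b = b[m:]
			if num == 11 {
				fmt.Println("visibility:", int32(v)) // visibility: 2
			}
		default:
			m := protowire.ConsumeFieldValue(num, typ, b)
			b = b[m:]
		}
	}
}
```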

View File

@ -134,6 +134,7 @@ func (fd *File) unmarshalFull(b []byte) {
var enumIdx, messageIdx, extensionIdx, serviceIdx int
var rawOptions []byte
var optionImports []string
fd.L2 = new(FileL2)
for len(b) > 0 {
num, typ, n := protowire.ConsumeTag(b)
@ -157,6 +158,8 @@ func (fd *File) unmarshalFull(b []byte) {
imp = PlaceholderFile(path)
}
fd.L2.Imports = append(fd.L2.Imports, protoreflect.FileImport{FileDescriptor: imp})
case genid.FileDescriptorProto_OptionDependency_field_number:
optionImports = append(optionImports, sb.MakeString(v))
case genid.FileDescriptorProto_EnumType_field_number:
fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb)
enumIdx++
@ -178,6 +181,23 @@ func (fd *File) unmarshalFull(b []byte) {
}
}
fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions)
if len(optionImports) > 0 {
var imps FileImports
var once sync.Once
fd.L2.OptionImports = func() protoreflect.FileImports {
once.Do(func() {
imps = make(FileImports, len(optionImports))
for i, path := range optionImports {
imp, _ := fd.builder.FileRegistry.FindFileByPath(path)
if imp == nil {
imp = PlaceholderFile(path)
}
imps[i] = protoreflect.FileImport{FileDescriptor: imp}
}
})
return &imps
}
}
}
func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) {
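The `OptionImports` closure above resolves option-dependency paths lazily and memoizes the result with `sync.Once`, so the registry lookups happen at most once no matter how many goroutines ask. A minimal sketch of that memoized-resolver pattern (the `resolve` callback stands in for `FileRegistry.FindFileByPath` with a placeholder fallback):

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// lazyResolver returns a function that resolves paths on first call only;
// sync.Once makes the memoization safe under concurrent callers.
func lazyResolver(paths []string, resolve func(string) string) func() []string {
	var once sync.Once
	var out []string
	return func() []string {
		once.Do(func() {
			out = make([]string, len(paths))
			for i, p := range paths {
				out[i] = resolve(p)
			}
		})
		return out
	}
}

func main() {
	r := lazyResolver([]string{"a.proto", "b.proto"}, func(p string) string {
		return strings.TrimSuffix(p, ".proto") // stand-in resolution
	})
	fmt.Println(r())
	fmt.Println(r()) // second call reuses the memoized result
}
```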

View File

@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
Patch = 9
Patch = 10
PreRelease = ""
)