chore: make deps, go mod vendor

2024-12-02 01:45:06 +01:00
parent f664599836
commit 31fa9b1a7a
598 changed files with 37898 additions and 18309 deletions

View File

@ -4,7 +4,7 @@ We definitely welcome your patches and contributions to gRPC! Please read the gR
organization's [governance rules](https://github.com/grpc/grpc-community/blob/master/governance.md)
and [contribution guidelines](https://github.com/grpc/grpc-community/blob/master/CONTRIBUTING.md) before proceeding.
If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
If you are new to GitHub, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/)
## Legal requirements
@ -25,8 +25,8 @@ How to get your contributions merged smoothly and quickly.
is a great place to start. These issues are well-documented and usually can be
resolved with a single pull request.
- If you are adding a new file, make sure it has the copyright message template
at the top as a comment. You can copy over the message from an existing file
- If you are adding a new file, make sure it has the copyright message template
at the top as a comment. You can copy over the message from an existing file
and update the year.
- The grpc package should only depend on standard Go packages and a small number
@ -39,12 +39,12 @@ How to get your contributions merged smoothly and quickly.
proposal](https://github.com/grpc/proposal).
- Provide a good **PR description** as a record of **what** change is being made
and **why** it was made. Link to a github issue if it exists.
and **why** it was made. Link to a GitHub issue if it exists.
- If you want to fix formatting or style, consider whether your changes are an
obvious improvement or might be considered a personal preference. If a style
change is based on preference, it likely will not be accepted. If it corrects
widely agreed-upon anti-patterns, then please do create a PR and explain the
- If you want to fix formatting or style, consider whether your changes are an
obvious improvement or might be considered a personal preference. If a style
change is based on preference, it likely will not be accepted. If it corrects
widely agreed-upon anti-patterns, then please do create a PR and explain the
benefits of the change.
- Unless your PR is trivial, you should expect there will be reviewer comments

View File

@ -9,21 +9,28 @@ for general contribution guidelines.
## Maintainers (in alphabetical order)
- [aranjans](https://github.com/aranjans), Google LLC
- [arjan-bal](https://github.com/arjan-bal), Google LLC
- [arvindbr8](https://github.com/arvindbr8), Google LLC
- [atollena](https://github.com/atollena), Datadog, Inc.
- [cesarghali](https://github.com/cesarghali), Google LLC
- [dfawley](https://github.com/dfawley), Google LLC
- [easwars](https://github.com/easwars), Google LLC
- [menghanl](https://github.com/menghanl), Google LLC
- [srini100](https://github.com/srini100), Google LLC
- [erm-g](https://github.com/erm-g), Google LLC
- [gtcooke94](https://github.com/gtcooke94), Google LLC
- [purnesh42h](https://github.com/purnesh42h), Google LLC
- [zasweq](https://github.com/zasweq), Google LLC
## Emeritus Maintainers (in alphabetical order)
- [adelez](https://github.com/adelez), Google LLC
- [canguler](https://github.com/canguler), Google LLC
- [iamqizhao](https://github.com/iamqizhao), Google LLC
- [jadekler](https://github.com/jadekler), Google LLC
- [jtattermusch](https://github.com/jtattermusch), Google LLC
- [lyuxuan](https://github.com/lyuxuan), Google LLC
- [makmukhi](https://github.com/makmukhi), Google LLC
- [matt-kwong](https://github.com/matt-kwong), Google LLC
- [nicolasnoble](https://github.com/nicolasnoble), Google LLC
- [yongni](https://github.com/yongni), Google LLC
- [adelez](https://github.com/adelez)
- [canguler](https://github.com/canguler)
- [cesarghali](https://github.com/cesarghali)
- [iamqizhao](https://github.com/iamqizhao)
- [jeanbza](https://github.com/jeanbza)
- [jtattermusch](https://github.com/jtattermusch)
- [lyuxuan](https://github.com/lyuxuan)
- [makmukhi](https://github.com/makmukhi)
- [matt-kwong](https://github.com/matt-kwong)
- [menghanl](https://github.com/menghanl)
- [nicolasnoble](https://github.com/nicolasnoble)
- [srini100](https://github.com/srini100)
- [yongni](https://github.com/yongni)

View File

@ -1,3 +1,3 @@
# Security Policy
For information on gRPC Security Policy and reporting potentional security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).
For information on gRPC Security Policy and reporting potential security issues, please see [gRPC CVE Process](https://github.com/grpc/proposal/blob/master/P4-grpc-cve-process.md).

View File

@ -39,7 +39,7 @@ type Config struct {
MaxDelay time.Duration
}
// DefaultConfig is a backoff configuration with the default values specfied
// DefaultConfig is a backoff configuration with the default values specified
// at https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md.
//
// This should be useful for callers who want to configure backoff with

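The DefaultConfig above is what callers typically start from when tuning reconnect behaviour. A minimal sketch of wiring it into a client, assuming the public google.golang.org/grpc/backoff package and plaintext credentials for brevity:

```go
package main

import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/backoff"
	"google.golang.org/grpc/credentials/insecure"
)

// newClient starts from the documented backoff defaults and only raises MaxDelay.
func newClient(target string) (*grpc.ClientConn, error) {
	bc := backoff.DefaultConfig
	bc.MaxDelay = 30 * time.Second

	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithConnectParams(grpc.ConnectParams{Backoff: bc}),
	)
}
```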
View File

@ -30,6 +30,7 @@ import (
"google.golang.org/grpc/channelz"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/metadata"
@ -72,8 +73,21 @@ func unregisterForTesting(name string) {
delete(m, name)
}
// connectedAddress returns the connected address for a SubConnState. The
// address is only valid if the state is READY.
func connectedAddress(scs SubConnState) resolver.Address {
return scs.connectedAddress
}
// setConnectedAddress sets the connected address for a SubConnState.
func setConnectedAddress(scs *SubConnState, addr resolver.Address) {
scs.connectedAddress = addr
}
func init() {
internal.BalancerUnregister = unregisterForTesting
internal.ConnectedAddress = connectedAddress
internal.SetConnectedAddress = setConnectedAddress
}
// Get returns the resolver builder registered with the given name.
@ -116,7 +130,7 @@ type SubConn interface {
// UpdateAddresses updates the addresses used in this SubConn.
// gRPC checks if currently-connected address is still in the new list.
// If it's in the list, the connection will be kept.
// If it's not in the list, the connection will gracefully closed, and
// If it's not in the list, the connection will gracefully close, and
// a new connection will be created.
//
// This will trigger a state transition for the SubConn.
@ -128,8 +142,11 @@ type SubConn interface {
Connect()
// GetOrBuildProducer returns a reference to the existing Producer for this
// ProducerBuilder in this SubConn, or, if one does not currently exist,
// creates a new one and returns it. Returns a close function which must
// be called when the Producer is no longer needed.
// creates a new one and returns it. Returns a close function which may be
// called when the Producer is no longer needed. Otherwise the producer
// will automatically be closed upon connection loss or subchannel close.
// Should only be called on a SubConn in state Ready. Otherwise the
// producer will be unable to create streams.
GetOrBuildProducer(ProducerBuilder) (p Producer, close func())
// Shutdown shuts down the SubConn gracefully. Any started RPCs will be
// allowed to complete. No future calls should be made on the SubConn.
@ -243,6 +260,10 @@ type BuildOptions struct {
// same resolver.Target as passed to the resolver. See the documentation for
// the resolver.Target type for details about what it contains.
Target resolver.Target
// MetricsRecorder is the metrics recorder that balancers can use to record
// metrics. Balancer implementations which do not register metrics on
// metrics registry and record on them can ignore this field.
MetricsRecorder estats.MetricsRecorder
}
// Builder creates a balancer.
@ -410,6 +431,9 @@ type SubConnState struct {
// ConnectionError is set if the ConnectivityState is TransientFailure,
// describing the reason the SubConn failed. Otherwise, it is nil.
ConnectionError error
// connectedAddr contains the connected address when ConnectivityState is
// Ready. Otherwise, it is indeterminate.
connectedAddress resolver.Address
}
// ClientConnState describes the state of a ClientConn relevant to the
@ -431,8 +455,10 @@ type ProducerBuilder interface {
// Build creates a Producer. The first parameter is always a
// grpc.ClientConnInterface (a type to allow creating RPCs/streams on the
// associated SubConn), but is declared as `any` to avoid a dependency
// cycle. Should also return a close function that will be called when all
// references to the Producer have been given up.
// cycle. Build also returns a close function that will be called when all
// references to the Producer have been given up for a SubConn, or when a
// connectivity state change occurs on the SubConn. The close function
// should always block until all asynchronous cleanup work is completed.
Build(grpcClientConnInterface any) (p Producer, close func())
}

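The Build/close contract described above is easiest to see in a concrete shape. The sketch below is hypothetical (not a gRPC-provided producer); it only shows a producer whose close function blocks until its background goroutine exits:

```go
package producersketch

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/balancer"
)

// healthProducerBuilder is a hypothetical ProducerBuilder. Build receives the
// SubConn as a grpc.ClientConnInterface and returns the producer plus a close
// function that blocks until asynchronous cleanup has finished.
type healthProducerBuilder struct{}

type healthProducer struct {
	cc     grpc.ClientConnInterface
	stopCh chan struct{}
	done   chan struct{}
}

func (healthProducerBuilder) Build(grpcClientConnInterface any) (balancer.Producer, func()) {
	p := &healthProducer{
		cc:     grpcClientConnInterface.(grpc.ClientConnInterface),
		stopCh: make(chan struct{}),
		done:   make(chan struct{}),
	}
	go p.run()
	return p, func() {
		close(p.stopCh)
		<-p.done // close blocks until background work has stopped
	}
}

// run is where a real producer would open streams on p.cc; here it just waits.
func (p *healthProducer) run() {
	defer close(p.done)
	<-p.stopCh
}
```

A balancer would obtain the producer with sc.GetOrBuildProducer(healthProducerBuilder{}) on a READY SubConn and call the returned close function once it no longer needs it.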
View File

@ -36,7 +36,7 @@ type baseBuilder struct {
config Config
}
func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
func (bb *baseBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
bal := &baseBalancer{
cc: cc,
pickerBuilder: bb.pickerBuilder,
@ -133,7 +133,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
}
}
// If resolver state contains no addresses, return an error so ClientConn
// will trigger re-resolve. Also records this as an resolver error, so when
// will trigger re-resolve. Also records this as a resolver error, so when
// the overall state turns transient failure, the error message will have
// the zero address information.
if len(s.ResolverState.Addresses) == 0 {
@ -259,6 +259,6 @@ type errPicker struct {
err error // Pick() always returns this err.
}
func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return balancer.PickResult{}, p.err
}

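The base package pairs the connectivity bookkeeping above with a caller-supplied picker. A sketch of a round-robin-style PickerBuilder registered through base.NewBalancerBuilder (the policy name here is illustrative):

```go
package rrsketch

import (
	"sync/atomic"

	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
)

type rrPickerBuilder struct{}

func (rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
	if len(info.ReadySCs) == 0 {
		// Mirrors the errPicker behaviour above when nothing is READY.
		return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
	}
	scs := make([]balancer.SubConn, 0, len(info.ReadySCs))
	for sc := range info.ReadySCs {
		scs = append(scs, sc)
	}
	return &rrPicker{subConns: scs}
}

type rrPicker struct {
	subConns []balancer.SubConn
	next     uint32
}

func (p *rrPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
	i := atomic.AddUint32(&p.next, 1)
	return balancer.PickResult{SubConn: p.subConns[int(i)%len(p.subConns)]}, nil
}

func init() {
	balancer.Register(base.NewBalancerBuilder("round_robin_sketch", rrPickerBuilder{}, base.Config{HealthCheck: true}))
}
```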
View File

@ -0,0 +1,24 @@
/*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains code internal to the pickfirst package.
package internal
import "math/rand"
// RandShuffle pseudo-randomizes the order of addresses.
var RandShuffle = rand.Shuffle

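Because RandShuffle is a package-level variable, tests inside the grpc module can swap it out for a deterministic function. A hypothetical test sketch (not an upstream test):

```go
package pickfirstleaf_test

import (
	"testing"

	"google.golang.org/grpc/balancer/pickfirst/internal"
)

func TestDeterministicShuffle(t *testing.T) {
	orig := internal.RandShuffle
	defer func() { internal.RandShuffle = orig }()

	// Reverse instead of shuffling so assertions on ordering are stable.
	internal.RandShuffle = func(n int, swap func(i, j int)) {
		for i, j := 0, n-1; i < j; i, j = i+1, j-1 {
			swap(i, j)
		}
	}
	// ... drive the balancer with shuffleAddressList enabled and assert on
	// the resulting connection order ...
}
```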
View File

@ -26,18 +26,23 @@ import (
"math/rand"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
_ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
func init() {
if envconfig.NewPickFirstEnabled {
return
}
balancer.Register(pickfirstBuilder{})
internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
}
var logger = grpclog.Component("pick-first-lb")
@ -50,7 +55,7 @@ const (
type pickfirstBuilder struct{}
func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{cc: cc}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
@ -103,10 +108,13 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}
// Shuffler is an interface for shuffling an address list.
type Shuffler interface {
ShuffleAddressListForTesting(n int, swap func(i, j int))
}
// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
// is the number of elements. swap swaps the elements with indexes i and j.
func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
@ -140,7 +148,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for each
@ -155,7 +163,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
// target do not forwarrd the corresponding correct endpoints down/split
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
addrs = state.ResolverState.Addresses

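On the client side, the shuffling handled above is switched on through the pick_first service config. A sketch, assuming the JSON shape defined by gRFC A62:

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// svcCfg enables address shuffling in pick_first; the JSON shape follows gRFC A62.
const svcCfg = `{"loadBalancingConfig": [{"pick_first": {"shuffleAddressList": true}}]}`

func dialShuffled(target string) (*grpc.ClientConn, error) {
	return grpc.NewClient(target,
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(svcCfg),
	)
}
```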
View File

@ -0,0 +1,625 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package pickfirstleaf contains the pick_first load balancing policy which
// will be the universal leaf policy after dualstack changes are implemented.
//
// # Experimental
//
// Notice: This package is EXPERIMENTAL and may be changed or removed in a
// later release.
package pickfirstleaf
import (
"encoding/json"
"errors"
"fmt"
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
func init() {
if envconfig.NewPickFirstEnabled {
// Register as the default pick_first balancer.
Name = "pick_first"
}
balancer.Register(pickfirstBuilder{})
}
var (
logger = grpclog.Component("pick-first-leaf-lb")
// Name is the name of the pick_first_leaf balancer.
// It is changed to "pick_first" in init() if this balancer is to be
// registered as the default pickfirst.
Name = "pick_first_leaf"
)
// TODO: change to pick-first when this becomes the default pick_first policy.
const logPrefix = "[pick-first-leaf-lb %p] "
type pickfirstBuilder struct{}
func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
b := &pickfirstBalancer{
cc: cc,
addressList: addressList{},
subConns: resolver.NewAddressMap(),
state: connectivity.Connecting,
mu: sync.Mutex{},
}
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
func (b pickfirstBuilder) Name() string {
return Name
}
func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
var cfg pfConfig
if err := json.Unmarshal(js, &cfg); err != nil {
return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
}
return cfg, nil
}
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
// If set to true, instructs the LB policy to shuffle the order of the list
// of endpoints received from the name resolver before attempting to
// connect to them.
ShuffleAddressList bool `json:"shuffleAddressList"`
}
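As an illustration of the parsing path just defined, an in-package sketch (not an upstream helper) of round-tripping the flag:

```go
// parseSketch is a hypothetical in-package helper exercising ParseConfig.
func parseSketch() (bool, error) {
	lbCfg, err := pickfirstBuilder{}.ParseConfig(json.RawMessage(`{"shuffleAddressList": true}`))
	if err != nil {
		return false, err
	}
	return lbCfg.(pfConfig).ShuffleAddressList, nil
}
```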
// scData keeps track of the current state of the subConn.
// It is not safe for concurrent access.
type scData struct {
// The following fields are initialized at build time and read-only after
// that.
subConn balancer.SubConn
addr resolver.Address
state connectivity.State
lastErr error
}
func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
sd := &scData{
state: connectivity.Idle,
addr: addr,
}
sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
StateListener: func(state balancer.SubConnState) {
b.updateSubConnState(sd, state)
},
})
if err != nil {
return nil, err
}
sd.subConn = sc
return sd, nil
}
type pickfirstBalancer struct {
// The following fields are initialized at build time and read-only after
// that and therefore do not need to be guarded by a mutex.
logger *internalgrpclog.PrefixLogger
cc balancer.ClientConn
// The mutex is used to ensure synchronization of updates triggered
// from the idle picker and the already serialized resolver,
// SubConn state updates.
mu sync.Mutex
state connectivity.State
// scData for active subconns mapped by address.
subConns *resolver.AddressMap
addressList addressList
firstPass bool
numTF int
}
// ResolverError is called by the ClientConn when the name resolver produces
// an error or when pickfirst determined the resolver update to be invalid.
func (b *pickfirstBalancer) ResolverError(err error) {
b.mu.Lock()
defer b.mu.Unlock()
b.resolverErrorLocked(err)
}
func (b *pickfirstBalancer) resolverErrorLocked(err error) {
if b.logger.V(2) {
b.logger.Infof("Received error from the name resolver: %v", err)
}
// The picker will not change since the balancer does not currently
// report an error. If the balancer hasn't received a single good resolver
// update yet, transition to TRANSIENT_FAILURE.
if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
if b.logger.V(2) {
b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
}
return
}
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
b.mu.Lock()
defer b.mu.Unlock()
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// Cleanup state pertaining to the previous resolver state.
// Treat an empty address list like an error by calling b.ResolverError.
b.state = connectivity.TransientFailure
b.closeSubConnsLocked()
b.addressList.updateAddrs(nil)
b.resolverErrorLocked(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
}
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
var newAddrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
// Perform the optional shuffling described in gRFC A62. The shuffling
// will change the order of endpoints but not touch the order of the
// addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for
// each of the endpoints, in order." - A61
for _, endpoint := range endpoints {
// "In the flattened list, interleave addresses from the two address
// families, as per RFC-8305 section 4." - A61
// TODO: support the above language.
newAddrs = append(newAddrs, endpoint.Addresses...)
}
} else {
// Endpoints not set, process addresses until we migrate resolver
// emissions fully to Endpoints. The top channel does wrap emitted
// addresses with endpoints, however some balancers such as weighted
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
newAddrs = append([]resolver.Address{}, newAddrs...)
internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
// If an address appears in multiple endpoints or in the same endpoint
// multiple times, we keep it only once. We will create only one SubConn
// for the address because an AddressMap is used to store SubConns.
// Not de-duplicating would result in attempting to connect to the same
// SubConn multiple times in the same pass. We don't want this.
newAddrs = deDupAddresses(newAddrs)
// Since we have a new set of addresses, we are again at first pass.
b.firstPass = true
// If the previous ready SubConn exists in new address list,
// keep this connection and don't create new SubConns.
prevAddr := b.addressList.currentAddress()
prevAddrsCount := b.addressList.size()
b.addressList.updateAddrs(newAddrs)
if b.state == connectivity.Ready && b.addressList.seekTo(prevAddr) {
return nil
}
b.reconcileSubConnsLocked(newAddrs)
// If it's the first resolver update or the balancer was already READY
// (but the new address list does not contain the ready SubConn) or
// CONNECTING, enter CONNECTING.
// We may be in TRANSIENT_FAILURE due to a previous empty address list,
// we should still enter CONNECTING because the sticky TF behaviour
// mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
// due to connectivity failures.
if b.state == connectivity.Ready || b.state == connectivity.Connecting || prevAddrsCount == 0 {
// Start connection attempt at first address.
b.state = connectivity.Connecting
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
b.requestConnectionLocked()
} else if b.state == connectivity.TransientFailure {
// If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
// we're READY. See A62.
b.requestConnectionLocked()
}
return nil
}
// UpdateSubConnState is unused as a StateListener is always registered when
// creating SubConns.
func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
}
func (b *pickfirstBalancer) Close() {
b.mu.Lock()
defer b.mu.Unlock()
b.closeSubConnsLocked()
b.state = connectivity.Shutdown
}
// ExitIdle moves the balancer out of idle state. It can be called concurrently
// by the idlePicker and clientConn so access to variables should be
// synchronized.
func (b *pickfirstBalancer) ExitIdle() {
b.mu.Lock()
defer b.mu.Unlock()
if b.state == connectivity.Idle && b.addressList.currentAddress() == b.addressList.first() {
b.firstPass = true
b.requestConnectionLocked()
}
}
func (b *pickfirstBalancer) closeSubConnsLocked() {
for _, sd := range b.subConns.Values() {
sd.(*scData).subConn.Shutdown()
}
b.subConns = resolver.NewAddressMap()
}
// deDupAddresses ensures that each address appears only once in the slice.
func deDupAddresses(addrs []resolver.Address) []resolver.Address {
seenAddrs := resolver.NewAddressMap()
retAddrs := []resolver.Address{}
for _, addr := range addrs {
if _, ok := seenAddrs.Get(addr); ok {
continue
}
retAddrs = append(retAddrs, addr)
seenAddrs.Set(addr, true)
}
return retAddrs
}
// reconcileSubConnsLocked updates the active subchannels based on a new address
// list from the resolver. It does this by:
// - closing subchannels: any existing subchannels associated with addresses
// that are no longer in the updated list are shut down.
// - removing subchannels: entries for these closed subchannels are removed
// from the subchannel map.
//
// This ensures that the subchannel map accurately reflects the current set of
// addresses received from the name resolver.
func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
newAddrsMap := resolver.NewAddressMap()
for _, addr := range newAddrs {
newAddrsMap.Set(addr, true)
}
for _, oldAddr := range b.subConns.Keys() {
if _, ok := newAddrsMap.Get(oldAddr); ok {
continue
}
val, _ := b.subConns.Get(oldAddr)
val.(*scData).subConn.Shutdown()
b.subConns.Delete(oldAddr)
}
}
// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
// becomes ready, which means that all other subConns must be shut down.
func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
for _, v := range b.subConns.Values() {
sd := v.(*scData)
if sd.subConn != selected.subConn {
sd.subConn.Shutdown()
}
}
b.subConns = resolver.NewAddressMap()
b.subConns.Set(selected.addr, selected)
}
// requestConnectionLocked starts connecting on the subchannel corresponding to
// the current address. If no subchannel exists, one is created. If the current
// subchannel is in TransientFailure, a connection to the next address is
// attempted until a subchannel is found.
func (b *pickfirstBalancer) requestConnectionLocked() {
if !b.addressList.isValid() {
return
}
var lastErr error
for valid := true; valid; valid = b.addressList.increment() {
curAddr := b.addressList.currentAddress()
sd, ok := b.subConns.Get(curAddr)
if !ok {
var err error
// We want to assign the new scData to sd from the outer scope,
// hence we can't use := below.
sd, err = b.newSCData(curAddr)
if err != nil {
// This should never happen, unless the clientConn is being shut
// down.
if b.logger.V(2) {
b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
}
// Do nothing, the LB policy will be closed soon.
return
}
b.subConns.Set(curAddr, sd)
}
scd := sd.(*scData)
switch scd.state {
case connectivity.Idle:
scd.subConn.Connect()
case connectivity.TransientFailure:
// Try the next address.
lastErr = scd.lastErr
continue
case connectivity.Ready:
// Should never happen.
b.logger.Errorf("Requesting a connection even though we have a READY SubConn")
case connectivity.Shutdown:
// Should never happen.
b.logger.Errorf("SubConn with state SHUTDOWN present in SubConns map")
case connectivity.Connecting:
// Wait for the SubConn to report success or failure.
}
return
}
// All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
// first pass.
b.endFirstPassLocked(lastErr)
}
func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
b.mu.Lock()
defer b.mu.Unlock()
oldState := sd.state
sd.state = newState.ConnectivityState
// Previously relevant SubConns can still callback with state updates.
// To prevent pickers from returning these obsolete SubConns, this logic
// is included to check if the current list of active SubConns includes this
// SubConn.
if activeSD, found := b.subConns.Get(sd.addr); !found || activeSD != sd {
return
}
if newState.ConnectivityState == connectivity.Shutdown {
return
}
if newState.ConnectivityState == connectivity.Ready {
b.shutdownRemainingLocked(sd)
if !b.addressList.seekTo(sd.addr) {
// This should not fail as we should have only one SubConn after
// entering READY. The SubConn should be present in the addressList.
b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
return
}
b.state = connectivity.Ready
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Ready,
Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
return
}
// If the LB policy is READY, and it receives a subchannel state change,
// it means that the READY subchannel has failed.
// A SubConn can also transition from CONNECTING directly to IDLE when
// a transport is successfully created, but the connection fails
// before the SubConn can send the notification for READY. We treat
// this as a successful connection and transition to IDLE.
if (b.state == connectivity.Ready && newState.ConnectivityState != connectivity.Ready) || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
// Once a transport fails, the balancer enters IDLE and starts from
// the first address when the picker is used.
b.shutdownRemainingLocked(sd)
b.state = connectivity.Idle
b.addressList.reset()
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Idle,
Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
})
return
}
if b.firstPass {
switch newState.ConnectivityState {
case connectivity.Connecting:
// The balancer can be in either IDLE, CONNECTING or
// TRANSIENT_FAILURE. If it's in TRANSIENT_FAILURE, stay in
// TRANSIENT_FAILURE until it's READY. See A62.
// If the balancer is already in CONNECTING, no update is needed.
if b.state == connectivity.Idle {
b.state = connectivity.Connecting
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
}
case connectivity.TransientFailure:
sd.lastErr = newState.ConnectionError
// Since we're re-using common SubConns while handling resolver
// updates, we could receive an out of turn TRANSIENT_FAILURE from
// a pass over the previous address list. We ignore such updates.
if curAddr := b.addressList.currentAddress(); !equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
return
}
if b.addressList.increment() {
b.requestConnectionLocked()
return
}
// End of the first pass.
b.endFirstPassLocked(newState.ConnectionError)
}
return
}
// We have finished the first pass, keep re-connecting failing SubConns.
switch newState.ConnectivityState {
case connectivity.TransientFailure:
b.numTF = (b.numTF + 1) % b.subConns.Len()
sd.lastErr = newState.ConnectionError
if b.numTF%b.subConns.Len() == 0 {
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: newState.ConnectionError},
})
}
// We don't need to request re-resolution since the SubConn already
// does that before reporting TRANSIENT_FAILURE.
// TODO: #7534 - Move re-resolution requests from SubConn into
// pick_first.
case connectivity.Idle:
sd.subConn.Connect()
}
}
func (b *pickfirstBalancer) endFirstPassLocked(lastErr error) {
b.firstPass = false
b.numTF = 0
b.state = connectivity.TransientFailure
b.cc.UpdateState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: lastErr},
})
// Start re-connecting all the SubConns that are already in IDLE.
for _, v := range b.subConns.Values() {
sd := v.(*scData)
if sd.state == connectivity.Idle {
sd.subConn.Connect()
}
}
}
type picker struct {
result balancer.PickResult
err error
}
func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
return p.result, p.err
}
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
exitIdle func()
}
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
i.exitIdle()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
// addressList manages sequentially iterating over addresses present in a list
// of endpoints. It provides a 1 dimensional view of the addresses present in
// the endpoints.
// This type is not safe for concurrent access.
type addressList struct {
addresses []resolver.Address
idx int
}
func (al *addressList) isValid() bool {
return al.idx < len(al.addresses)
}
func (al *addressList) size() int {
return len(al.addresses)
}
// increment moves to the next index in the address list.
// This method returns false if it went off the list, true otherwise.
func (al *addressList) increment() bool {
if !al.isValid() {
return false
}
al.idx++
return al.idx < len(al.addresses)
}
// currentAddress returns the current address pointed to in the addressList.
// If the list is in an invalid state, it returns an empty address instead.
func (al *addressList) currentAddress() resolver.Address {
if !al.isValid() {
return resolver.Address{}
}
return al.addresses[al.idx]
}
// first returns the first address in the list. If the list is empty, it returns
// an empty address instead.
func (al *addressList) first() resolver.Address {
if len(al.addresses) == 0 {
return resolver.Address{}
}
return al.addresses[0]
}
func (al *addressList) reset() {
al.idx = 0
}
func (al *addressList) updateAddrs(addrs []resolver.Address) {
al.addresses = addrs
al.reset()
}
// seekTo returns false if the needle was not found and the current index was
// left unchanged.
func (al *addressList) seekTo(needle resolver.Address) bool {
for ai, addr := range al.addresses {
if !equalAddressIgnoringBalAttributes(&addr, &needle) {
continue
}
al.idx = ai
return true
}
return false
}
// equalAddressIgnoringBalAttributes returns true if a and b are considered
// equal. This is different from the Equal method on the resolver.Address type
// which considers all fields to determine equality. Here, we only consider
// fields that are meaningful to the SubConn.
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
a.Attributes.Equal(b.Attributes) &&
a.Metadata == b.Metadata
}

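The addressList type is essentially a cursor over the flattened address list. A hypothetical in-package helper (mirroring the loop in requestConnectionLocked) shows the intended iteration pattern:

```go
// addressListWalkSketch walks a fresh addressList, trying connect on each
// address until one succeeds or the list is exhausted. Hypothetical helper,
// not part of the upstream package.
func addressListWalkSketch(addrs []resolver.Address, connect func(resolver.Address) bool) bool {
	al := addressList{}
	al.updateAddrs(addrs) // resets the cursor to the first address
	for valid := al.isValid(); valid; valid = al.increment() {
		if connect(al.currentAddress()) {
			return true // the balancer then uses seekTo to pin the READY address
		}
	}
	return false // all addresses failed; endFirstPassLocked would run here
}
```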
View File

@ -24,13 +24,18 @@ import (
"sync"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/status"
)
var setConnectedAddress = internal.SetConnectedAddress.(func(*balancer.SubConnState, resolver.Address))
// ccBalancerWrapper sits between the ClientConn and the Balancer.
//
// ccBalancerWrapper implements methods corresponding to the ones on the
@ -79,6 +84,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
CustomUserAgent: cc.dopts.copts.UserAgent,
ChannelzParent: cc.channelz,
Target: cc.parsedTarget,
MetricsRecorder: cc.metricsRecorderList,
},
serializer: grpcsync.NewCallbackSerializer(ctx),
serializerCancel: cancel,
@ -92,7 +98,7 @@ func newCCBalancerWrapper(cc *ClientConn) *ccBalancerWrapper {
// it is safe to call into the balancer here.
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
errCh := make(chan error)
ok := ccb.serializer.Schedule(func(ctx context.Context) {
uccs := func(ctx context.Context) {
defer close(errCh)
if ctx.Err() != nil || ccb.balancer == nil {
return
@ -107,17 +113,23 @@ func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnStat
logger.Infof("error from balancer.UpdateClientConnState: %v", err)
}
errCh <- err
})
if !ok {
return nil
}
onFailure := func() { close(errCh) }
// UpdateClientConnState can race with Close, and when the latter wins, the
// serializer is closed, and the attempt to schedule the callback will fail.
// It is acceptable to ignore this failure. But since we want to handle the
// state update in a blocking fashion (when we successfully schedule the
callback), we have to use the ScheduleOr method and not the TrySchedule
// method on the serializer.
ccb.serializer.ScheduleOr(uccs, onFailure)
return <-errCh
}
// resolverError is invoked by grpc to push a resolver error to the underlying
// balancer. The call to the balancer is executed from the serializer.
func (ccb *ccBalancerWrapper) resolverError(err error) {
ccb.serializer.Schedule(func(ctx context.Context) {
ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@ -133,7 +145,7 @@ func (ccb *ccBalancerWrapper) close() {
ccb.closed = true
ccb.mu.Unlock()
channelz.Info(logger, ccb.cc.channelz, "ccBalancerWrapper: closing")
ccb.serializer.Schedule(func(context.Context) {
ccb.serializer.TrySchedule(func(context.Context) {
if ccb.balancer == nil {
return
}
@ -145,7 +157,7 @@ func (ccb *ccBalancerWrapper) close() {
// exitIdle invokes the balancer's exitIdle method in the serializer.
func (ccb *ccBalancerWrapper) exitIdle() {
ccb.serializer.Schedule(func(ctx context.Context) {
ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccb.balancer == nil {
return
}
@ -182,7 +194,7 @@ func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer
return acbw, nil
}
func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
func (ccb *ccBalancerWrapper) RemoveSubConn(balancer.SubConn) {
// The graceful switch balancer will never call this.
logger.Errorf("ccb RemoveSubConn(%v) called unexpectedly, sc")
}
@ -246,21 +258,28 @@ type acBalancerWrapper struct {
ccb *ccBalancerWrapper // read-only
stateListener func(balancer.SubConnState)
mu sync.Mutex
producers map[balancer.ProducerBuilder]*refCountedProducer
producersMu sync.Mutex
producers map[balancer.ProducerBuilder]*refCountedProducer
}
// updateState is invoked by grpc to push a subConn state update to the
// underlying balancer.
func (acbw *acBalancerWrapper) updateState(s connectivity.State, err error) {
acbw.ccb.serializer.Schedule(func(ctx context.Context) {
func (acbw *acBalancerWrapper) updateState(s connectivity.State, curAddr resolver.Address, err error) {
acbw.ccb.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || acbw.ccb.balancer == nil {
return
}
// Invalidate all producers on any state change.
acbw.closeProducers()
// Even though it is optional for balancers, gracefulswitch ensures
// opts.StateListener is set, so this cannot ever be nil.
// TODO: delete this comment when UpdateSubConnState is removed.
acbw.stateListener(balancer.SubConnState{ConnectivityState: s, ConnectionError: err})
scs := balancer.SubConnState{ConnectivityState: s, ConnectionError: err}
if s == connectivity.Ready {
setConnectedAddress(&scs, curAddr)
}
acbw.stateListener(scs)
})
}
@ -277,6 +296,7 @@ func (acbw *acBalancerWrapper) Connect() {
}
func (acbw *acBalancerWrapper) Shutdown() {
acbw.closeProducers()
acbw.ccb.cc.removeAddrConn(acbw.ac, errConnDrain)
}
@ -284,9 +304,10 @@ func (acbw *acBalancerWrapper) Shutdown() {
// ready, blocks until it is or ctx expires. Returns an error when the context
// expires or the addrConn is shut down.
func (acbw *acBalancerWrapper) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) {
transport, err := acbw.ac.getTransport(ctx)
if err != nil {
return nil, err
transport := acbw.ac.getReadyTransport()
if transport == nil {
return nil, status.Errorf(codes.Unavailable, "SubConn state is not Ready")
}
return newNonRetryClientStream(ctx, desc, method, transport, acbw.ac, opts...)
}
@ -311,15 +332,15 @@ type refCountedProducer struct {
}
func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (balancer.Producer, func()) {
acbw.mu.Lock()
defer acbw.mu.Unlock()
acbw.producersMu.Lock()
defer acbw.producersMu.Unlock()
// Look up existing producer from this builder.
pData := acbw.producers[pb]
if pData == nil {
// Not found; create a new one and add it to the producers map.
p, close := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: close}
p, closeFn := pb.Build(acbw)
pData = &refCountedProducer{producer: p, close: closeFn}
acbw.producers[pb] = pData
}
// Account for this new reference.
@ -329,13 +350,26 @@ func (acbw *acBalancerWrapper) GetOrBuildProducer(pb balancer.ProducerBuilder) (
// and delete the refCountedProducer from the map if the total reference
// count goes to zero.
unref := func() {
acbw.mu.Lock()
acbw.producersMu.Lock()
// If closeProducers has already closed this producer instance, refs is
// set to 0, so the check after decrementing will never pass, and the
// producer will not be double-closed.
pData.refs--
if pData.refs == 0 {
defer pData.close() // Run outside the acbw mutex
delete(acbw.producers, pb)
}
acbw.mu.Unlock()
acbw.producersMu.Unlock()
}
return pData.producer, grpcsync.OnceFunc(unref)
}
func (acbw *acBalancerWrapper) closeProducers() {
acbw.producersMu.Lock()
defer acbw.producersMu.Unlock()
for pb, pData := range acbw.producers {
pData.refs = 0
pData.close()
delete(acbw.producers, pb)
}
}

View File

@ -18,8 +18,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v4.25.2
// protoc-gen-go v1.34.2
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
package grpc_binarylog_v1
@ -1015,7 +1015,7 @@ func file_grpc_binlog_v1_binarylog_proto_rawDescGZIP() []byte {
var file_grpc_binlog_v1_binarylog_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
var file_grpc_binlog_v1_binarylog_proto_msgTypes = make([]protoimpl.MessageInfo, 8)
var file_grpc_binlog_v1_binarylog_proto_goTypes = []interface{}{
var file_grpc_binlog_v1_binarylog_proto_goTypes = []any{
(GrpcLogEntry_EventType)(0), // 0: grpc.binarylog.v1.GrpcLogEntry.EventType
(GrpcLogEntry_Logger)(0), // 1: grpc.binarylog.v1.GrpcLogEntry.Logger
(Address_Type)(0), // 2: grpc.binarylog.v1.Address.Type
@ -1058,7 +1058,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*GrpcLogEntry); i {
case 0:
return &v.state
@ -1070,7 +1070,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ClientHeader); i {
case 0:
return &v.state
@ -1082,7 +1082,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ServerHeader); i {
case 0:
return &v.state
@ -1094,7 +1094,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*Trailer); i {
case 0:
return &v.state
@ -1106,7 +1106,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Message); i {
case 0:
return &v.state
@ -1118,7 +1118,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*Metadata); i {
case 0:
return &v.state
@ -1130,7 +1130,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*MetadataEntry); i {
case 0:
return &v.state
@ -1142,7 +1142,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
return nil
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
file_grpc_binlog_v1_binarylog_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*Address); i {
case 0:
return &v.state
@ -1155,7 +1155,7 @@ func file_grpc_binlog_v1_binarylog_proto_init() {
}
}
}
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []interface{}{
file_grpc_binlog_v1_binarylog_proto_msgTypes[0].OneofWrappers = []any{
(*GrpcLogEntry_ClientHeader)(nil),
(*GrpcLogEntry_ServerHeader)(nil),
(*GrpcLogEntry_Message)(nil),

View File

@ -24,6 +24,7 @@ import (
"fmt"
"math"
"net/url"
"slices"
"strings"
"sync"
"sync/atomic"
@ -39,6 +40,7 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
"google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
@ -194,8 +196,11 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
return cc, nil
}
@ -590,13 +595,14 @@ type ClientConn struct {
cancel context.CancelFunc // Cancelled on close.
// The following are initialized at dial time, and are read-only after that.
target string // User's dial target.
parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
authority string // See initAuthority().
dopts dialOptions // Default and user specified dial options.
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
target string // User's dial target.
parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
authority string // See initAuthority().
dopts dialOptions // Default and user specified dial options.
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
metricsRecorderList *stats.MetricsRecorderList
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
@ -626,11 +632,6 @@ type ClientConn struct {
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
// ctx expires. A true value is returned in former case and false in latter.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool {
ch := cc.csMgr.getNotifyChan()
if cc.csMgr.getState() != sourceState {
@ -645,11 +646,6 @@ func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connec
}
// GetState returns the connectivity.State of ClientConn.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
// release.
func (cc *ClientConn) GetState() connectivity.State {
return cc.csMgr.getState()
}
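With the experimental notices removed, GetState and WaitForStateChange can be relied on for simple readiness waits. A minimal sketch (waitReady is a hypothetical helper):

```go
package main

import (
	"context"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// waitReady blocks until cc reports READY or the timeout expires.
func waitReady(cc *grpc.ClientConn, timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	cc.Connect() // kick an idle channel into CONNECTING
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return true
		}
		if !cc.WaitForStateChange(ctx, s) {
			return false // context expired before the state changed
		}
	}
}
```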
@ -812,17 +808,11 @@ func (cc *ClientConn) applyFailingLBLocked(sc *serviceconfig.ParseResult) {
cc.csMgr.updateState(connectivity.TransientFailure)
}
// Makes a copy of the input addresses slice and clears out the balancer
// attributes field. Addresses are passed during subconn creation and address
// update operations. In both cases, we will clear the balancer attributes by
// calling this function, and therefore we will be able to use the Equal method
// provided by the resolver.Address type for comparison.
func copyAddressesWithoutBalancerAttributes(in []resolver.Address) []resolver.Address {
// Makes a copy of the input addresses slice. Addresses are passed during
// subconn creation and address update operations.
func copyAddresses(in []resolver.Address) []resolver.Address {
out := make([]resolver.Address, len(in))
for i := range in {
out[i] = in[i]
out[i].BalancerAttributes = nil
}
copy(out, in)
return out
}
@ -837,12 +827,11 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer.
ac := &addrConn{
state: connectivity.Idle,
cc: cc,
addrs: copyAddressesWithoutBalancerAttributes(addrs),
addrs: copyAddresses(addrs),
scopts: opts,
dopts: cc.dopts,
channelz: channelz.RegisterSubChannel(cc.channelz, ""),
resetBackoff: make(chan struct{}),
stateChan: make(chan struct{}),
}
ac.ctx, ac.cancel = context.WithCancel(cc.ctx)
// Start with our address set to the first address; this may be updated if
@ -918,28 +907,29 @@ func (ac *addrConn) connect() error {
ac.mu.Unlock()
return nil
}
ac.mu.Unlock()
ac.resetTransport()
ac.resetTransportAndUnlock()
return nil
}
func equalAddresses(a, b []resolver.Address) bool {
if len(a) != len(b) {
return false
}
for i, v := range a {
if !v.Equal(b[i]) {
return false
}
}
return true
// equalAddressIgnoringBalAttributes returns true if a and b are considered equal.
// This is different from the Equal method on the resolver.Address type which
// considers all fields to determine equality. Here, we only consider fields
// that are meaningful to the subConn.
func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
return a.Addr == b.Addr && a.ServerName == b.ServerName &&
a.Attributes.Equal(b.Attributes) &&
a.Metadata == b.Metadata
}
func equalAddressesIgnoringBalAttributes(a, b []resolver.Address) bool {
return slices.EqualFunc(a, b, func(a, b resolver.Address) bool { return equalAddressIgnoringBalAttributes(&a, &b) })
}
// updateAddrs updates ac.addrs with the new addresses list and handles active
// connections or connection attempts.
func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
addrs = copyAddressesWithoutBalancerAttributes(addrs)
addrs = copyAddresses(addrs)
limit := len(addrs)
if limit > 5 {
limit = 5
@ -947,7 +937,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
channelz.Infof(logger, ac.channelz, "addrConn: updateAddrs addrs (%d of %d): %v", limit, len(addrs), addrs[:limit])
ac.mu.Lock()
if equalAddresses(ac.addrs, addrs) {
if equalAddressesIgnoringBalAttributes(ac.addrs, addrs) {
ac.mu.Unlock()
return
}
@ -966,7 +956,7 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
// Try to find the connected address.
for _, a := range addrs {
a.ServerName = ac.cc.getServerName(a)
if a.Equal(ac.curAddr) {
if equalAddressIgnoringBalAttributes(&a, &ac.curAddr) {
// We are connected to a valid address, so do nothing but
// update the addresses.
ac.mu.Unlock()
@ -992,11 +982,9 @@ func (ac *addrConn) updateAddrs(addrs []resolver.Address) {
ac.updateConnectivityState(connectivity.Idle, nil)
}
ac.mu.Unlock()
// Since we were connecting/connected, we should start a new connection
// attempt.
go ac.resetTransport()
go ac.resetTransportAndUnlock()
}
// getServerName determines the serverName to be used in the connection
@ -1152,10 +1140,15 @@ func (cc *ClientConn) Close() error {
<-cc.resolverWrapper.serializer.Done()
<-cc.balancerWrapper.serializer.Done()
var wg sync.WaitGroup
for ac := range conns {
ac.tearDown(ErrClientConnClosing)
wg.Add(1)
go func(ac *addrConn) {
defer wg.Done()
ac.tearDown(ErrClientConnClosing)
}(ac)
}
wg.Wait()
cc.addTraceEvent("deleted")
// TraceEvent needs to be called before RemoveEntry, as TraceEvent may add
// trace reference to the entity being deleted, and thus prevent it from being
@ -1190,8 +1183,7 @@ type addrConn struct {
addrs []resolver.Address // All addresses that the resolver resolved to.
// Use updateConnectivityState for updating addrConn's connectivity state.
state connectivity.State
stateChan chan struct{} // closed and recreated on every state change.
state connectivity.State
backoffIdx int // Needs to be stateful for resetConnectBackoff.
resetBackoff chan struct{}
@ -1204,9 +1196,6 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
if ac.state == s {
return
}
// When changing states, reset the state change channel.
close(ac.stateChan)
ac.stateChan = make(chan struct{})
ac.state = s
ac.channelz.ChannelMetrics.State.Store(&s)
if lastErr == nil {
@ -1214,7 +1203,7 @@ func (ac *addrConn) updateConnectivityState(s connectivity.State, lastErr error)
} else {
channelz.Infof(logger, ac.channelz, "Subchannel Connectivity change to %v, last error: %s", s, lastErr)
}
ac.acbw.updateState(s, lastErr)
ac.acbw.updateState(s, ac.curAddr, lastErr)
}
// adjustParams updates parameters used to create transports upon
@ -1231,8 +1220,10 @@ func (ac *addrConn) adjustParams(r transport.GoAwayReason) {
}
}
func (ac *addrConn) resetTransport() {
ac.mu.Lock()
// resetTransportAndUnlock unconditionally connects the addrConn.
//
// ac.mu must be held by the caller, and this function will guarantee it is released.
func (ac *addrConn) resetTransportAndUnlock() {
acCtx := ac.ctx
if acCtx.Err() != nil {
ac.mu.Unlock()
@ -1263,6 +1254,8 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
if err := ac.tryAllAddrs(acCtx, addrs, connectDeadline); err != nil {
// TODO: #7534 - Move re-resolution requests into the pick_first LB policy
// to ensure one resolution request per pass instead of per subconn failure.
ac.cc.resolveNow(resolver.ResolveNowOptions{})
ac.mu.Lock()
if acCtx.Err() != nil {
@ -1304,7 +1297,7 @@ func (ac *addrConn) resetTransport() {
ac.mu.Unlock()
}
// tryAllAddrs tries to creates a connection to the addresses, and stop when at
// tryAllAddrs tries to create a connection to the addresses, and stop when at
// the first successful one. It returns an error if no address was successfully
// connected, or updates ac appropriately with the new transport.
func (ac *addrConn) tryAllAddrs(ctx context.Context, addrs []resolver.Address, connectDeadline time.Time) error {
@ -1516,29 +1509,6 @@ func (ac *addrConn) getReadyTransport() transport.ClientTransport {
return nil
}
// getTransport waits until the addrconn is ready and returns the transport.
// If the context expires first, returns an appropriate status. If the
// addrConn is stopped first, returns an Unavailable status error.
func (ac *addrConn) getTransport(ctx context.Context) (transport.ClientTransport, error) {
for ctx.Err() == nil {
ac.mu.Lock()
t, state, sc := ac.transport, ac.state, ac.stateChan
ac.mu.Unlock()
if state == connectivity.Ready {
return t, nil
}
if state == connectivity.Shutdown {
return nil, status.Errorf(codes.Unavailable, "SubConn shutting down")
}
select {
case <-ctx.Done():
case <-sc:
}
}
return nil, status.FromContextError(ctx.Err()).Err()
}
// tearDown starts to tear down the addrConn.
//
// Note that tearDown doesn't remove ac from ac.cc.conns, so the addrConn struct
@ -1585,7 +1555,7 @@ func (ac *addrConn) tearDown(err error) {
} else {
// Hard close the transport when the channel is entering idle or is
// being shutdown. In the case where the channel is being shutdown,
// closing of transports is also taken care of by cancelation of cc.ctx.
// closing of transports is also taken care of by cancellation of cc.ctx.
// But in the case where the channel is entering idle, we need to
// explicitly close the transports here. Instead of distinguishing
// between these two cases, it is simpler to close the transport

View File

@ -21,18 +21,73 @@ package grpc
import (
"google.golang.org/grpc/encoding"
_ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto"
"google.golang.org/grpc/mem"
)
// baseCodec contains the functionality of both Codec and encoding.Codec, but
// omits the name/string, which vary between the two and are not needed for
// anything besides the registry in the encoding package.
// baseCodec captures the new encoding.CodecV2 interface without the Name
// function, allowing it to be implemented by older Codec and encoding.Codec
implementations. The omitted Name function is only needed for the registry in
// the encoding package and is not part of the core functionality.
type baseCodec interface {
Marshal(v any) ([]byte, error)
Unmarshal(data []byte, v any) error
Marshal(v any) (mem.BufferSlice, error)
Unmarshal(data mem.BufferSlice, v any) error
}
var _ baseCodec = Codec(nil)
var _ baseCodec = encoding.Codec(nil)
// getCodec returns an encoding.CodecV2 for the codec of the given name (if
// registered). It first checks the V1 registry with encoding.GetCodec and, if a
// V1 codec is registered, wraps it with newCodecV1Bridge to turn it into an
// encoding.CodecV2. Otherwise, it returns the result of checking the V2
// registry with encoding.GetCodecV2, which is nil if nothing is registered.
func getCodec(name string) encoding.CodecV2 {
if codecV1 := encoding.GetCodec(name); codecV1 != nil {
return newCodecV1Bridge(codecV1)
}
return encoding.GetCodecV2(name)
}
func newCodecV0Bridge(c Codec) baseCodec {
return codecV0Bridge{codec: c}
}
func newCodecV1Bridge(c encoding.Codec) encoding.CodecV2 {
return codecV1Bridge{
codecV0Bridge: codecV0Bridge{codec: c},
name: c.Name(),
}
}
var _ baseCodec = codecV0Bridge{}
type codecV0Bridge struct {
codec interface {
Marshal(v any) ([]byte, error)
Unmarshal(data []byte, v any) error
}
}
func (c codecV0Bridge) Marshal(v any) (mem.BufferSlice, error) {
data, err := c.codec.Marshal(v)
if err != nil {
return nil, err
}
return mem.BufferSlice{mem.NewBuffer(&data, nil)}, nil
}
func (c codecV0Bridge) Unmarshal(data mem.BufferSlice, v any) (err error) {
return c.codec.Unmarshal(data.Materialize(), v)
}
var _ encoding.CodecV2 = codecV1Bridge{}
type codecV1Bridge struct {
codecV0Bridge
name string
}
func (c codecV1Bridge) Name() string {
return c.name
}
// Codec defines the interface gRPC uses to encode and decode messages.
// Note that implementations of this interface must be thread safe;

View File

@ -40,7 +40,7 @@ func NewCredentials() credentials.TransportCredentials {
// NoSecurity.
type insecureTC struct{}
func (insecureTC) ClientHandshake(ctx context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
func (insecureTC) ClientHandshake(_ context.Context, _ string, conn net.Conn) (net.Conn, credentials.AuthInfo, error) {
return conn, info{credentials.CommonAuthInfo{SecurityLevel: credentials.NoSecurity}}, nil
}

View File

@ -200,25 +200,40 @@ var tls12ForbiddenCipherSuites = map[uint16]struct{}{
// NewTLS uses c to construct a TransportCredentials based on TLS.
func NewTLS(c *tls.Config) TransportCredentials {
tc := &tlsCreds{credinternal.CloneTLSConfig(c)}
tc.config.NextProtos = credinternal.AppendH2ToNextProtos(tc.config.NextProtos)
config := applyDefaults(c)
if config.GetConfigForClient != nil {
oldFn := config.GetConfigForClient
config.GetConfigForClient = func(hello *tls.ClientHelloInfo) (*tls.Config, error) {
cfgForClient, err := oldFn(hello)
if err != nil || cfgForClient == nil {
return cfgForClient, err
}
return applyDefaults(cfgForClient), nil
}
}
return &tlsCreds{config: config}
}
func applyDefaults(c *tls.Config) *tls.Config {
config := credinternal.CloneTLSConfig(c)
config.NextProtos = credinternal.AppendH2ToNextProtos(config.NextProtos)
// If the user did not configure a MinVersion and did not configure a
// MaxVersion < 1.2, use MinVersion=1.2, which is required by
// https://datatracker.ietf.org/doc/html/rfc7540#section-9.2
if tc.config.MinVersion == 0 && (tc.config.MaxVersion == 0 || tc.config.MaxVersion >= tls.VersionTLS12) {
tc.config.MinVersion = tls.VersionTLS12
if config.MinVersion == 0 && (config.MaxVersion == 0 || config.MaxVersion >= tls.VersionTLS12) {
config.MinVersion = tls.VersionTLS12
}
// If the user did not configure CipherSuites, use all "secure" cipher
// suites reported by the TLS package, but remove some explicitly forbidden
// by https://datatracker.ietf.org/doc/html/rfc7540#appendix-A
if tc.config.CipherSuites == nil {
if config.CipherSuites == nil {
for _, cs := range tls.CipherSuites() {
if _, ok := tls12ForbiddenCipherSuites[cs.ID]; !ok {
tc.config.CipherSuites = append(tc.config.CipherSuites, cs.ID)
config.CipherSuites = append(config.CipherSuites, cs.ID)
}
}
}
return tc
return config
}
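A rough usage sketch of the defaulting above: a client tls.Config that leaves MinVersion, CipherSuites, and NextProtos unset still comes out HTTP/2-ready, because NewTLS clones the config and fills in TLS 1.2, the filtered cipher-suite list, and "h2". The helper and its name below are illustrative only, not part of this change.

package example

import (
	"crypto/tls"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

// dialWithTLS is a hypothetical helper; NewTLS applies the defaults above
// without mutating the caller's tls.Config.
func dialWithTLS(target string, cfg *tls.Config) (*grpc.ClientConn, error) {
	creds := credentials.NewTLS(cfg)
	return grpc.NewClient(target, grpc.WithTransportCredentials(creds))
}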
// NewClientTLSFromCert constructs TLS credentials from the provided root

View File

@ -33,6 +33,7 @@ import (
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/stats"
)
@ -60,7 +61,7 @@ func init() {
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
internal.WithRecvBufferPool = withRecvBufferPool
internal.WithBufferPool = withBufferPool
}
// dialOptions configure a Dial call. dialOptions are set by the DialOption
@ -92,7 +93,6 @@ type dialOptions struct {
defaultServiceConfigRawJSON *string
resolvers []resolver.Builder
idleTimeout time.Duration
recvBufferPool SharedBufferPool
defaultScheme string
maxCallAttempts int
}
@ -436,7 +436,7 @@ func WithTimeout(d time.Duration) DialOption {
// option to true from the Control field. For a concrete example of how to do
// this, see internal.NetDialerWithTCPKeepalive().
//
// For more information, please see [issue 23459] in the Go github repo.
// For more information, please see [issue 23459] in the Go GitHub repo.
//
// [issue 23459]: https://github.com/golang/go/issues/23459
func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption {
@ -518,6 +518,8 @@ func WithUserAgent(s string) DialOption {
// WithKeepaliveParams returns a DialOption that specifies keepalive parameters
// for the client transport.
//
// Keepalive is disabled by default.
func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption {
if kp.Time < internal.KeepaliveMinPingTime {
logger.Warningf("Adjusting keepalive ping interval to minimum period of %v", internal.KeepaliveMinPingTime)
@ -677,11 +679,11 @@ func defaultDialOptions() dialOptions {
WriteBufferSize: defaultWriteBufSize,
UseProxy: true,
UserAgent: grpcUA,
BufferPool: mem.DefaultBufferPool(),
},
bs: internalbackoff.DefaultExponential,
healthCheckFunc: internal.HealthCheckFunc,
idleTimeout: 30 * time.Minute,
recvBufferPool: nopBufferPool{},
defaultScheme: "dns",
maxCallAttempts: defaultMaxCallAttempts,
}
@ -758,25 +760,8 @@ func WithMaxCallAttempts(n int) DialOption {
})
}
// WithRecvBufferPool returns a DialOption that configures the ClientConn
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
//
// If you are unsure about how to implement a memory pool but want to utilize one,
// begin with grpc.NewSharedBufferPool.
//
// Note: The shared buffer pool feature will not be active if any of the following
// options are used: WithStatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
// v1.60.0 or later.
func WithRecvBufferPool(bufferPool SharedBufferPool) DialOption {
return withRecvBufferPool(bufferPool)
}
func withRecvBufferPool(bufferPool SharedBufferPool) DialOption {
func withBufferPool(bufferPool mem.BufferPool) DialOption {
return newFuncDialOption(func(o *dialOptions) {
o.recvBufferPool = bufferPool
o.copts.BufferPool = bufferPool
})
}

View File

@ -16,7 +16,7 @@
*
*/
//go:generate ./regenerate.sh
//go:generate ./scripts/regenerate.sh
/*
Package grpc implements an RPC system called gRPC.

View File

@ -94,7 +94,7 @@ type Codec interface {
Name() string
}
var registeredCodecs = make(map[string]Codec)
var registeredCodecs = make(map[string]any)
// RegisterCodec registers the provided Codec for use with all gRPC clients and
// servers.
@ -126,5 +126,6 @@ func RegisterCodec(codec Codec) {
//
// The content-subtype is expected to be lowercase.
func GetCodec(contentSubtype string) Codec {
return registeredCodecs[contentSubtype]
c, _ := registeredCodecs[contentSubtype].(Codec)
return c
}

81
vendor/google.golang.org/grpc/encoding/encoding_v2.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package encoding
import (
"strings"
"google.golang.org/grpc/mem"
)
// CodecV2 defines the interface gRPC uses to encode and decode messages. Note
// that implementations of this interface must be thread safe; a CodecV2's
// methods can be called from concurrent goroutines.
type CodecV2 interface {
// Marshal returns the wire format of v. The buffers in the returned
// [mem.BufferSlice] must have at least one reference each, which will be freed
// by gRPC when they are no longer needed.
Marshal(v any) (out mem.BufferSlice, err error)
// Unmarshal parses the wire format into v. Note that data will be freed as soon
// as this function returns. If the codec wishes to guarantee access to the data
// after this function, it must take its own reference that it frees when it is
// no longer needed.
Unmarshal(data mem.BufferSlice, v any) error
// Name returns the name of the Codec implementation. The returned string
// will be used as part of content type in transmission. The result must be
// static; the result cannot change between calls.
Name() string
}
// RegisterCodecV2 registers the provided CodecV2 for use with all gRPC clients and
// servers.
//
// The CodecV2 will be stored and looked up by result of its Name() method, which
// should match the content-subtype of the encoding handled by the CodecV2. This
// is case-insensitive, and is stored and looked up as lowercase. If the
// result of calling Name() is an empty string, RegisterCodecV2 will panic. See
// Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details.
//
// If both a Codec and CodecV2 are registered with the same name, the CodecV2
// will be used.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple Codecs are
// registered with the same name, the one registered last will take effect.
func RegisterCodecV2(codec CodecV2) {
if codec == nil {
panic("cannot register a nil CodecV2")
}
if codec.Name() == "" {
panic("cannot register CodecV2 with empty string result for Name()")
}
contentSubtype := strings.ToLower(codec.Name())
registeredCodecs[contentSubtype] = codec
}
// GetCodecV2 gets a registered CodecV2 by content-subtype, or nil if no CodecV2 is
// registered for the content-subtype.
//
// The content-subtype is expected to be lowercase.
func GetCodecV2(contentSubtype string) CodecV2 {
c, _ := registeredCodecs[contentSubtype].(CodecV2)
return c
}
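For illustration, a minimal CodecV2 registration could look like the sketch below. The JSON codec and its package are hypothetical; the encoding and mem calls mirror the bridge code elsewhere in this change.

package jsoncodec

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
	"google.golang.org/grpc/mem"
)

// codec is a hypothetical CodecV2 that encodes messages as JSON.
type codec struct{}

func (codec) Marshal(v any) (mem.BufferSlice, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	// A single unpooled buffer; gRPC frees it when no longer needed.
	return mem.BufferSlice{mem.NewBuffer(&b, nil)}, nil
}

func (codec) Unmarshal(data mem.BufferSlice, v any) error {
	// Materialize flattens the BufferSlice into one contiguous []byte.
	return json.Unmarshal(data.Materialize(), v)
}

func (codec) Name() string { return "json" }

func init() {
	// Registered at initialization time, as required above; looked up later
	// via GetCodecV2("json").
	encoding.RegisterCodecV2(codec{})
}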

View File

@ -1,6 +1,6 @@
/*
*
* Copyright 2018 gRPC authors.
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -24,6 +24,7 @@ import (
"fmt"
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/protoadapt"
)
@ -32,28 +33,51 @@ import (
const Name = "proto"
func init() {
encoding.RegisterCodec(codec{})
encoding.RegisterCodecV2(&codecV2{})
}
// codec is a Codec implementation with protobuf. It is the default codec for gRPC.
type codec struct{}
// codecV2 is a CodecV2 implementation with protobuf. It is the default codec
// for gRPC.
type codecV2 struct{}
func (codec) Marshal(v any) ([]byte, error) {
func (c *codecV2) Marshal(v any) (data mem.BufferSlice, err error) {
vv := messageV2Of(v)
if vv == nil {
return nil, fmt.Errorf("failed to marshal, message is %T, want proto.Message", v)
return nil, fmt.Errorf("proto: failed to marshal, message is %T, want proto.Message", v)
}
return proto.Marshal(vv)
size := proto.Size(vv)
if mem.IsBelowBufferPoolingThreshold(size) {
buf, err := proto.Marshal(vv)
if err != nil {
return nil, err
}
data = append(data, mem.SliceBuffer(buf))
} else {
pool := mem.DefaultBufferPool()
buf := pool.Get(size)
if _, err := (proto.MarshalOptions{}).MarshalAppend((*buf)[:0], vv); err != nil {
pool.Put(buf)
return nil, err
}
data = append(data, mem.NewBuffer(buf, pool))
}
return data, nil
}
func (codec) Unmarshal(data []byte, v any) error {
func (c *codecV2) Unmarshal(data mem.BufferSlice, v any) (err error) {
vv := messageV2Of(v)
if vv == nil {
return fmt.Errorf("failed to unmarshal, message is %T, want proto.Message", v)
}
return proto.Unmarshal(data, vv)
buf := data.MaterializeToBuffer(mem.DefaultBufferPool())
defer buf.Free()
// TODO: Upgrade proto.Unmarshal to support mem.BufferSlice. Right now, it's not
// really possible without a major overhaul of the proto package, but the
// vtprotobuf library may be able to support this.
return proto.Unmarshal(buf.ReadOnlyData(), vv)
}
func messageV2Of(v any) proto.Message {
@ -67,6 +91,6 @@ func messageV2Of(v any) proto.Message {
return nil
}
func (codec) Name() string {
func (c *codecV2) Name() string {
return Name
}

View File

@ -0,0 +1,269 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package stats
import (
"maps"
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal"
)
func init() {
internal.SnapshotMetricRegistryForTesting = snapshotMetricsRegistryForTesting
}
var logger = grpclog.Component("metrics-registry")
// DefaultMetrics are the default metrics registered through global metrics
// registry. This is written to at initialization time only, and is read only
// after initialization.
var DefaultMetrics = NewMetrics()
// MetricDescriptor is the data for a registered metric.
type MetricDescriptor struct {
// The name of this metric. This name must be unique across the whole binary
// (including any per call metrics). See
// https://github.com/grpc/proposal/blob/master/A79-non-per-call-metrics-architecture.md#metric-instrument-naming-conventions
// for metric naming conventions.
Name Metric
// The description of this metric.
Description string
// The unit (e.g. entries, seconds) of this metric.
Unit string
// The required label keys for this metric. These are intended to be
// attached to metrics emitted from a stats handler.
Labels []string
// The optional label keys for this metric. These are intended to be
// attached to metrics emitted from a stats handler if configured.
OptionalLabels []string
// Whether this metric is on by default.
Default bool
// The type of metric. This is set by the metric registry, and not intended
// to be set by a component registering a metric.
Type MetricType
// Bounds are the bounds of this metric. This only applies to histogram
// metrics. If unset or set with length 0, stats handlers will fall back to
// default bounds.
Bounds []float64
}
// MetricType is the type of metric.
type MetricType int
// Type of metric supported by this instrument registry.
const (
MetricTypeIntCount MetricType = iota
MetricTypeFloatCount
MetricTypeIntHisto
MetricTypeFloatHisto
MetricTypeIntGauge
)
// Int64CountHandle is a typed handle for an int count metric. This handle
// is passed at the recording point in order to know which metric to record
// on.
type Int64CountHandle MetricDescriptor
// Descriptor returns the int64 count handle typecast to a pointer to a
// MetricDescriptor.
func (h *Int64CountHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the int64 count value on the metrics recorder provided.
func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
recorder.RecordInt64Count(h, incr, labels...)
}
// Float64CountHandle is a typed handle for a float count metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Float64CountHandle MetricDescriptor
// Descriptor returns the float64 count handle typecast to a pointer to a
// MetricDescriptor.
func (h *Float64CountHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the float64 count value on the metrics recorder provided.
func (h *Float64CountHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
recorder.RecordFloat64Count(h, incr, labels...)
}
// Int64HistoHandle is a typed handle for an int histogram metric. This handle
// is passed at the recording point in order to know which metric to record on.
type Int64HistoHandle MetricDescriptor
// Descriptor returns the int64 histo handle typecast to a pointer to a
// MetricDescriptor.
func (h *Int64HistoHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the int64 histo value on the metrics recorder provided.
func (h *Int64HistoHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
recorder.RecordInt64Histo(h, incr, labels...)
}
// Float64HistoHandle is a typed handle for a float histogram metric. This
// handle is passed at the recording point in order to know which metric to
// record on.
type Float64HistoHandle MetricDescriptor
// Descriptor returns the float64 histo handle typecast to a pointer to a
// MetricDescriptor.
func (h *Float64HistoHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the float64 histo value on the metrics recorder provided.
func (h *Float64HistoHandle) Record(recorder MetricsRecorder, incr float64, labels ...string) {
recorder.RecordFloat64Histo(h, incr, labels...)
}
// Int64GaugeHandle is a typed handle for an int gauge metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Int64GaugeHandle MetricDescriptor
// Descriptor returns the int64 gauge handle typecast to a pointer to a
// MetricDescriptor.
func (h *Int64GaugeHandle) Descriptor() *MetricDescriptor {
return (*MetricDescriptor)(h)
}
// Record records the int64 gauge value on the metrics recorder provided.
func (h *Int64GaugeHandle) Record(recorder MetricsRecorder, incr int64, labels ...string) {
recorder.RecordInt64Gauge(h, incr, labels...)
}
// registeredMetrics are the registered metric descriptor names.
var registeredMetrics = make(map[Metric]bool)
// metricsRegistry contains all of the registered metrics.
//
// This is written to only at init time, and read only after that.
var metricsRegistry = make(map[Metric]*MetricDescriptor)
// DescriptorForMetric returns the MetricDescriptor from the global registry.
//
// Returns nil if MetricDescriptor not present.
func DescriptorForMetric(metric Metric) *MetricDescriptor {
return metricsRegistry[metric]
}
func registerMetric(name Metric, def bool) {
if registeredMetrics[name] {
logger.Fatalf("metric %v already registered", name)
}
registeredMetrics[name] = true
if def {
DefaultMetrics = DefaultMetrics.Add(name)
}
}
// RegisterInt64Count registers the metric description onto the global registry.
// It returns a typed handle to use to record data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterInt64Count(descriptor MetricDescriptor) *Int64CountHandle {
registerMetric(descriptor.Name, descriptor.Default)
descriptor.Type = MetricTypeIntCount
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Int64CountHandle)(descPtr)
}
// RegisterFloat64Count registers the metric description onto the global
// registry. It returns a typed handle to use to record data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterFloat64Count(descriptor MetricDescriptor) *Float64CountHandle {
registerMetric(descriptor.Name, descriptor.Default)
descriptor.Type = MetricTypeFloatCount
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Float64CountHandle)(descPtr)
}
// RegisterInt64Histo registers the metric description onto the global registry.
// It returns a typed handle to use to record data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterInt64Histo(descriptor MetricDescriptor) *Int64HistoHandle {
registerMetric(descriptor.Name, descriptor.Default)
descriptor.Type = MetricTypeIntHisto
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Int64HistoHandle)(descPtr)
}
// RegisterFloat64Histo registers the metric description onto the global
// registry. It returns a typed handle to use to record data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterFloat64Histo(descriptor MetricDescriptor) *Float64HistoHandle {
registerMetric(descriptor.Name, descriptor.Default)
descriptor.Type = MetricTypeFloatHisto
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Float64HistoHandle)(descPtr)
}
// RegisterInt64Gauge registers the metric description onto the global registry.
// It returns a typed handle to use to record data.
//
// NOTE: this function must only be called during initialization time (i.e. in
// an init() function), and is not thread-safe. If multiple metrics are
// registered with the same name, this function will panic.
func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
registerMetric(descriptor.Name, descriptor.Default)
descriptor.Type = MetricTypeIntGauge
descPtr := &descriptor
metricsRegistry[descriptor.Name] = descPtr
return (*Int64GaugeHandle)(descPtr)
}
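A minimal sketch of the registration and recording flow, assuming this package is importable as google.golang.org/grpc/experimental/stats; the metric name, labels, and helper below are hypothetical.

package example

import estats "google.golang.org/grpc/experimental/stats"

// pickCount is registered via package-level initialization, which keeps the
// call inside init time as the NOTEs above require.
var pickCount = estats.RegisterInt64Count(estats.MetricDescriptor{
	Name:        "grpc.example.lb.picks", // hypothetical metric name
	Description: "Number of picks performed by the example balancer.",
	Unit:        "pick",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// recordPick records one pick on whatever MetricsRecorder the stats handler
// provides at the recording point.
func recordPick(rec estats.MetricsRecorder, target string) {
	pickCount.Record(rec, 1, target)
}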
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.
func snapshotMetricsRegistryForTesting() func() {
oldDefaultMetrics := DefaultMetrics
oldRegisteredMetrics := registeredMetrics
oldMetricsRegistry := metricsRegistry
registeredMetrics = make(map[Metric]bool)
metricsRegistry = make(map[Metric]*MetricDescriptor)
// Seed the fresh maps from the previous contents so existing registrations
// remain visible while the snapshot is active.
maps.Copy(registeredMetrics, oldRegisteredMetrics)
maps.Copy(metricsRegistry, oldMetricsRegistry)
return func() {
DefaultMetrics = oldDefaultMetrics
registeredMetrics = oldRegisteredMetrics
metricsRegistry = oldMetricsRegistry
}
}

View File

@ -0,0 +1,114 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package stats contains experimental metrics/stats APIs.
package stats
import "maps"
// MetricsRecorder records on metrics derived from the metric registry.
type MetricsRecorder interface {
// RecordInt64Count records the measurement alongside labels on the int
// count associated with the provided handle.
RecordInt64Count(handle *Int64CountHandle, incr int64, labels ...string)
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
RecordFloat64Count(handle *Float64CountHandle, incr float64, labels ...string)
// RecordInt64Histo records the measurement alongside labels on the int
// histo associated with the provided handle.
RecordInt64Histo(handle *Int64HistoHandle, incr int64, labels ...string)
// RecordFloat64Histo records the measurement alongside labels on the float
// histo associated with the provided handle.
RecordFloat64Histo(handle *Float64HistoHandle, incr float64, labels ...string)
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
}
// Metric is an identifier for a metric.
type Metric string
// Metrics is a set of metrics to record. Once created, Metrics is immutable;
// however, Add and Remove can make copies with specific metrics added or
// removed, respectively.
//
// Do not construct directly; use NewMetrics instead.
type Metrics struct {
// metrics are the set of metrics to initialize.
metrics map[Metric]bool
}
// NewMetrics returns a Metrics containing the provided metrics.
func NewMetrics(metrics ...Metric) *Metrics {
newMetrics := make(map[Metric]bool)
for _, metric := range metrics {
newMetrics[metric] = true
}
return &Metrics{
metrics: newMetrics,
}
}
// Metrics returns the metrics set. The returned map is read-only and must not
// be modified.
func (m *Metrics) Metrics() map[Metric]bool {
return m.metrics
}
// Add adds the metrics to the metrics set and returns a new copy with the
// additional metrics.
func (m *Metrics) Add(metrics ...Metric) *Metrics {
newMetrics := make(map[Metric]bool)
for metric := range m.metrics {
newMetrics[metric] = true
}
for _, metric := range metrics {
newMetrics[metric] = true
}
return &Metrics{
metrics: newMetrics,
}
}
// Join joins the metrics passed in with the metrics set, and returns a new copy
// with the merged metrics.
func (m *Metrics) Join(metrics *Metrics) *Metrics {
newMetrics := make(map[Metric]bool)
maps.Copy(newMetrics, m.metrics)
maps.Copy(newMetrics, metrics.metrics)
return &Metrics{
metrics: newMetrics,
}
}
// Remove removes the metrics from the metrics set and returns a new copy with
// the metrics removed.
func (m *Metrics) Remove(metrics ...Metric) *Metrics {
newMetrics := make(map[Metric]bool)
for metric := range m.metrics {
newMetrics[metric] = true
}
for _, metric := range metrics {
delete(newMetrics, metric)
}
return &Metrics{
metrics: newMetrics,
}
}
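Because every method above returns a fresh copy, metric sets can be composed without worrying about shared state; a short sketch follows (metric names hypothetical, import path assumed to be google.golang.org/grpc/experimental/stats):

package example

import estats "google.golang.org/grpc/experimental/stats"

func exampleMetricSets() map[estats.Metric]bool {
	// base stays usable after deriving the other sets; nothing is mutated.
	base := estats.NewMetrics("grpc.example.picks", "grpc.example.failures")
	trimmed := base.Remove("grpc.example.failures")
	merged := trimmed.Join(base.Add("grpc.example.latency"))
	return merged.Metrics() // read-only view; must not be modified
}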

View File

@ -20,8 +20,6 @@ package grpclog
import (
"fmt"
"google.golang.org/grpc/internal/grpclog"
)
// componentData records the settings for a component.
@ -33,22 +31,22 @@ var cache = map[string]*componentData{}
func (c *componentData) InfoDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.InfoDepth(depth+1, args...)
InfoDepth(depth+1, args...)
}
func (c *componentData) WarningDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.WarningDepth(depth+1, args...)
WarningDepth(depth+1, args...)
}
func (c *componentData) ErrorDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.ErrorDepth(depth+1, args...)
ErrorDepth(depth+1, args...)
}
func (c *componentData) FatalDepth(depth int, args ...any) {
args = append([]any{"[" + string(c.name) + "]"}, args...)
grpclog.FatalDepth(depth+1, args...)
FatalDepth(depth+1, args...)
}
func (c *componentData) Info(args ...any) {

View File

@ -18,18 +18,15 @@
// Package grpclog defines logging for grpc.
//
// All logs in transport and grpclb packages only go to verbose level 2.
// All logs in other packages in grpc are logged regardless of the verbosity level.
//
// In the default logger,
// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL,
// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL.
package grpclog // import "google.golang.org/grpc/grpclog"
// In the default logger, severity level can be set by environment variable
// GRPC_GO_LOG_SEVERITY_LEVEL, verbosity level can be set by
// GRPC_GO_LOG_VERBOSITY_LEVEL.
package grpclog
import (
"os"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/grpclog/internal"
)
func init() {
@ -38,58 +35,58 @@ func init() {
// V reports whether verbosity level l is at least the requested verbose level.
func V(l int) bool {
return grpclog.Logger.V(l)
return internal.LoggerV2Impl.V(l)
}
// Info logs to the INFO log.
func Info(args ...any) {
grpclog.Logger.Info(args...)
internal.LoggerV2Impl.Info(args...)
}
// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf.
func Infof(format string, args ...any) {
grpclog.Logger.Infof(format, args...)
internal.LoggerV2Impl.Infof(format, args...)
}
// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println.
func Infoln(args ...any) {
grpclog.Logger.Infoln(args...)
internal.LoggerV2Impl.Infoln(args...)
}
// Warning logs to the WARNING log.
func Warning(args ...any) {
grpclog.Logger.Warning(args...)
internal.LoggerV2Impl.Warning(args...)
}
// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf.
func Warningf(format string, args ...any) {
grpclog.Logger.Warningf(format, args...)
internal.LoggerV2Impl.Warningf(format, args...)
}
// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println.
func Warningln(args ...any) {
grpclog.Logger.Warningln(args...)
internal.LoggerV2Impl.Warningln(args...)
}
// Error logs to the ERROR log.
func Error(args ...any) {
grpclog.Logger.Error(args...)
internal.LoggerV2Impl.Error(args...)
}
// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf.
func Errorf(format string, args ...any) {
grpclog.Logger.Errorf(format, args...)
internal.LoggerV2Impl.Errorf(format, args...)
}
// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println.
func Errorln(args ...any) {
grpclog.Logger.Errorln(args...)
internal.LoggerV2Impl.Errorln(args...)
}
// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print.
// It calls os.Exit() with exit code 1.
func Fatal(args ...any) {
grpclog.Logger.Fatal(args...)
internal.LoggerV2Impl.Fatal(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@ -97,15 +94,15 @@ func Fatal(args ...any) {
// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf.
// It calls os.Exit() with exit code 1.
func Fatalf(format string, args ...any) {
grpclog.Logger.Fatalf(format, args...)
internal.LoggerV2Impl.Fatalf(format, args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println.
// It calle os.Exit()) with exit code 1.
// It calls os.Exit() with exit code 1.
func Fatalln(args ...any) {
grpclog.Logger.Fatalln(args...)
internal.LoggerV2Impl.Fatalln(args...)
// Make sure fatal logs will exit.
os.Exit(1)
}
@ -114,19 +111,76 @@ func Fatalln(args ...any) {
//
// Deprecated: use Info.
func Print(args ...any) {
grpclog.Logger.Info(args...)
internal.LoggerV2Impl.Info(args...)
}
// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf.
//
// Deprecated: use Infof.
func Printf(format string, args ...any) {
grpclog.Logger.Infof(format, args...)
internal.LoggerV2Impl.Infof(format, args...)
}
// Println prints to the logger. Arguments are handled in the manner of fmt.Println.
//
// Deprecated: use Infoln.
func Println(args ...any) {
grpclog.Logger.Infoln(args...)
internal.LoggerV2Impl.Infoln(args...)
}
// InfoDepth logs to the INFO log at the specified depth.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func InfoDepth(depth int, args ...any) {
if internal.DepthLoggerV2Impl != nil {
internal.DepthLoggerV2Impl.InfoDepth(depth, args...)
} else {
internal.LoggerV2Impl.Infoln(args...)
}
}
// WarningDepth logs to the WARNING log at the specified depth.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func WarningDepth(depth int, args ...any) {
if internal.DepthLoggerV2Impl != nil {
internal.DepthLoggerV2Impl.WarningDepth(depth, args...)
} else {
internal.LoggerV2Impl.Warningln(args...)
}
}
// ErrorDepth logs to the ERROR log at the specified depth.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func ErrorDepth(depth int, args ...any) {
if internal.DepthLoggerV2Impl != nil {
internal.DepthLoggerV2Impl.ErrorDepth(depth, args...)
} else {
internal.LoggerV2Impl.Errorln(args...)
}
}
// FatalDepth logs to the FATAL log at the specified depth.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func FatalDepth(depth int, args ...any) {
if internal.DepthLoggerV2Impl != nil {
internal.DepthLoggerV2Impl.FatalDepth(depth, args...)
} else {
internal.LoggerV2Impl.Fatalln(args...)
}
os.Exit(1)
}
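With both the depth helpers and the default logger now routed through grpclog/internal, replacing the logger remains a single call; a hedged sketch of setting a custom verbosity at startup (the example package is hypothetical):

package example

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Send INFO, WARNING, and ERROR to stderr at verbosity level 2.
	// SetLoggerV2 is not mutex-protected, so it must run before any other
	// gRPC call.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 2))
}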

View File

@ -0,0 +1,26 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package internal contains functionality internal to the grpclog package.
package internal
// LoggerV2Impl is the logger used for the non-depth log functions.
var LoggerV2Impl LoggerV2
// DepthLoggerV2Impl is the logger used for the depth log functions.
var DepthLoggerV2Impl DepthLoggerV2

View File

@ -0,0 +1,87 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package internal
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...any)
Fatalf(format string, args ...any)
Fatalln(args ...any)
Print(args ...any)
Printf(format string, args ...any)
Println(args ...any)
}
// LoggerWrapper wraps Logger into a LoggerV2.
type LoggerWrapper struct {
Logger
}
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
func (l *LoggerWrapper) Info(args ...any) {
l.Logger.Print(args...)
}
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
func (l *LoggerWrapper) Infoln(args ...any) {
l.Logger.Println(args...)
}
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
func (l *LoggerWrapper) Infof(format string, args ...any) {
l.Logger.Printf(format, args...)
}
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
func (l *LoggerWrapper) Warning(args ...any) {
l.Logger.Print(args...)
}
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
func (l *LoggerWrapper) Warningln(args ...any) {
l.Logger.Println(args...)
}
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
func (l *LoggerWrapper) Warningf(format string, args ...any) {
l.Logger.Printf(format, args...)
}
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
func (l *LoggerWrapper) Error(args ...any) {
l.Logger.Print(args...)
}
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
func (l *LoggerWrapper) Errorln(args ...any) {
l.Logger.Println(args...)
}
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
func (l *LoggerWrapper) Errorf(format string, args ...any) {
l.Logger.Printf(format, args...)
}
// V reports whether verbosity level l is at least the requested verbose level.
func (*LoggerWrapper) V(int) bool {
// Returns true for all verbose level.
return true
}

View File

@ -1,6 +1,6 @@
/*
*
* Copyright 2020 gRPC authors.
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@ -16,59 +16,17 @@
*
*/
// Package grpclog (internal) defines depth logging for grpc.
package grpclog
package internal
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
)
// Logger is the logger used for the non-depth log functions.
var Logger LoggerV2
// DepthLogger is the logger used for the depth log functions.
var DepthLogger DepthLoggerV2
// InfoDepth logs to the INFO log at the specified depth.
func InfoDepth(depth int, args ...any) {
if DepthLogger != nil {
DepthLogger.InfoDepth(depth, args...)
} else {
Logger.Infoln(args...)
}
}
// WarningDepth logs to the WARNING log at the specified depth.
func WarningDepth(depth int, args ...any) {
if DepthLogger != nil {
DepthLogger.WarningDepth(depth, args...)
} else {
Logger.Warningln(args...)
}
}
// ErrorDepth logs to the ERROR log at the specified depth.
func ErrorDepth(depth int, args ...any) {
if DepthLogger != nil {
DepthLogger.ErrorDepth(depth, args...)
} else {
Logger.Errorln(args...)
}
}
// FatalDepth logs to the FATAL log at the specified depth.
func FatalDepth(depth int, args ...any) {
if DepthLogger != nil {
DepthLogger.FatalDepth(depth, args...)
} else {
Logger.Fatalln(args...)
}
os.Exit(1)
}
// LoggerV2 does underlying logging work for grpclog.
// This is a copy of the LoggerV2 defined in the external grpclog package. It
// is defined here to avoid a circular dependency.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
@ -107,14 +65,13 @@ type LoggerV2 interface {
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
// This is a copy of the DepthLoggerV2 defined in the external grpclog package.
// It is defined here to avoid a circular dependency.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
@ -124,3 +81,124 @@ type DepthLoggerV2 interface {
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
jsonFormat bool
}
func (g *loggerT) output(severity int, s string) {
sevStr := severityName[severity]
if !g.jsonFormat {
g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
return
}
// TODO: we can also include the logging component, but that needs more
// (API) changes.
b, _ := json.Marshal(map[string]string{
"severity": sevStr,
"message": s,
})
g.m[severity].Output(2, string(b))
}
func (g *loggerT) Info(args ...any) {
g.output(infoLog, fmt.Sprint(args...))
}
func (g *loggerT) Infoln(args ...any) {
g.output(infoLog, fmt.Sprintln(args...))
}
func (g *loggerT) Infof(format string, args ...any) {
g.output(infoLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Warning(args ...any) {
g.output(warningLog, fmt.Sprint(args...))
}
func (g *loggerT) Warningln(args ...any) {
g.output(warningLog, fmt.Sprintln(args...))
}
func (g *loggerT) Warningf(format string, args ...any) {
g.output(warningLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Error(args ...any) {
g.output(errorLog, fmt.Sprint(args...))
}
func (g *loggerT) Errorln(args ...any) {
g.output(errorLog, fmt.Sprintln(args...))
}
func (g *loggerT) Errorf(format string, args ...any) {
g.output(errorLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Fatal(args ...any) {
g.output(fatalLog, fmt.Sprint(args...))
os.Exit(1)
}
func (g *loggerT) Fatalln(args ...any) {
g.output(fatalLog, fmt.Sprintln(args...))
os.Exit(1)
}
func (g *loggerT) Fatalf(format string, args ...any) {
g.output(fatalLog, fmt.Sprintf(format, args...))
os.Exit(1)
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
// LoggerV2Config configures the LoggerV2 implementation.
type LoggerV2Config struct {
// Verbosity sets the verbosity level of the logger.
Verbosity int
// FormatJSON controls whether the logger should output logs in JSON format.
FormatJSON bool
}
// NewLoggerV2 creates a new LoggerV2 instance with the provided configuration.
// The infoW, warningW, and errorW writers are used to write log messages of
// different severity levels.
func NewLoggerV2(infoW, warningW, errorW io.Writer, c LoggerV2Config) LoggerV2 {
var m []*log.Logger
flag := log.LstdFlags
if c.FormatJSON {
flag = 0
}
m = append(m, log.New(infoW, "", flag))
m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, "", flag))
m = append(m, log.New(ew, "", flag))
return &loggerT{m: m, v: c.Verbosity, jsonFormat: c.FormatJSON}
}

View File

@ -18,70 +18,17 @@
package grpclog
import "google.golang.org/grpc/internal/grpclog"
import "google.golang.org/grpc/grpclog/internal"
// Logger mimics golang's standard Logger as an interface.
//
// Deprecated: use LoggerV2.
type Logger interface {
Fatal(args ...any)
Fatalf(format string, args ...any)
Fatalln(args ...any)
Print(args ...any)
Printf(format string, args ...any)
Println(args ...any)
}
type Logger internal.Logger
// SetLogger sets the logger that is used in grpc. Call only from
// init() functions.
//
// Deprecated: use SetLoggerV2.
func SetLogger(l Logger) {
grpclog.Logger = &loggerWrapper{Logger: l}
}
// loggerWrapper wraps Logger into a LoggerV2.
type loggerWrapper struct {
Logger
}
func (g *loggerWrapper) Info(args ...any) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Infoln(args ...any) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Infof(format string, args ...any) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Warning(args ...any) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Warningln(args ...any) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Warningf(format string, args ...any) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) Error(args ...any) {
g.Logger.Print(args...)
}
func (g *loggerWrapper) Errorln(args ...any) {
g.Logger.Println(args...)
}
func (g *loggerWrapper) Errorf(format string, args ...any) {
g.Logger.Printf(format, args...)
}
func (g *loggerWrapper) V(l int) bool {
// Returns true for all verbose level.
return true
internal.LoggerV2Impl = &internal.LoggerWrapper{Logger: l}
}

View File

@ -19,52 +19,16 @@
package grpclog
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
"strconv"
"strings"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/grpclog/internal"
)
// LoggerV2 does underlying logging work for grpclog.
type LoggerV2 interface {
// Info logs to INFO log. Arguments are handled in the manner of fmt.Print.
Info(args ...any)
// Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println.
Infoln(args ...any)
// Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf.
Infof(format string, args ...any)
// Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print.
Warning(args ...any)
// Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println.
Warningln(args ...any)
// Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf.
Warningf(format string, args ...any)
// Error logs to ERROR log. Arguments are handled in the manner of fmt.Print.
Error(args ...any)
// Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
Errorln(args ...any)
// Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
Errorf(format string, args ...any)
// Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatal(args ...any)
// Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalln(args ...any)
// Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf.
// gRPC ensures that all Fatal logs will exit with os.Exit(1).
// Implementations may also call os.Exit() with a non-zero exit code.
Fatalf(format string, args ...any)
// V reports whether verbosity level l is at least the requested verbose level.
V(l int) bool
}
type LoggerV2 internal.LoggerV2
// SetLoggerV2 sets logger that is used in grpc to a V2 logger.
// Not mutex-protected, should be called before any gRPC functions.
@ -72,34 +36,8 @@ func SetLoggerV2(l LoggerV2) {
if _, ok := l.(*componentData); ok {
panic("cannot use component logger as grpclog logger")
}
grpclog.Logger = l
grpclog.DepthLogger, _ = l.(grpclog.DepthLoggerV2)
}
const (
// infoLog indicates Info severity.
infoLog int = iota
// warningLog indicates Warning severity.
warningLog
// errorLog indicates Error severity.
errorLog
// fatalLog indicates Fatal severity.
fatalLog
)
// severityName contains the string representation of each severity.
var severityName = []string{
infoLog: "INFO",
warningLog: "WARNING",
errorLog: "ERROR",
fatalLog: "FATAL",
}
// loggerT is the default logger used by grpclog.
type loggerT struct {
m []*log.Logger
v int
jsonFormat bool
internal.LoggerV2Impl = l
internal.DepthLoggerV2Impl, _ = l.(internal.DepthLoggerV2)
}
// NewLoggerV2 creates a loggerV2 with the provided writers.
@ -108,32 +46,13 @@ type loggerT struct {
// Warning logs will be written to warningW and infoW.
// Info logs will be written to infoW.
func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{})
return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{})
}
// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and
// verbosity level.
func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 {
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{verbose: v})
}
type loggerV2Config struct {
verbose int
jsonFormat bool
}
func newLoggerV2WithConfig(infoW, warningW, errorW io.Writer, c loggerV2Config) LoggerV2 {
var m []*log.Logger
flag := log.LstdFlags
if c.jsonFormat {
flag = 0
}
m = append(m, log.New(infoW, "", flag))
m = append(m, log.New(io.MultiWriter(infoW, warningW), "", flag))
ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal.
m = append(m, log.New(ew, "", flag))
m = append(m, log.New(ew, "", flag))
return &loggerT{m: m, v: c.verbose, jsonFormat: c.jsonFormat}
return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{Verbosity: v})
}
// newLoggerV2 creates a loggerV2 to be used as default logger.
@ -161,82 +80,12 @@ func newLoggerV2() LoggerV2 {
jsonFormat := strings.EqualFold(os.Getenv("GRPC_GO_LOG_FORMATTER"), "json")
return newLoggerV2WithConfig(infoW, warningW, errorW, loggerV2Config{
verbose: v,
jsonFormat: jsonFormat,
return internal.NewLoggerV2(infoW, warningW, errorW, internal.LoggerV2Config{
Verbosity: v,
FormatJSON: jsonFormat,
})
}
func (g *loggerT) output(severity int, s string) {
sevStr := severityName[severity]
if !g.jsonFormat {
g.m[severity].Output(2, fmt.Sprintf("%v: %v", sevStr, s))
return
}
// TODO: we can also include the logging component, but that needs more
// (API) changes.
b, _ := json.Marshal(map[string]string{
"severity": sevStr,
"message": s,
})
g.m[severity].Output(2, string(b))
}
func (g *loggerT) Info(args ...any) {
g.output(infoLog, fmt.Sprint(args...))
}
func (g *loggerT) Infoln(args ...any) {
g.output(infoLog, fmt.Sprintln(args...))
}
func (g *loggerT) Infof(format string, args ...any) {
g.output(infoLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Warning(args ...any) {
g.output(warningLog, fmt.Sprint(args...))
}
func (g *loggerT) Warningln(args ...any) {
g.output(warningLog, fmt.Sprintln(args...))
}
func (g *loggerT) Warningf(format string, args ...any) {
g.output(warningLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Error(args ...any) {
g.output(errorLog, fmt.Sprint(args...))
}
func (g *loggerT) Errorln(args ...any) {
g.output(errorLog, fmt.Sprintln(args...))
}
func (g *loggerT) Errorf(format string, args ...any) {
g.output(errorLog, fmt.Sprintf(format, args...))
}
func (g *loggerT) Fatal(args ...any) {
g.output(fatalLog, fmt.Sprint(args...))
os.Exit(1)
}
func (g *loggerT) Fatalln(args ...any) {
g.output(fatalLog, fmt.Sprintln(args...))
os.Exit(1)
}
func (g *loggerT) Fatalf(format string, args ...any) {
g.output(fatalLog, fmt.Sprintf(format, args...))
os.Exit(1)
}
func (g *loggerT) V(l int) bool {
return l <= g.v
}
// DepthLoggerV2 logs at a specified call frame. If a LoggerV2 also implements
// DepthLoggerV2, the below functions will be called with the appropriate stack
// depth set for trivial functions the logger may ignore.
@ -245,14 +94,4 @@ func (g *loggerT) V(l int) bool {
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type DepthLoggerV2 interface {
LoggerV2
// InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println.
InfoDepth(depth int, args ...any)
// WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println.
WarningDepth(depth int, args ...any)
// ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println.
ErrorDepth(depth int, args ...any)
// FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println.
FatalDepth(depth int, args ...any)
}
type DepthLoggerV2 internal.DepthLoggerV2

View File

@ -17,8 +17,8 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
// protoc-gen-go v1.34.1
// protoc v4.25.2
// protoc-gen-go v1.34.2
// protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@ -237,7 +237,7 @@ func file_grpc_health_v1_health_proto_rawDescGZIP() []byte {
var file_grpc_health_v1_health_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_grpc_health_v1_health_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
var file_grpc_health_v1_health_proto_goTypes = []interface{}{
var file_grpc_health_v1_health_proto_goTypes = []any{
(HealthCheckResponse_ServingStatus)(0), // 0: grpc.health.v1.HealthCheckResponse.ServingStatus
(*HealthCheckRequest)(nil), // 1: grpc.health.v1.HealthCheckRequest
(*HealthCheckResponse)(nil), // 2: grpc.health.v1.HealthCheckResponse
@ -261,7 +261,7 @@ func file_grpc_health_v1_health_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
file_grpc_health_v1_health_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@ -273,7 +273,7 @@ func file_grpc_health_v1_health_proto_init() {
return nil
}
}
file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
file_grpc_health_v1_health_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckResponse); i {
case 0:
return &v.state

View File

@ -17,8 +17,8 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.4.0
// - protoc v4.25.2
// - protoc-gen-go-grpc v1.5.1
// - protoc v5.27.1
// source: grpc/health/v1/health.proto
package grpc_health_v1
@ -32,8 +32,8 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
// Requires gRPC-Go v1.62.0 or later.
const _ = grpc.SupportPackageIsVersion8
// Requires gRPC-Go v1.64.0 or later.
const _ = grpc.SupportPackageIsVersion9
const (
Health_Check_FullMethodName = "/grpc.health.v1.Health/Check"
@ -73,7 +73,7 @@ type HealthClient interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error)
Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error)
}
type healthClient struct {
@ -94,13 +94,13 @@ func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts .
return out, nil
}
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (Health_WatchClient, error) {
func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (grpc.ServerStreamingClient[HealthCheckResponse], error) {
cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
stream, err := c.cc.NewStream(ctx, &Health_ServiceDesc.Streams[0], Health_Watch_FullMethodName, cOpts...)
if err != nil {
return nil, err
}
x := &healthWatchClient{ClientStream: stream}
x := &grpc.GenericClientStream[HealthCheckRequest, HealthCheckResponse]{ClientStream: stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
@ -110,26 +110,12 @@ func (c *healthClient) Watch(ctx context.Context, in *HealthCheckRequest, opts .
return x, nil
}
type Health_WatchClient interface {
Recv() (*HealthCheckResponse, error)
grpc.ClientStream
}
type healthWatchClient struct {
grpc.ClientStream
}
func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
m := new(HealthCheckResponse)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Health_WatchClient = grpc.ServerStreamingClient[HealthCheckResponse]
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
// for forward compatibility
// for forward compatibility.
//
// Health is gRPC's mechanism for checking whether a server is able to handle
// RPCs. Its semantics are documented in
@ -160,19 +146,23 @@ type HealthServer interface {
// should assume this method is not supported and should not retry the
// call. If the call terminates with any other status (including OK),
// clients should retry the call with appropriate exponential backoff.
Watch(*HealthCheckRequest, Health_WatchServer) error
Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error
}
// UnimplementedHealthServer should be embedded to have forward compatible implementations.
type UnimplementedHealthServer struct {
}
// UnimplementedHealthServer should be embedded to have
// forward compatible implementations.
//
// NOTE: this should be embedded by value instead of pointer to avoid a nil
// pointer dereference when methods are called.
type UnimplementedHealthServer struct{}
func (UnimplementedHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Check not implemented")
}
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, Health_WatchServer) error {
func (UnimplementedHealthServer) Watch(*HealthCheckRequest, grpc.ServerStreamingServer[HealthCheckResponse]) error {
return status.Errorf(codes.Unimplemented, "method Watch not implemented")
}
func (UnimplementedHealthServer) testEmbeddedByValue() {}
// UnsafeHealthServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to HealthServer will
@ -182,6 +172,13 @@ type UnsafeHealthServer interface {
}
func RegisterHealthServer(s grpc.ServiceRegistrar, srv HealthServer) {
// If the following call panics, it indicates UnimplementedHealthServer was
// embedded by pointer and is nil. This will cause panics if an
// unimplemented method is ever invoked, so we test this at initialization
// time to prevent it from happening at runtime later due to I/O.
if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
t.testEmbeddedByValue()
}
s.RegisterService(&Health_ServiceDesc, srv)
}
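// Illustrative sketch, not part of the generated file: a HealthServer that
// embeds UnimplementedHealthServer by value, as recommended above, and
// implements Watch against the generic server stream type. The struct name
// and the fixed SERVING status are made up for the example; it would be
// registered with RegisterHealthServer(grpcServer, exampleHealthServer{}).
type exampleHealthServer struct {
	UnimplementedHealthServer // Embedded by value so the registration-time check passes.
}

func (exampleHealthServer) Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) {
	return &HealthCheckResponse{Status: HealthCheckResponse_SERVING}, nil
}

func (exampleHealthServer) Watch(_ *HealthCheckRequest, stream grpc.ServerStreamingServer[HealthCheckResponse]) error {
	// A real implementation would keep streaming status changes; this sends a
	// single update and ends the stream.
	return stream.Send(&HealthCheckResponse{Status: HealthCheckResponse_SERVING})
}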
@ -208,21 +205,11 @@ func _Health_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(HealthServer).Watch(m, &healthWatchServer{ServerStream: stream})
return srv.(HealthServer).Watch(m, &grpc.GenericServerStream[HealthCheckRequest, HealthCheckResponse]{ServerStream: stream})
}
type Health_WatchServer interface {
Send(*HealthCheckResponse) error
grpc.ServerStream
}
type healthWatchServer struct {
grpc.ServerStream
}
func (x *healthWatchServer) Send(m *HealthCheckResponse) error {
return x.ServerStream.SendMsg(m)
}
// This type alias is provided for backwards compatibility with existing code that references the prior non-generic stream type by name.
type Health_WatchServer = grpc.ServerStreamingServer[HealthCheckResponse]
// Health_ServiceDesc is the grpc.ServiceDesc for Health service.
// It's only intended for direct use with grpc.RegisterService,

View File

@ -33,6 +33,8 @@ type lbConfig struct {
childConfig serviceconfig.LoadBalancingConfig
}
// ChildName returns the name of the child balancer of the gracefulswitch
// Balancer.
func ChildName(l serviceconfig.LoadBalancingConfig) string {
return l.(*lbConfig).childBuilder.Name()
}

View File

@ -106,7 +106,7 @@ func (ml *TruncatingMethodLogger) Build(c LogEntryConfig) *binlogpb.GrpcLogEntry
}
// Log creates a proto binary log entry, and logs it to the sink.
func (ml *TruncatingMethodLogger) Log(ctx context.Context, c LogEntryConfig) {
func (ml *TruncatingMethodLogger) Log(_ context.Context, c LogEntryConfig) {
ml.sink.Write(ml.Build(c))
}

View File

@ -43,6 +43,8 @@ type Channel struct {
// Non-zero traceRefCount means the trace of this channel cannot be deleted.
traceRefCount int32
// ChannelMetrics holds connectivity state, target and call metrics for the
// channel within channelz.
ChannelMetrics ChannelMetrics
}
@ -50,6 +52,8 @@ type Channel struct {
// nesting.
func (c *Channel) channelzIdentifier() {}
// String returns a string representation of the Channel, including its parent
// entity and ID.
func (c *Channel) String() string {
if c.Parent == nil {
return fmt.Sprintf("Channel #%d", c.ID)
@ -61,24 +65,31 @@ func (c *Channel) id() int64 {
return c.ID
}
// SubChans returns a copy of the map of sub-channels associated with the
// Channel.
func (c *Channel) SubChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.subChans)
}
// NestedChans returns a copy of the map of nested channels associated with the
// Channel.
func (c *Channel) NestedChans() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(c.nestedChans)
}
// Trace returns a copy of the Channel's trace data.
func (c *Channel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()
return c.trace.copy()
}
// ChannelMetrics holds connectivity state, target and call metrics for the
// channel within channelz.
type ChannelMetrics struct {
// The current connectivity state of the channel.
State atomic.Pointer[connectivity.State]
@ -136,12 +147,16 @@ func strFromPointer(s *string) string {
return *s
}
// String returns a string representation of the ChannelMetrics, including its
// state, target, and call metrics.
func (c *ChannelMetrics) String() string {
return fmt.Sprintf("State: %v, Target: %s, CallsStarted: %v, CallsSucceeded: %v, CallsFailed: %v, LastCallStartedTimestamp: %v",
c.State.Load(), strFromPointer(c.Target.Load()), c.CallsStarted.Load(), c.CallsSucceeded.Load(), c.CallsFailed.Load(), c.LastCallStartedTimestamp.Load(),
)
}
// NewChannelMetricForTesting creates a new instance of ChannelMetrics with
// specified initial values for testing purposes.
func NewChannelMetricForTesting(state connectivity.State, target string, started, succeeded, failed, timestamp int64) *ChannelMetrics {
c := &ChannelMetrics{}
c.State.Store(&state)

View File

@ -46,7 +46,7 @@ type entry interface {
// channelMap is the storage data structure for channelz.
//
// Methods of channelMap can be divided in two two categories with respect to
// Methods of channelMap can be divided into two categories with respect to
// locking.
//
// 1. Methods acquire the global lock.
@ -234,13 +234,6 @@ func copyMap(m map[int64]string) map[int64]string {
return n
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func (c *channelMap) getTopChannels(id int64, maxResults int) ([]*Channel, bool) {
if maxResults <= 0 {
maxResults = EntriesPerPage

View File

@ -33,7 +33,7 @@ var (
// outside this package except by tests.
IDGen IDGenerator
db *channelMap = newChannelMap()
db = newChannelMap()
// EntriesPerPage defines the number of channelz entries to be shown on a web page.
EntriesPerPage = 50
curState int32

View File

@ -59,6 +59,8 @@ func NewServerMetricsForTesting(started, succeeded, failed, timestamp int64) *Se
return sm
}
// CopyFrom copies the metrics data from the provided ServerMetrics
// instance into the current instance.
func (sm *ServerMetrics) CopyFrom(o *ServerMetrics) {
sm.CallsStarted.Store(o.CallsStarted.Load())
sm.CallsSucceeded.Store(o.CallsSucceeded.Load())

View File

@ -70,13 +70,18 @@ type EphemeralSocketMetrics struct {
RemoteFlowControlWindow int64
}
// SocketType represents the type of socket.
type SocketType string
// SocketType can be one of these.
const (
SocketTypeNormal = "NormalSocket"
SocketTypeListen = "ListenSocket"
)
// Socket represents a socket within channelz which includes socket
// metrics and data related to socket activity and provides methods
// for managing and interacting with sockets.
type Socket struct {
Entity
SocketType SocketType
@ -100,6 +105,8 @@ type Socket struct {
Security credentials.ChannelzSecurityValue
}
// String returns a string representation of the Socket, including its parent
// entity, socket type, and ID.
func (ls *Socket) String() string {
return fmt.Sprintf("%s %s #%d", ls.Parent, ls.SocketType, ls.ID)
}

View File

@ -47,12 +47,14 @@ func (sc *SubChannel) id() int64 {
return sc.ID
}
// Sockets returns a copy of the sockets map associated with the SubChannel.
func (sc *SubChannel) Sockets() map[int64]string {
db.mu.RLock()
defer db.mu.RUnlock()
return copyMap(sc.sockets)
}
// Trace returns a copy of the ChannelTrace associated with the SubChannel.
func (sc *SubChannel) Trace() *ChannelTrace {
db.mu.RLock()
defer db.mu.RUnlock()

View File

@ -35,13 +35,13 @@ type SocketOptionData struct {
// Getsockopt defines the function to get socket options requested by channelz.
// It is to be passed to syscall.RawConn.Control().
// Windows OS doesn't support Socket Option
func (s *SocketOptionData) Getsockopt(fd uintptr) {
func (s *SocketOptionData) Getsockopt(uintptr) {
once.Do(func() {
logger.Warning("Channelz: socket options are not supported on non-linux environments")
})
}
// GetSocketOption gets the socket option info of the conn.
func GetSocketOption(c any) *SocketOptionData {
func GetSocketOption(any) *SocketOptionData {
return nil
}

View File

@ -79,13 +79,21 @@ type TraceEvent struct {
Parent *TraceEvent
}
// ChannelTrace provides tracing information for a channel.
// It tracks various events and metadata related to the channel's lifecycle
// and operations.
type ChannelTrace struct {
cm *channelMap
clearCalled bool
cm *channelMap
clearCalled bool
// The time when the trace was created.
CreationTime time.Time
EventNum int64
mu sync.Mutex
Events []*traceEvent
// A counter for the number of events recorded in the
// trace.
EventNum int64
mu sync.Mutex
// A slice of traceEvent pointers representing the events recorded for
// this channel.
Events []*traceEvent
}
func (c *ChannelTrace) copy() *ChannelTrace {
@ -175,6 +183,7 @@ var refChannelTypeToString = map[RefChannelType]string{
RefNormalSocket: "NormalSocket",
}
// String returns a string representation of the RefChannelType
func (r RefChannelType) String() string {
return refChannelTypeToString[r]
}

View File

@ -45,7 +45,16 @@ var (
// option is present for backward compatibility. This option may be overridden
// by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
// XDSFallbackSupport is the env variable that controls whether support for
// xDS fallback is turned on. If this is unset or is false, only the first
// xDS server in the list of server configs will be used.
XDSFallbackSupport = boolFromEnv("GRPC_EXPERIMENTAL_XDS_FALLBACK", false)
// NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
// instead of the exiting pickfirst implementation. This can be enabled by
// setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
// to "true".
NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", false)
)
func boolFromEnv(envVar string, def bool) bool {

View File

@ -18,11 +18,11 @@
package internal
var (
// WithRecvBufferPool is implemented by the grpc package and returns a dial
// WithBufferPool is implemented by the grpc package and returns a dial
// option to configure a shared buffer pool for a grpc.ClientConn.
WithRecvBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
WithBufferPool any // func (grpc.SharedBufferPool) grpc.DialOption
// RecvBufferPool is implemented by the grpc package and returns a server
// BufferPool is implemented by the grpc package and returns a server
// option to configure a shared buffer pool for a grpc.Server.
RecvBufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
BufferPool any // func (grpc.SharedBufferPool) grpc.ServerOption
)

View File

@ -16,17 +16,21 @@
*
*/
// Package grpclog provides logging functionality for internal gRPC packages,
// outside of the functionality provided by the external `grpclog` package.
package grpclog
import (
"fmt"
"google.golang.org/grpc/grpclog"
)
// PrefixLogger does logging with a prefix.
//
// Logging method on a nil logs without any prefix.
type PrefixLogger struct {
logger DepthLoggerV2
logger grpclog.DepthLoggerV2
prefix string
}
@ -38,7 +42,7 @@ func (pl *PrefixLogger) Infof(format string, args ...any) {
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
grpclog.InfoDepth(1, fmt.Sprintf(format, args...))
}
// Warningf does warning logging.
@ -48,7 +52,7 @@ func (pl *PrefixLogger) Warningf(format string, args ...any) {
pl.logger.WarningDepth(1, fmt.Sprintf(format, args...))
return
}
WarningDepth(1, fmt.Sprintf(format, args...))
grpclog.WarningDepth(1, fmt.Sprintf(format, args...))
}
// Errorf does error logging.
@ -58,36 +62,18 @@ func (pl *PrefixLogger) Errorf(format string, args ...any) {
pl.logger.ErrorDepth(1, fmt.Sprintf(format, args...))
return
}
ErrorDepth(1, fmt.Sprintf(format, args...))
}
// Debugf does info logging at verbose level 2.
func (pl *PrefixLogger) Debugf(format string, args ...any) {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
if !Logger.V(2) {
return
}
if pl != nil {
// Handle nil, so the tests can pass in a nil logger.
format = pl.prefix + format
pl.logger.InfoDepth(1, fmt.Sprintf(format, args...))
return
}
InfoDepth(1, fmt.Sprintf(format, args...))
grpclog.ErrorDepth(1, fmt.Sprintf(format, args...))
}
// V reports whether verbosity level l is at least the requested verbose level.
func (pl *PrefixLogger) V(l int) bool {
// TODO(6044): Refactor interfaces LoggerV2 and DepthLogger, and maybe
// rewrite PrefixLogger a little to ensure that we don't use the global
// `Logger` here, and instead use the `logger` field.
return Logger.V(l)
if pl != nil {
return pl.logger.V(l)
}
return true
}
// NewPrefixLogger creates a prefix logger with the given prefix.
func NewPrefixLogger(logger DepthLoggerV2, prefix string) *PrefixLogger {
func NewPrefixLogger(logger grpclog.DepthLoggerV2, prefix string) *PrefixLogger {
return &PrefixLogger{logger: logger, prefix: prefix}
}
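// Illustrative sketch, not part of the file: constructing and using a prefix
// logger inside a gRPC-internal component. The component name and prefix are
// made up; grpclog.Component returns a DepthLoggerV2 suitable for the logger
// argument.
func examplePrefixLoggerUsage() {
	logger := NewPrefixLogger(grpclog.Component("examplecomp"), "[example-prefix] ")
	logger.Infof("connected to %s", "backend-1")
	if logger.V(2) {
		logger.Infof("verbose connection details follow")
	}
}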

View File

@ -53,16 +53,28 @@ func NewCallbackSerializer(ctx context.Context) *CallbackSerializer {
return cs
}
// Schedule adds a callback to be scheduled after existing callbacks are run.
// TrySchedule tries to schedule the provided callback function f to be
// executed in the order it was added. This is a best-effort operation. If the
// context passed to NewCallbackSerializer was canceled before this method is
// called, the callback will not be scheduled.
//
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
func (cs *CallbackSerializer) TrySchedule(f func(ctx context.Context)) {
cs.callbacks.Put(f)
}
// ScheduleOr schedules the provided callback function f to be executed in the
// order it was added. If the context passed to NewCallbackSerializer has been
// canceled before this method is called, the onFailure callback will be
// executed inline instead.
//
// Return value indicates if the callback was successfully added to the list of
// callbacks to be executed by the serializer. It is not possible to add
// callbacks once the context passed to NewCallbackSerializer is cancelled.
func (cs *CallbackSerializer) Schedule(f func(ctx context.Context)) bool {
return cs.callbacks.Put(f) == nil
// Callbacks are expected to honor the context when performing any blocking
// operations, and should return early when the context is canceled.
func (cs *CallbackSerializer) ScheduleOr(f func(ctx context.Context), onFailure func()) {
if cs.callbacks.Put(f) != nil {
onFailure()
}
}
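// Illustrative sketch, not part of the file: typical use of the serializer by
// other internal packages. The work described in the callbacks is made up.
func exampleSerializerUsage() {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()
	cs := NewCallbackSerializer(ctx)

	// Best-effort: silently dropped if ctx was already canceled.
	cs.TrySchedule(func(ctx context.Context) {
		// First unit of work; should honor ctx in any blocking calls.
	})

	// Runs the callback in order, or runs onFailure inline if scheduling is no
	// longer possible because ctx was canceled.
	cs.ScheduleOr(func(ctx context.Context) {
		// Second unit of work.
	}, func() {
		// Cleanup when the callback can no longer be scheduled.
	})
}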
func (cs *CallbackSerializer) run(ctx context.Context) {

View File

@ -77,7 +77,7 @@ func (ps *PubSub) Subscribe(sub Subscriber) (cancel func()) {
if ps.msg != nil {
msg := ps.msg
ps.cs.Schedule(func(context.Context) {
ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[sub] {
@ -103,7 +103,7 @@ func (ps *PubSub) Publish(msg any) {
ps.msg = msg
for sub := range ps.subscribers {
s := sub
ps.cs.Schedule(func(context.Context) {
ps.cs.TrySchedule(func(context.Context) {
ps.mu.Lock()
defer ps.mu.Unlock()
if !ps.subscribers[s] {

View File

@ -39,7 +39,7 @@ func ParseMethod(methodName string) (service, method string, _ error) {
}
// baseContentType is the base content-type for gRPC. This is a valid
// content-type on it's own, but can also include a content-subtype such as
// content-type on its own, but can also include a content-subtype such as
// "proto" as a suffix after "+" or ";". See
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests
// for more details.
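// For illustration, content-types accepted under this scheme include, for
// example:
//
//	application/grpc
//	application/grpc+proto
//	application/grpc+json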

View File

@ -182,6 +182,7 @@ func (m *Manager) tryEnterIdleMode() bool {
return true
}
// EnterIdleModeForTesting instructs the channel to enter idle mode.
func (m *Manager) EnterIdleModeForTesting() {
m.tryEnterIdleMode()
}
@ -225,7 +226,7 @@ func (m *Manager) ExitIdleMode() error {
// came in and OnCallBegin() noticed that the calls count is negative.
// - Channel is in idle mode, and multiple new RPCs come in at the same
// time, all of them notice a negative calls count in OnCallBegin and get
// here. The first one to get the lock would got the channel to exit idle.
// here. The first one to get the lock would get the channel to exit idle.
// - Channel is not in idle mode, and the user calls Connect which calls
// m.ExitIdleMode.
//
@ -266,6 +267,7 @@ func (m *Manager) isClosed() bool {
return atomic.LoadInt32(&m.closed) == 1
}
// Close stops the timer associated with the Manager, if it exists.
func (m *Manager) Close() {
atomic.StoreInt32(&m.closed, 1)

View File

@ -183,7 +183,7 @@ var (
// GRPCResolverSchemeExtraMetadata determines when gRPC will add extra
// metadata to RPCs.
GRPCResolverSchemeExtraMetadata string = "xds"
GRPCResolverSchemeExtraMetadata = "xds"
// EnterIdleModeForTesting gets the ClientConn to enter IDLE mode.
EnterIdleModeForTesting any // func(*grpc.ClientConn)
@ -191,6 +191,8 @@ var (
// ExitIdleModeForTesting gets the ClientConn to exit IDLE mode.
ExitIdleModeForTesting any // func(*grpc.ClientConn) error
// ChannelzTurnOffForTesting disables the Channelz service for testing
// purposes.
ChannelzTurnOffForTesting func()
// TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
@ -203,11 +205,27 @@ var (
// UserSetDefaultScheme is set to true if the user has overridden the
// default resolver scheme.
UserSetDefaultScheme bool = false
UserSetDefaultScheme = false
// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
// is the number of elements. swap swaps the elements with indexes i and j.
ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
// ConnectedAddress returns the connected address for a SubConnState. The
// address is only valid if the state is READY.
ConnectedAddress any // func (scs SubConnState) resolver.Address
// SetConnectedAddress sets the connected address for a SubConnState.
SetConnectedAddress any // func(scs *SubConnState, addr resolver.Address)
// SnapshotMetricRegistryForTesting snapshots the global data of the metric
// registry. Returns a cleanup function that sets the metric registry to its
// original state. Only called in testing functions.
SnapshotMetricRegistryForTesting func() func()
// SetDefaultBufferPoolForTesting updates the default buffer pool, for
// testing purposes.
SetDefaultBufferPoolForTesting any // func(mem.BufferPool)
// SetBufferPoolingThresholdForTesting updates the buffer pooling threshold, for
// testing purposes.
SetBufferPoolingThresholdForTesting any // func(int)
)
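// Illustrative sketch, not part of the file: consumers of these untyped hooks
// recover the concrete signature with a type assertion that mirrors the
// commented type (the http2 client does exactly this for
// FromOutgoingContextRaw). The example below applies the same pattern to
// ShuffleAddressListForTesting; the addrs slice is hypothetical.
//
//	shuffle := internal.ShuffleAddressListForTesting.(func(n int, swap func(i, j int)))
//	shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })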
// HealthChecker defines the signature of the client-side LB channel health
@ -215,7 +233,7 @@ var (
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
// it's health back by calling setConnectivityState().
// its health back by calling setConnectivityState().
//
// The health checking protocol is defined at:
// https://github.com/grpc/grpc/blob/master/doc/health-checking.md

View File

@ -177,7 +177,7 @@ type dnsResolver struct {
// finished. Otherwise, data race will be possible. [Race Example] in
// dns_resolver_test we replace the real lookup functions with mocked ones to
// facilitate testing. If Close() doesn't wait for watcher() goroutine
// finishes, race detector sometimes will warns lookup (READ the lookup
// finishes, race detector sometimes will warn lookup (READ the lookup
// function pointers) inside watcher() goroutine has data race with
// replaceNetFunc (WRITE the lookup function pointers).
wg sync.WaitGroup

View File

@ -55,7 +55,7 @@ func (r *passthroughResolver) start() {
r.cc.UpdateState(resolver.State{Addresses: []resolver.Address{{Addr: r.target.Endpoint()}}})
}
func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOptions) {}
func (*passthroughResolver) ResolveNow(resolver.ResolveNowOptions) {}
func (*passthroughResolver) Close() {}

42
vendor/google.golang.org/grpc/internal/stats/labels.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package stats provides internal stats related functionality.
package stats
import "context"
// Labels are the labels for metrics.
type Labels struct {
// TelemetryLabels are the telemetry labels to record.
TelemetryLabels map[string]string
}
type labelsKey struct{}
// GetLabels returns the Labels stored in the context, or nil if there are none.
func GetLabels(ctx context.Context) *Labels {
labels, _ := ctx.Value(labelsKey{}).(*Labels)
return labels
}
// SetLabels sets the Labels in the context.
func SetLabels(ctx context.Context, labels *Labels) context.Context {
// could also append
return context.WithValue(ctx, labelsKey{}, labels)
}
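// Illustrative sketch, not part of the file: attaching telemetry labels to a
// context and reading them back later in the RPC path. The label key and
// value are made up.
func exampleLabelsUsage(ctx context.Context) string {
	ctx = SetLabels(ctx, &Labels{
		TelemetryLabels: map[string]string{"grpc.lb.locality": "us-central1-a"},
	})
	if labels := GetLabels(ctx); labels != nil {
		return labels.TelemetryLabels["grpc.lb.locality"]
	}
	return ""
}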

View File

@ -0,0 +1,105 @@
/*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package stats
import (
"fmt"
estats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/stats"
)
// MetricsRecorderList forwards Record calls to all of its metricsRecorders.
//
// It eats any record calls where the label values provided do not match the
// number of label keys.
type MetricsRecorderList struct {
// metricsRecorders are the metrics recorders this list will forward to.
metricsRecorders []estats.MetricsRecorder
}
// NewMetricsRecorderList creates a new metric recorder list with all the stats
// handlers provided which implement the MetricsRecorder interface.
// If no stats handlers provided implement the MetricsRecorder interface,
// the MetricsRecorder list returned is a no-op.
func NewMetricsRecorderList(shs []stats.Handler) *MetricsRecorderList {
var mrs []estats.MetricsRecorder
for _, sh := range shs {
if mr, ok := sh.(estats.MetricsRecorder); ok {
mrs = append(mrs, mr)
}
}
return &MetricsRecorderList{
metricsRecorders: mrs,
}
}
func verifyLabels(desc *estats.MetricDescriptor, labelsRecv ...string) {
if got, want := len(labelsRecv), len(desc.Labels)+len(desc.OptionalLabels); got != want {
panic(fmt.Sprintf("Received %d labels in call to record metric %q, but expected %d.", got, desc.Name, want))
}
}
// RecordInt64Count records the measurement alongside labels on the int
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle, incr int64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordInt64Count(handle, incr, labels...)
}
}
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordFloat64Count(handle, incr, labels...)
}
}
// RecordInt64Histo records the measurement alongside labels on the int
// histo associated with the provided handle.
func (l *MetricsRecorderList) RecordInt64Histo(handle *estats.Int64HistoHandle, incr int64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordInt64Histo(handle, incr, labels...)
}
}
// RecordFloat64Histo records the measurement alongside labels on the float
// histo associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Histo(handle *estats.Float64HistoHandle, incr float64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordFloat64Histo(handle, incr, labels...)
}
}
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
func (l *MetricsRecorderList) RecordInt64Gauge(handle *estats.Int64GaugeHandle, incr int64, labels ...string) {
verifyLabels(handle.Descriptor(), labels...)
for _, metricRecorder := range l.metricsRecorders {
metricRecorder.RecordInt64Gauge(handle, incr, labels...)
}
}
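// Illustrative sketch, not part of the file: building a recorder list from a
// server's stats handlers. Handlers that do not implement
// estats.MetricsRecorder are skipped, so the returned list may be a no-op.
// The function and variable names are made up.
func exampleRecorderList(handlers []stats.Handler) *MetricsRecorderList {
	recorders := NewMetricsRecorderList(handlers)
	// Each RecordXxx call on the list fans out to every recorder it holds; the
	// number of label values passed must match the metric descriptor's label
	// count, or verifyLabels panics.
	return recorders
}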

View File

@ -138,17 +138,19 @@ func (s *Status) WithDetails(details ...protoadapt.MessageV1) (*Status, error) {
// s.Code() != OK implies that s.Proto() != nil.
p := s.Proto()
for _, detail := range details {
any, err := anypb.New(protoadapt.MessageV2Of(detail))
m, err := anypb.New(protoadapt.MessageV2Of(detail))
if err != nil {
return nil, err
}
p.Details = append(p.Details, any)
p.Details = append(p.Details, m)
}
return &Status{s: p}, nil
}
// Details returns a slice of details messages attached to the status.
// If a detail cannot be decoded, the error is returned in place of the detail.
// If the detail can be decoded, the proto message returned is of the same
// type that was given to WithDetails().
func (s *Status) Details() []any {
if s == nil || s.s == nil {
return nil
@ -160,7 +162,38 @@ func (s *Status) Details() []any {
details = append(details, err)
continue
}
details = append(details, detail)
// The call to MessageV1Of is required to unwrap the proto message if
// it implemented only the MessageV1 API. The proto message would have
// been wrapped in a V2 wrapper in Status.WithDetails. V2 messages are
// added to a global registry used by any.UnmarshalNew().
// MessageV1Of has the following behaviour:
// 1. If the given message is a wrapped MessageV1, it returns the
// unwrapped value.
// 2. If the given message already implements MessageV1, it returns it
// as is.
// 3. Else, it wraps the MessageV2 in a MessageV1 wrapper.
//
// Since the Status.WithDetails() API only accepts MessageV1, calling
// MessageV1Of ensures we return the same type that was given to
// WithDetails:
// * If the given type implemented only MessageV1, the unwrapping from
// point 1 above will restore the type.
// * If the given type implemented both MessageV1 and MessageV2, point 2
// above will ensure no wrapping is performed.
// * If the given type implemented only MessageV2 and was wrapped using
// MessageV1Of before passing to WithDetails(), it would be unwrapped
// in WithDetails by calling MessageV2Of(). Point 3 above will ensure
// that the type is wrapped in a MessageV1 wrapper again before
// returning. Note that protoc-gen-go doesn't generate code which
// implements ONLY MessageV2 at the time of writing.
//
// NOTE: Status details can also be added using the FromProto method.
// This could theoretically allow passing a Detail message that only
// implements the V2 API. In such a case the message will be wrapped in
// a MessageV1 wrapper when fetched using Details().
// Since protoc-gen-go generates only code that implements both V1 and
// V2 APIs for backward compatibility, this is not a concern.
details = append(details, protoadapt.MessageV1Of(detail))
}
return details
}
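// Illustrative sketch, not part of the file: round-tripping a detail message
// through WithDetails and Details and getting back the same concrete type. It
// assumes an extra import of
// google.golang.org/genproto/googleapis/rpc/errdetails; the field values are
// made up.
func exampleDetailsRoundTrip() (*errdetails.BadRequest, error) {
	s := New(codes.InvalidArgument, "bad request")
	s, err := s.WithDetails(&errdetails.BadRequest{
		FieldViolations: []*errdetails.BadRequest_FieldViolation{{Field: "name"}},
	})
	if err != nil {
		return nil, err
	}
	for _, d := range s.Details() {
		if br, ok := d.(*errdetails.BadRequest); ok {
			return br, nil // Same concrete type that was passed to WithDetails.
		}
	}
	return nil, nil
}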

View File

@ -58,20 +58,20 @@ func GetRusage() *Rusage {
// CPUTimeDiff returns the differences of user CPU time and system CPU time used
// between two Rusage structs. It a no-op function for non-linux environments.
func CPUTimeDiff(first *Rusage, latest *Rusage) (float64, float64) {
func CPUTimeDiff(*Rusage, *Rusage) (float64, float64) {
log()
return 0, 0
}
// SetTCPUserTimeout is a no-op function under non-linux environments.
func SetTCPUserTimeout(conn net.Conn, timeout time.Duration) error {
func SetTCPUserTimeout(net.Conn, time.Duration) error {
log()
return nil
}
// GetTCPUserTimeout is a no-op function under non-linux environments.
// A negative return value indicates the operation is not supported
func GetTCPUserTimeout(conn net.Conn) (int, error) {
func GetTCPUserTimeout(net.Conn) (int, error) {
log()
return -1, nil
}

View File

@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keealive interval and time parameters.
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_KEEPALIVE, 1)

View File

@ -44,7 +44,7 @@ func NetDialerWithTCPKeepalive() *net.Dialer {
// combination of unconditionally enabling TCP keepalives here, and
// disabling the overriding of TCP keepalive parameters by setting the
// KeepAlive field to a negative value above, results in OS defaults for
// the TCP keealive interval and time parameters.
// the TCP keepalive interval and time parameters.
Control: func(_, _ string, c syscall.RawConn) error {
return c.Control(func(fd uintptr) {
windows.SetsockoptInt(windows.Handle(fd), windows.SOL_SOCKET, windows.SO_KEEPALIVE, 1)

View File

@ -32,6 +32,7 @@ import (
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@ -148,9 +149,9 @@ type dataFrame struct {
streamID uint32
endStream bool
h []byte
d []byte
reader mem.Reader
// onEachWrite is called every time
// a part of d is written out.
// a part of data is written out.
onEachWrite func()
}
@ -289,18 +290,22 @@ func (l *outStreamList) dequeue() *outStream {
}
// controlBuffer is a way to pass information to loopy.
// Information is passed as specific struct types called control frames.
// A control frame not only represents data, messages or headers to be sent out
// but can also be used to instruct loopy to update its internal state.
// It shouldn't be confused with an HTTP2 frame, although some of the control frames
// like dataFrame and headerFrame do go out on wire as HTTP2 frames.
//
// Information is passed as specific struct types called control frames. A
// control frame not only represents data, messages or headers to be sent out
// but can also be used to instruct loopy to update its internal state. It
// shouldn't be confused with an HTTP2 frame, although some of the control
// frames like dataFrame and headerFrame do go out on wire as HTTP2 frames.
type controlBuffer struct {
ch chan struct{}
done <-chan struct{}
wakeupCh chan struct{} // Unblocks readers waiting for something to read.
done <-chan struct{} // Closed when the transport is done.
// Mutex guards all the fields below, except trfChan which can be read
// atomically without holding mu.
mu sync.Mutex
consumerWaiting bool
list *itemList
err error
consumerWaiting bool // True when readers are blocked waiting for new data.
closed bool // True when the controlbuf is finished.
list *itemList // List of queued control frames.
// transportResponseFrames counts the number of queued items that represent
// the response of an action initiated by the peer. trfChan is created
@ -308,47 +313,59 @@ type controlBuffer struct {
// closed and nilled when transportResponseFrames drops below the
// threshold. Both fields are protected by mu.
transportResponseFrames int
trfChan atomic.Value // chan struct{}
trfChan atomic.Pointer[chan struct{}]
}
func newControlBuffer(done <-chan struct{}) *controlBuffer {
return &controlBuffer{
ch: make(chan struct{}, 1),
list: &itemList{},
done: done,
wakeupCh: make(chan struct{}, 1),
list: &itemList{},
done: done,
}
}
// throttle blocks if there are too many incomingSettings/cleanupStreams in the
// controlbuf.
// throttle blocks if there are too many frames in the control buf that
// represent the response of an action initiated by the peer, like
// incomingSettings, cleanupStreams, etc.
func (c *controlBuffer) throttle() {
ch, _ := c.trfChan.Load().(chan struct{})
if ch != nil {
if ch := c.trfChan.Load(); ch != nil {
select {
case <-ch:
case <-(*ch):
case <-c.done:
}
}
}
// put adds an item to the controlbuf.
func (c *controlBuffer) put(it cbItem) error {
_, err := c.executeAndPut(nil, it)
return err
}
// executeAndPut runs f, and if the return value is true, adds the given item to
// the controlbuf. The item could be nil, in which case, this method simply
// executes f and does not add the item to the controlbuf.
//
// The first return value indicates whether the item was successfully added to
// the control buffer. A non-nil error, specifically ErrConnClosing, is returned
// if the control buffer is already closed.
func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
var wakeUp bool
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return false, c.err
defer c.mu.Unlock()
if c.closed {
return false, ErrConnClosing
}
if f != nil {
if !f() { // f wasn't successful
c.mu.Unlock()
return false, nil
}
}
if it == nil {
return true, nil
}
var wakeUp bool
if c.consumerWaiting {
wakeUp = true
c.consumerWaiting = false
@ -359,98 +376,102 @@ func (c *controlBuffer) executeAndPut(f func() bool, it cbItem) (bool, error) {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are adding the frame that puts us over the threshold; create
// a throttling channel.
c.trfChan.Store(make(chan struct{}))
ch := make(chan struct{})
c.trfChan.Store(&ch)
}
}
c.mu.Unlock()
if wakeUp {
select {
case c.ch <- struct{}{}:
case c.wakeupCh <- struct{}{}:
default:
}
}
return true, nil
}
// Note argument f should never be nil.
func (c *controlBuffer) execute(f func(it any) bool, it any) (bool, error) {
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return false, c.err
}
if !f(it) { // f wasn't successful
c.mu.Unlock()
return false, nil
}
c.mu.Unlock()
return true, nil
}
// get returns the next control frame from the control buffer. If block is true
// **and** there are no control frames in the control buffer, the call blocks
// until one of the conditions is met: there is a frame to return or the
// transport is closed.
func (c *controlBuffer) get(block bool) (any, error) {
for {
c.mu.Lock()
if c.err != nil {
frame, err := c.getOnceLocked()
if frame != nil || err != nil || !block {
// If we read a frame or an error, we can return to the caller. The
// call to getOnceLocked() returns a nil frame and a nil error if
// there is nothing to read, and in that case, if the caller asked
// us not to block, we can return now as well.
c.mu.Unlock()
return nil, c.err
}
if !c.list.isEmpty() {
h := c.list.dequeue().(cbItem)
if h.isTransportResponseFrame() {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
ch := c.trfChan.Load().(chan struct{})
close(ch)
c.trfChan.Store((chan struct{})(nil))
}
c.transportResponseFrames--
}
c.mu.Unlock()
return h, nil
}
if !block {
c.mu.Unlock()
return nil, nil
return frame, err
}
c.consumerWaiting = true
c.mu.Unlock()
// Release the lock above and wait to be woken up.
select {
case <-c.ch:
case <-c.wakeupCh:
case <-c.done:
return nil, errors.New("transport closed by client")
}
}
}
// Callers must not use this method, but should instead use get().
//
// Caller must hold c.mu.
func (c *controlBuffer) getOnceLocked() (any, error) {
if c.closed {
return false, ErrConnClosing
}
if c.list.isEmpty() {
return nil, nil
}
h := c.list.dequeue().(cbItem)
if h.isTransportResponseFrame() {
if c.transportResponseFrames == maxQueuedTransportResponseFrames {
// We are removing the frame that put us over the
// threshold; close and clear the throttling channel.
ch := c.trfChan.Swap(nil)
close(*ch)
}
c.transportResponseFrames--
}
return h, nil
}
// finish closes the control buffer, cleaning up any streams that have queued
// header frames. Once this method returns, no more frames can be added to the
// control buffer, and attempts to do so will return ErrConnClosing.
func (c *controlBuffer) finish() {
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
defer c.mu.Unlock()
if c.closed {
return
}
c.err = ErrConnClosing
c.closed = true
// There may be headers for streams in the control buffer.
// These streams need to be cleaned out since the transport
// is still not aware of these yet.
for head := c.list.dequeueAll(); head != nil; head = head.next {
hdr, ok := head.it.(*headerFrame)
if !ok {
continue
}
if hdr.onOrphaned != nil { // It will be nil on the server-side.
hdr.onOrphaned(ErrConnClosing)
switch v := head.it.(type) {
case *headerFrame:
if v.onOrphaned != nil { // It will be nil on the server-side.
v.onOrphaned(ErrConnClosing)
}
case *dataFrame:
_ = v.reader.Close()
}
}
// In case throttle() is currently in flight, it needs to be unblocked.
// Otherwise, the transport may not close, since the transport is closed by
// the reader encountering the connection error.
ch, _ := c.trfChan.Load().(chan struct{})
ch := c.trfChan.Swap(nil)
if ch != nil {
close(ch)
close(*ch)
}
c.trfChan.Store((chan struct{})(nil))
c.mu.Unlock()
}
type side int
@ -466,7 +487,7 @@ const (
// stream maintains a queue of data frames; as loopy receives data frames
// it gets added to the queue of the relevant stream.
// Loopy goes over this list of active streams by processing one node every iteration,
// thereby closely resemebling to a round-robin scheduling over all streams. While
// thereby closely resembling a round-robin scheduling over all streams. While
// processing a stream, loopy writes out data bytes from this stream capped by the min
// of http2MaxFrameLen, connection-level flow control and stream-level flow control.
type loopyWriter struct {
@ -490,12 +511,13 @@ type loopyWriter struct {
draining bool
conn net.Conn
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error)) *loopyWriter {
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
var buf bytes.Buffer
l := &loopyWriter{
side: s,
@ -511,6 +533,7 @@ func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimato
conn: conn,
logger: logger,
ssGoAwayHandler: goAwayHandler,
bufferPool: bufferPool,
}
return l
}
@ -768,6 +791,11 @@ func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error {
// not be established yet.
delete(l.estdStreams, c.streamID)
str.deleteSelf()
for head := str.itl.dequeueAll(); head != nil; head = head.next {
if df, ok := head.it.(*dataFrame); ok {
_ = df.reader.Close()
}
}
}
if c.rst { // If RST_STREAM needs to be sent.
if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil {
@ -903,16 +931,18 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
// A data item is represented by a dataFrame, since it later translates into
// multiple HTTP2 data frames.
// Every dataFrame has two buffers; h that keeps grpc-message header and d that is actual data.
// As an optimization to keep wire traffic low, data from d is copied to h to make as big as the
// maximum possible HTTP2 frame size.
// Every dataFrame has two parts: h, which holds the grpc-message header, and a
// reader, which yields the actual message data. As an optimization to keep wire
// traffic low, message data is combined with h so that each frame written out is
// as close as possible to the maximum HTTP2 frame size.
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // Empty data frame
if len(dataItem.h) == 0 && dataItem.reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
_ = dataItem.reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@ -927,9 +957,7 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
var (
buf []byte
)
// Figure out the maximum size we can send
maxSize := http2MaxFrameLen
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
@ -943,43 +971,50 @@ func (l *loopyWriter) processData() (bool, error) {
}
// Compute how much of the header and data we can send within quota and max frame length
hSize := min(maxSize, len(dataItem.h))
dSize := min(maxSize-hSize, len(dataItem.d))
if hSize != 0 {
if dSize == 0 {
buf = dataItem.h
} else {
// We can add some data to grpc message header to distribute bytes more equally across frames.
// Copy on the stack to avoid generating garbage
var localBuf [http2MaxFrameLen]byte
copy(localBuf[:hSize], dataItem.h)
copy(localBuf[hSize:], dataItem.d[:dSize])
buf = localBuf[:hSize+dSize]
}
} else {
buf = dataItem.d
}
dSize := min(maxSize-hSize, dataItem.reader.Remaining())
remainingBytes := len(dataItem.h) + dataItem.reader.Remaining() - hSize - dSize
size := hSize + dSize
var buf *[]byte
if hSize != 0 && dSize == 0 {
buf = &dataItem.h
} else {
// Note: this is only necessary because the http2.Framer does not support
// partially writing a frame, so the sequence must be materialized into a buffer.
// TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
pool := l.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
}
buf = pool.Get(size)
defer pool.Put(buf)
copy((*buf)[:hSize], dataItem.h)
_, _ = dataItem.reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
str.wq.replenish(size)
var endStream bool
// If this is the last data message on this stream and all of it can be written in this iteration.
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
if dataItem.endStream && remainingBytes == 0 {
endStream = true
}
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
return false, err
}
str.bytesOutStanding += size
l.sendQuota -= uint32(size)
dataItem.h = dataItem.h[hSize:]
dataItem.d = dataItem.d[dSize:]
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
if remainingBytes == 0 { // All the data from that message was written out.
_ = dataItem.reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
@ -998,10 +1033,3 @@ func (l *loopyWriter) processData() (bool, error) {
}
return false, nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@ -24,7 +24,6 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -40,6 +39,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler) (ServerTransport, error) {
func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@ -98,6 +98,7 @@ func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []s
contentType: contentType,
contentSubtype: contentSubtype,
stats: stats,
bufferPool: bufferPool,
}
st.logger = prefixLoggerForServerHandlerTransport(st)
@ -171,6 +172,8 @@ type serverHandlerTransport struct {
stats []stats.Handler
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
}
func (ht *serverHandlerTransport) Close(err error) {
@ -244,6 +247,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
s.hdrMu.Lock()
defer s.hdrMu.Unlock()
if p := st.Proto(); p != nil && len(p.Details) > 0 {
delete(s.trailer, grpcStatusDetailsBinHeader)
stBytes, err := proto.Marshal(p)
@ -268,7 +272,6 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro
}
}
}
s.hdrMu.Unlock()
})
if err == nil { // transport has not been closed
@ -330,16 +333,28 @@ func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) {
s.hdrMu.Unlock()
}
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
// Always take a reference because otherwise there is no guarantee the data will
// be available after this function returns. This is what callers to Write
// expect.
data.Ref()
headersWritten := s.updateHeaderSent()
return ht.do(func() {
err := ht.do(func() {
defer data.Free()
if !headersWritten {
ht.writePendingHeaders(s)
}
ht.rw.Write(hdr)
ht.rw.Write(data)
for _, b := range data {
_, _ = ht.rw.Write(b.ReadOnlyData())
}
ht.rw.(http.Flusher).Flush()
})
if err != nil {
data.Free()
return err
}
return nil
}
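// Illustrative sketch, not part of the file: the ownership convention Write
// follows for mem.BufferSlice. Callers take an extra reference before handing
// the data to asynchronous work and release it on every exit path. The
// function name is made up, and an io.Writer sink is assumed (this file does
// not import io).
func exampleBufferSliceOwnership(data mem.BufferSlice, sink io.Writer) {
	data.Ref() // Keep the buffers alive after the caller's own Free.
	go func() {
		defer data.Free()
		for _, b := range data {
			_, _ = sink.Write(b.ReadOnlyData())
		}
	}()
}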
func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error {
@ -406,7 +421,7 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
s.trReader = &transportReader{
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf, freeBuffer: func(*bytes.Buffer) {}},
reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
windowHandler: func(int) {},
}
@ -415,21 +430,19 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
go func() {
defer close(readerDone)
// TODO: minimize garbage, optimize recvBuffer code/ownership
const readSize = 8196
for buf := make([]byte, readSize); ; {
n, err := req.Body.Read(buf)
for {
buf := ht.bufferPool.Get(http2MaxFrameLen)
n, err := req.Body.Read(*buf)
if n > 0 {
s.buf.put(recvMsg{buffer: bytes.NewBuffer(buf[:n:n])})
buf = buf[n:]
*buf = (*buf)[:n]
s.buf.put(recvMsg{buffer: mem.NewBuffer(buf, ht.bufferPool)})
} else {
ht.bufferPool.Put(buf)
}
if err != nil {
s.buf.put(recvMsg{err: mapRecvMsgError(err)})
return
}
if len(buf) == 0 {
buf = make([]byte, readSize)
}
}
}()
@ -462,7 +475,7 @@ func (ht *serverHandlerTransport) IncrMsgSent() {}
func (ht *serverHandlerTransport) IncrMsgRecv() {}
func (ht *serverHandlerTransport) Drain(debugData string) {
func (ht *serverHandlerTransport) Drain(string) {
panic("Drain() is not implemented")
}

View File

@ -47,6 +47,7 @@ import (
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@ -59,6 +60,8 @@ import (
// atomically.
var clientConnectionCounter uint64
var goAwayLoopyWriterTimeout = 5 * time.Second
var metadataFromOutgoingContextRaw = internal.FromOutgoingContextRaw.(func(context.Context) (metadata.MD, [][]string, bool))
// http2Client implements the ClientTransport interface with HTTP2.
@ -83,9 +86,9 @@ type http2Client struct {
writerDone chan struct{} // sync point to enable testing.
// goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor)
// that the server sent GoAway on this transport.
goAway chan struct{}
framer *framer
goAway chan struct{}
keepaliveDone chan struct{} // Closed when the keepalive goroutine exits.
framer *framer
// controlBuf delivers all the control related tasks (e.g., window
// updates, reset streams, and various settings) to the controller.
// Do not access controlBuf with mu held.
@ -144,7 +147,7 @@ type http2Client struct {
onClose func(GoAwayReason)
bufferPool *bufferPool
bufferPool mem.BufferPool
connectionID uint64
logger *grpclog.PrefixLogger
@ -229,7 +232,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
}
}(conn)
// The following defer and goroutine monitor the connectCtx for cancelation
// The following defer and goroutine monitor the connectCtx for cancellation
// and deadline. On context expiration, the connection is hard closed and
// this function will naturally fail as a result. Otherwise, the defer
// waits for the goroutine to exit to prevent the context from being
@ -332,6 +335,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
readerDone: make(chan struct{}),
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
keepaliveDone: make(chan struct{}),
framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
@ -346,7 +350,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
streamQuota: defaultMaxStreamsClient,
streamsQuotaAvailable: make(chan struct{}, 1),
keepaliveEnabled: keepaliveEnabled,
bufferPool: newBufferPool(),
bufferPool: opts.BufferPool,
onClose: onClose,
}
var czSecurity credentials.ChannelzSecurityValue
@ -463,7 +467,7 @@ func newHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
return nil, err
}
go func() {
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
if err := t.loopy.run(); !isIOError(err) {
// Immediately close the connection, as the loopy writer returns
// when there are no more active streams and we were draining (the
@ -504,7 +508,6 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream {
closeStream: func(err error) {
t.CloseStream(s, err)
},
freeBuffer: t.bufferPool.put,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -525,8 +528,9 @@ func (t *http2Client) getPeer() *peer.Peer {
// to be the last frame loopy writes to the transport.
func (t *http2Client) outgoingGoAwayHandler(g *goAway) (bool, error) {
t.mu.Lock()
defer t.mu.Unlock()
if err := t.framer.fr.WriteGoAway(t.nextID-2, http2.ErrCodeNo, g.debugData); err != nil {
maxStreamID := t.nextID - 2
t.mu.Unlock()
if err := t.framer.fr.WriteGoAway(maxStreamID, http2.ErrCodeNo, g.debugData); err != nil {
return false, err
}
return false, g.closeConn
@ -770,7 +774,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*Stream,
hdr := &headerFrame{
hf: headerFields,
endStream: false,
initStream: func(id uint32) error {
initStream: func(uint32) error {
t.mu.Lock()
// TODO: handle transport closure in loopy instead and remove this
// initStream is never called when transport is draining.
@ -983,6 +987,7 @@ func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.
// only once on a transport. Once it is called, the transport should not be
// accessed anymore.
func (t *http2Client) Close(err error) {
t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@ -1005,18 +1010,33 @@ func (t *http2Client) Close(err error) {
// should unblock it so that the goroutine eventually exits.
t.kpDormancyCond.Signal()
}
t.mu.Unlock()
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
<-t.writerDone
t.cancel()
t.conn.Close()
channelz.RemoveEntry(t.channelz.ID)
// Append info about previous goaways if there were any, since this may be important
// for understanding the root cause for this connection to be closed.
_, goAwayDebugMessage := t.GetGoAwayReason()
goAwayDebugMessage := t.goAwayDebugMessage
t.mu.Unlock()
// Per HTTP/2 spec, a GOAWAY frame must be sent before closing the
// connection. See https://httpwg.org/specs/rfc7540.html#GOAWAY. It
// also waits for loopyWriter to be closed with a timer to avoid the
// long blocking in case the connection is blackholed, i.e. TCP is
// just stuck.
t.controlBuf.put(&goAway{code: http2.ErrCodeNo, debugData: []byte("client transport shutdown"), closeConn: err})
timer := time.NewTimer(goAwayLoopyWriterTimeout)
defer timer.Stop()
select {
case <-t.writerDone: // success
case <-timer.C:
t.logger.Infof("Failed to write a GOAWAY frame as part of connection close after %s. Giving up and closing the transport.", goAwayLoopyWriterTimeout)
}
t.cancel()
t.conn.Close()
// Waits for the reader and keepalive goroutines to exit before returning to
// ensure all resources are cleaned up before Close can return.
<-t.readerDone
if t.keepaliveEnabled {
<-t.keepaliveDone
}
channelz.RemoveEntry(t.channelz.ID)
var st *status.Status
if len(goAwayDebugMessage) > 0 {
st = status.Newf(codes.Unavailable, "closing transport due to: %v, received prior goaway: %v", err, goAwayDebugMessage)
@ -1065,27 +1085,36 @@ func (t *http2Client) GracefulClose() {
// Write formats the data into HTTP2 data frame(s) and sends it out. The caller
// should proceed only if Write returns nil.
func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
func (t *http2Client) Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error {
reader := data.Reader()
if opts.Last {
// If it's the last message, update stream state.
if !s.compareAndSwapState(streamActive, streamWriteDone) {
_ = reader.Close()
return errStreamDone
}
} else if s.getState() != streamActive {
_ = reader.Close()
return errStreamDone
}
df := &dataFrame{
streamID: s.id,
endStream: opts.Last,
h: hdr,
d: data,
reader: reader,
}
if hdr != nil || data != nil { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
if hdr != nil || df.reader.Remaining() != 0 { // If it's not an empty data frame, check quota.
if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
_ = reader.Close()
return err
}
}
return t.controlBuf.put(df)
if err := t.controlBuf.put(df); err != nil {
_ = reader.Close()
return err
}
return nil
}
func (t *http2Client) getStream(f http2.Frame) *Stream {
@ -1190,10 +1219,13 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
pool := t.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
}
s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
// The server has closed the stream without sending trailers. Record that
@ -1222,7 +1254,7 @@ func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) {
if statusCode == codes.Canceled {
if d, ok := s.ctx.Deadline(); ok && !d.After(time.Now()) {
// Our deadline was already exceeded, and that was likely the cause
// of this cancelation. Alter the status code accordingly.
// of this cancellation. Alter the status code accordingly.
statusCode = codes.DeadlineExceeded
}
}
@ -1291,11 +1323,11 @@ func (t *http2Client) handlePing(f *http2.PingFrame) {
t.controlBuf.put(pingAck)
}
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) error {
t.mu.Lock()
if t.state == closing {
t.mu.Unlock()
return
return nil
}
if f.ErrCode == http2.ErrCodeEnhanceYourCalm && string(f.DebugData()) == "too_many_pings" {
// When a client receives a GOAWAY with error code ENHANCE_YOUR_CALM and debug
@ -1307,8 +1339,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
id := f.LastStreamID
if id > 0 && id%2 == 0 {
t.mu.Unlock()
t.Close(connectionErrorf(true, nil, "received goaway with non-zero even-numbered numbered stream id: %v", id))
return
return connectionErrorf(true, nil, "received goaway with non-zero even-numbered stream id: %v", id)
}
// A client can receive multiple GoAways from the server (see
// https://github.com/grpc/grpc-go/issues/1387). The idea is that the first
@ -1325,8 +1356,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
// If there are multiple GoAways the first one should always have an ID greater than the following ones.
if id > t.prevGoAwayID {
t.mu.Unlock()
t.Close(connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID))
return
return connectionErrorf(true, nil, "received goaway with stream id: %v, which exceeds stream id of previous goaway: %v", id, t.prevGoAwayID)
}
default:
t.setGoAwayReason(f)
@ -1350,8 +1380,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
t.prevGoAwayID = id
if len(t.activeStreams) == 0 {
t.mu.Unlock()
t.Close(connectionErrorf(true, nil, "received goaway and there are no active streams"))
return
return connectionErrorf(true, nil, "received goaway and there are no active streams")
}
streamsToClose := make([]*Stream, 0)
@ -1368,6 +1397,7 @@ func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) {
for _, stream := range streamsToClose {
t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false)
}
return nil
}
// setGoAwayReason sets the value of t.goAwayReason based
@ -1603,7 +1633,13 @@ func (t *http2Client) readServerPreface() error {
// network connection. If the server preface is not read successfully, an
// error is pushed to errCh; otherwise errCh is closed with no error.
func (t *http2Client) reader(errCh chan<- error) {
defer close(t.readerDone)
var errClose error
defer func() {
close(t.readerDone)
if errClose != nil {
t.Close(errClose)
}
}()
if err := t.readServerPreface(); err != nil {
errCh <- err
@ -1642,11 +1678,10 @@ func (t *http2Client) reader(errCh chan<- error) {
t.closeStream(s, status.Error(code, msg), true, http2.ErrCodeProtocol, status.New(code, msg), nil, false)
}
continue
} else {
// Transport error.
t.Close(connectionErrorf(true, err, "error reading from server: %v", err))
return
}
// Transport error.
errClose = connectionErrorf(true, err, "error reading from server: %v", err)
return
}
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
@ -1660,7 +1695,7 @@ func (t *http2Client) reader(errCh chan<- error) {
case *http2.PingFrame:
t.handlePing(frame)
case *http2.GoAwayFrame:
t.handleGoAway(frame)
errClose = t.handleGoAway(frame)
case *http2.WindowUpdateFrame:
t.handleWindowUpdate(frame)
default:
@ -1671,15 +1706,15 @@ func (t *http2Client) reader(errCh chan<- error) {
}
}
func minTime(a, b time.Duration) time.Duration {
if a < b {
return a
}
return b
}
// keepalive running in a separate goroutine makes sure the connection is alive by sending pings.
func (t *http2Client) keepalive() {
var err error
defer func() {
close(t.keepaliveDone)
if err != nil {
t.Close(err)
}
}()
p := &ping{data: [8]byte{}}
// True iff a ping has been sent, and no data has been received since then.
outstandingPing := false
@ -1703,7 +1738,7 @@ func (t *http2Client) keepalive() {
continue
}
if outstandingPing && timeoutLeft <= 0 {
t.Close(connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout"))
err = connectionErrorf(true, nil, "keepalive ping failed to receive ACK within timeout")
return
}
t.mu.Lock()
@ -1745,7 +1780,7 @@ func (t *http2Client) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
sleepDuration := minTime(t.kp.Time, timeoutLeft)
sleepDuration := min(t.kp.Time, timeoutLeft)
timeoutLeft -= sleepDuration
timer.Reset(sleepDuration)
case <-t.ctx.Done():

View File

@ -39,6 +39,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/mem"
"google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
@ -119,7 +120,7 @@ type http2Server struct {
// Fields below are for channelz metric collection.
channelz *channelz.Socket
bufferPool *bufferPool
bufferPool mem.BufferPool
connectionID uint64
@ -261,7 +262,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
idle: time.Now(),
kep: kep,
initialWindowSize: iwz,
bufferPool: newBufferPool(),
bufferPool: config.BufferPool,
}
var czSecurity credentials.ChannelzSecurityValue
if au, ok := authInfo.(credentials.ChannelzSecurityInfo); ok {
@ -330,7 +331,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
t.handleSettings(sf)
go func() {
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler)
t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst, t.conn, t.logger, t.outgoingGoAwayHandler, t.bufferPool)
err := t.loopy.run()
close(t.loopyWriterDone)
if !isIOError(err) {
@ -613,10 +614,9 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
s.trReader = &transportReader{
reader: &recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
freeBuffer: t.bufferPool.put,
ctx: s.ctx,
ctxDone: s.ctxDone,
recv: s.buf,
},
windowHandler: func(n int) {
t.updateWindow(s, uint32(n))
@ -813,10 +813,13 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
// guarantee f.Data() is consumed before the arrival of next frame.
// Can this copy be eliminated?
if len(f.Data()) > 0 {
buffer := t.bufferPool.get()
buffer.Reset()
buffer.Write(f.Data())
s.write(recvMsg{buffer: buffer})
pool := t.bufferPool
if pool == nil {
// Note that this is only supposed to be nil in tests. Otherwise, stream is
// always initialized with a BufferPool.
pool = mem.DefaultBufferPool()
}
s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
}
}
if f.StreamEnded() {
@ -1089,7 +1092,9 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
onWrite: t.setResetPingStrikes,
}
success, err := t.controlBuf.execute(t.checkForHeaderListSize, trailingHeader)
success, err := t.controlBuf.executeAndPut(func() bool {
return t.checkForHeaderListSize(trailingHeader)
}, nil)
if !success {
if err != nil {
return err
@ -1112,27 +1117,37 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error {
// Write converts the data into an HTTP2 data frame and sends it out. A non-nil
// error is returned if it fails (e.g., framing error, transport error).
func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error {
func (t *http2Server) Write(s *Stream, hdr []byte, data mem.BufferSlice, _ *Options) error {
reader := data.Reader()
if !s.isHeaderSent() { // Headers haven't been written yet.
if err := t.WriteHeader(s, nil); err != nil {
_ = reader.Close()
return err
}
} else {
// Writing headers checks for this condition.
if s.getState() == streamDone {
_ = reader.Close()
return t.streamContextErr(s)
}
}
df := &dataFrame{
streamID: s.id,
h: hdr,
d: data,
reader: reader,
onEachWrite: t.setResetPingStrikes,
}
if err := s.wq.get(int32(len(hdr) + len(data))); err != nil {
if err := s.wq.get(int32(len(hdr) + df.reader.Remaining())); err != nil {
_ = reader.Close()
return t.streamContextErr(s)
}
return t.controlBuf.put(df)
if err := t.controlBuf.put(df); err != nil {
_ = reader.Close()
return err
}
return nil
}
// keepalive running in a separate goroutine does the following:
@ -1223,7 +1238,7 @@ func (t *http2Server) keepalive() {
// timeoutLeft. This will ensure that we wait only for kp.Time
// before sending out the next ping (for cases where the ping is
// acked).
sleepDuration := minTime(t.kp.Time, kpTimeoutLeft)
sleepDuration := min(t.kp.Time, kpTimeoutLeft)
kpTimeoutLeft -= sleepDuration
kpTimer.Reset(sleepDuration)
case <-t.done:

View File

@ -317,28 +317,32 @@ func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
return w
}
func (w *bufWriter) Write(b []byte) (n int, err error) {
func (w *bufWriter) Write(b []byte) (int, error) {
if w.err != nil {
return 0, w.err
}
if w.batchSize == 0 { // Buffer has been disabled.
n, err = w.conn.Write(b)
n, err := w.conn.Write(b)
return n, toIOError(err)
}
if w.buf == nil {
b := w.pool.Get().(*[]byte)
w.buf = *b
}
written := 0
for len(b) > 0 {
nn := copy(w.buf[w.offset:], b)
b = b[nn:]
w.offset += nn
n += nn
if w.offset >= w.batchSize {
err = w.flushKeepBuffer()
copied := copy(w.buf[w.offset:], b)
b = b[copied:]
written += copied
w.offset += copied
if w.offset < w.batchSize {
continue
}
if err := w.flushKeepBuffer(); err != nil {
return written, err
}
}
return n, err
return written, nil
}
func (w *bufWriter) Flush() error {
@ -389,7 +393,7 @@ type framer struct {
fr *http2.Framer
}
var writeBufferPoolMap map[int]*sync.Pool = make(map[int]*sync.Pool)
var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {

View File

@ -107,8 +107,14 @@ func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, backendAddr stri
}
return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump)
}
return &bufConn{Conn: conn, r: r}, nil
// The buffer could contain extra bytes from the target server, so we can't
// discard it. However, in many cases where the server waits for the client
// to send the first message (e.g. when TLS is being used), the buffer will
// be empty, so we can avoid the overhead of reading through this buffer.
if r.Buffered() != 0 {
return &bufConn{Conn: conn, r: r}, nil
}
return conn, nil
}
// proxyDial dials, connecting to a proxy first if necessary. Checks if a proxy

View File

@ -22,7 +22,6 @@
package transport
import (
"bytes"
"context"
"errors"
"fmt"
@ -37,6 +36,7 @@ import (
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/resolver"
@ -47,32 +47,10 @@ import (
const logLevel = 2
type bufferPool struct {
pool sync.Pool
}
func newBufferPool() *bufferPool {
return &bufferPool{
pool: sync.Pool{
New: func() any {
return new(bytes.Buffer)
},
},
}
}
func (p *bufferPool) get() *bytes.Buffer {
return p.pool.Get().(*bytes.Buffer)
}
func (p *bufferPool) put(b *bytes.Buffer) {
p.pool.Put(b)
}
// recvMsg represents the received msg from the transport. All transport
// protocol specific info has been removed.
type recvMsg struct {
buffer *bytes.Buffer
buffer mem.Buffer
// nil: received some data
// io.EOF: stream is completed. data is nil.
// other non-nil error: transport failure. data is nil.
@ -102,6 +80,9 @@ func newRecvBuffer() *recvBuffer {
func (b *recvBuffer) put(r recvMsg) {
b.mu.Lock()
if b.err != nil {
// drop the buffer on the floor. Since b.err is not nil, any subsequent reads
// will always return an error, making this buffer inaccessible.
r.buffer.Free()
b.mu.Unlock()
// An error had occurred earlier, don't accept more
// data or errors.
@ -148,45 +129,70 @@ type recvBufferReader struct {
ctx context.Context
ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
recv *recvBuffer
last *bytes.Buffer // Stores the remaining data in the previous calls.
last mem.Buffer // Stores the remaining data in the previous calls.
err error
freeBuffer func(*bytes.Buffer)
}
// Read reads the next len(p) bytes from last. If last is drained, it tries to
// read additional data from recv. It blocks if there is no additional data
// available in recv. If Read returns any non-nil error, it will continue to
// return that error.
func (r *recvBufferReader) Read(p []byte) (n int, err error) {
func (r *recvBufferReader) ReadHeader(header []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
if r.last != nil {
// Read remaining data left in last call.
copied, _ := r.last.Read(p)
if r.last.Len() == 0 {
r.freeBuffer(r.last)
r.last = nil
}
return copied, nil
n, r.last = mem.ReadUnsafe(header, r.last)
return n, nil
}
if r.closeStream != nil {
n, r.err = r.readClient(p)
n, r.err = r.readHeaderClient(header)
} else {
n, r.err = r.read(p)
n, r.err = r.readHeader(header)
}
return n, r.err
}
func (r *recvBufferReader) read(p []byte) (n int, err error) {
// Read reads the next n bytes from last. If last is drained, it tries to read
// additional data from recv. It blocks if there is no additional data available
// in recv. If Read returns any non-nil error, it will continue to return that
// error.
func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
if r.err != nil {
return nil, r.err
}
if r.last != nil {
buf = r.last
if r.last.Len() > n {
buf, r.last = mem.SplitUnsafe(buf, n)
} else {
r.last = nil
}
return buf, nil
}
if r.closeStream != nil {
buf, r.err = r.readClient(n)
} else {
buf, r.err = r.read(n)
}
return buf, r.err
}
func (r *recvBufferReader) readHeader(header []byte) (n int, err error) {
select {
case <-r.ctxDone:
return 0, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
return r.readAdditional(m, p)
return r.readHeaderAdditional(m, header)
}
}
func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
func (r *recvBufferReader) read(n int) (buf mem.Buffer, err error) {
select {
case <-r.ctxDone:
return nil, ContextErr(r.ctx.Err())
case m := <-r.recv.get():
return r.readAdditional(m, n)
}
}
func (r *recvBufferReader) readHeaderClient(header []byte) (n int, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
@ -207,25 +213,67 @@ func (r *recvBufferReader) readClient(p []byte) (n int, err error) {
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, p)
return r.readHeaderAdditional(m, header)
case m := <-r.recv.get():
return r.readAdditional(m, p)
return r.readHeaderAdditional(m, header)
}
}
func (r *recvBufferReader) readAdditional(m recvMsg, p []byte) (n int, err error) {
func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// If the context is canceled, then closes the stream with nil metadata.
// closeStream writes its error parameter to r.recv as a recvMsg.
// r.readAdditional acts on that message and returns the necessary error.
select {
case <-r.ctxDone:
// Note that this adds the ctx error to the end of recv buffer, and
// reads from the head. This will delay the error until recv buffer is
// empty, thus will delay ctx cancellation in Recv().
//
// It's done this way to fix a race between ctx cancel and trailer. The
// race was, stream.Recv() may return ctx error if ctxDone wins the
// race, but stream.Trailer() may return a non-nil md because the stream
// was not marked as done when trailer is received. This closeStream
// call will mark stream as done, thus fix the race.
//
// TODO: delaying ctx error seems like an unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
r.closeStream(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, n)
case m := <-r.recv.get():
return r.readAdditional(m, n)
}
}
func (r *recvBufferReader) readHeaderAdditional(m recvMsg, header []byte) (n int, err error) {
r.recv.load()
if m.err != nil {
if m.buffer != nil {
m.buffer.Free()
}
return 0, m.err
}
copied, _ := m.buffer.Read(p)
if m.buffer.Len() == 0 {
r.freeBuffer(m.buffer)
r.last = nil
} else {
r.last = m.buffer
n, r.last = mem.ReadUnsafe(header, m.buffer)
return n, nil
}
func (r *recvBufferReader) readAdditional(m recvMsg, n int) (b mem.Buffer, err error) {
r.recv.load()
if m.err != nil {
if m.buffer != nil {
m.buffer.Free()
}
return nil, m.err
}
return copied, nil
if m.buffer.Len() > n {
m.buffer, r.last = mem.SplitUnsafe(m.buffer, n)
}
return m.buffer, nil
}
type streamState uint32
@ -241,7 +289,7 @@ const (
type Stream struct {
id uint32
st ServerTransport // nil for client side Stream
ct *http2Client // nil for server side Stream
ct ClientTransport // nil for server side Stream
ctx context.Context // the associated context of the stream
cancel context.CancelFunc // always nil for client side Stream
done chan struct{} // closed at the end of stream to unblock writers. On the client side.
@ -251,7 +299,7 @@ type Stream struct {
recvCompress string
sendCompress string
buf *recvBuffer
trReader io.Reader
trReader *transportReader
fc *inFlow
wq *writeQuota
@ -408,7 +456,7 @@ func (s *Stream) TrailersOnly() bool {
return s.noHeaders
}
// Trailer returns the cached trailer metedata. Note that if it is not called
// Trailer returns the cached trailer metadata. Note that if it is not called
// after the entire stream is done, it could return an empty MD. Client
// side only.
// It can be safely read only after stream has ended that is either read
@ -499,36 +547,96 @@ func (s *Stream) write(m recvMsg) {
s.buf.put(m)
}
// Read reads all p bytes from the wire for this stream.
func (s *Stream) Read(p []byte) (n int, err error) {
// ReadHeader reads data into the provided header slice from the stream. It
// first checks if there was an error during a previous read operation and
// returns it if present. It then requests a read operation for the length of
// the header. It continues to read from the stream until the entire header
// slice is filled or an error occurs. If an `io.EOF` error is encountered
// with partially read data, it is converted to `io.ErrUnexpectedEOF` to
// indicate an unexpected end of the stream. The method returns any error
// encountered during the read process or nil if the header was successfully
// read.
func (s *Stream) ReadHeader(header []byte) (err error) {
// Don't request a read if there was an error earlier
if er := s.trReader.(*transportReader).er; er != nil {
return 0, er
if er := s.trReader.er; er != nil {
return er
}
s.requestRead(len(p))
return io.ReadFull(s.trReader, p)
s.requestRead(len(header))
for len(header) != 0 {
n, err := s.trReader.ReadHeader(header)
header = header[n:]
if len(header) == 0 {
err = nil
}
if err != nil {
if n > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
return err
}
}
return nil
}
// tranportReader reads all the data available for this Stream from the transport and
// Read reads n bytes from the wire for this stream.
func (s *Stream) Read(n int) (data mem.BufferSlice, err error) {
// Don't request a read if there was an error earlier
if er := s.trReader.er; er != nil {
return nil, er
}
s.requestRead(n)
for n != 0 {
buf, err := s.trReader.Read(n)
var bufLen int
if buf != nil {
bufLen = buf.Len()
}
n -= bufLen
if n == 0 {
err = nil
}
if err != nil {
if bufLen > 0 && err == io.EOF {
err = io.ErrUnexpectedEOF
}
data.Free()
return nil, err
}
data = append(data, buf)
}
return data, nil
}
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
reader io.Reader
reader *recvBufferReader
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
windowHandler func(int)
er error
}
func (t *transportReader) Read(p []byte) (n int, err error) {
n, err = t.reader.Read(p)
func (t *transportReader) ReadHeader(header []byte) (int, error) {
n, err := t.reader.ReadHeader(header)
if err != nil {
t.er = err
return
return 0, err
}
t.windowHandler(n)
return
return n, nil
}
func (t *transportReader) Read(n int) (mem.Buffer, error) {
buf, err := t.reader.Read(n)
if err != nil {
t.er = err
return buf, err
}
t.windowHandler(buf.Len())
return buf, nil
}
// BytesReceived indicates whether any bytes have been received on this stream.
@ -574,6 +682,7 @@ type ServerConfig struct {
ChannelzParent *channelz.Server
MaxHeaderListSize *uint32
HeaderTableSize *uint32
BufferPool mem.BufferPool
}
// ConnectOptions covers all relevant options for communicating with the server.
@ -612,6 +721,8 @@ type ConnectOptions struct {
MaxHeaderListSize *uint32
// UseProxy specifies if a proxy should be used.
UseProxy bool
// The mem.BufferPool to use when reading/writing to the wire.
BufferPool mem.BufferPool
}
// NewClientTransport establishes the transport with the required ConnectOptions
@ -673,7 +784,7 @@ type ClientTransport interface {
// Write sends the data for the given stream. A nil stream indicates
// the write is to be performed on the transport as a whole.
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// NewStream creates a Stream for an RPC.
NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error)
@ -725,7 +836,7 @@ type ServerTransport interface {
// Write sends the data for the given stream.
// Write may not be called on all streams.
Write(s *Stream, hdr []byte, data []byte, opts *Options) error
Write(s *Stream, hdr []byte, data mem.BufferSlice, opts *Options) error
// WriteStatus sends the status of a stream to the client. WriteStatus is
// the final call made on a stream and always occurs.
@ -798,7 +909,7 @@ var (
// connection is draining. This could be caused by goaway or balancer
// removing the address.
errStreamDrain = status.Error(codes.Unavailable, "the connection is draining")
// errStreamDone is returned from write at the client side to indiacte application
// errStreamDone is returned from write at the client side to indicate application
// layer of an error.
errStreamDone = errors.New("the stream is done")
// StatusGoAway indicates that the server sent a GOAWAY that included this

View File

@ -34,15 +34,29 @@ type ClientParameters struct {
// After a duration of this time if the client doesn't see any activity it
// pings the server to see if the transport is still alive.
// If set below 10s, a minimum value of 10s will be used instead.
Time time.Duration // The current default value is infinity.
//
// Note that gRPC servers have a default EnforcementPolicy.MinTime of 5
// minutes (which means the client shouldn't ping more frequently than every
// 5 minutes).
//
// Though not ideal, it's not a strong requirement for Time to be less than
// EnforcementPolicy.MinTime. Time will automatically double if the server
// disconnects due to its enforcement policy.
//
// For more details, see
// https://github.com/grpc/proposal/blob/master/A8-client-side-keepalive.md
Time time.Duration
// After having pinged for keepalive check, the client waits for a duration
// of Timeout and if no activity is seen even after that the connection is
// closed.
Timeout time.Duration // The current default value is 20 seconds.
//
// If keepalive is enabled, and this value is not explicitly set, the default
// is 20 seconds.
Timeout time.Duration
// If true, client sends keepalive pings even with no active RPCs. If false,
// when there are no active RPCs, Time and Timeout will be ignored and no
// keepalive pings will be sent.
PermitWithoutStream bool // false by default.
PermitWithoutStream bool
}
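// Illustrative usage sketch (not part of the vendored sources): configuring a
// client with these parameters. The target address and durations below are
// assumptions chosen for the example; grpc.NewClient, WithKeepaliveParams and
// insecure.NewCredentials are existing grpc-go APIs.
//
//	kacp := keepalive.ClientParameters{
//		Time:                30 * time.Second, // ping after 30s without activity
//		Timeout:             20 * time.Second, // wait 20s for the ping ack
//		PermitWithoutStream: true,             // ping even when no RPCs are active
//	}
//	conn, err := grpc.NewClient("localhost:50051",
//		grpc.WithTransportCredentials(insecure.NewCredentials()),
//		grpc.WithKeepaliveParams(kacp),
//	)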
// ServerParameters is used to set keepalive and max-age parameters on the

194
vendor/google.golang.org/grpc/mem/buffer_pool.go generated vendored Normal file
View File

@ -0,0 +1,194 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package mem
import (
"sort"
"sync"
"google.golang.org/grpc/internal"
)
// BufferPool is a pool of buffers that can be shared and reused, resulting in
// decreased memory allocation.
type BufferPool interface {
// Get returns a buffer with specified length from the pool.
Get(length int) *[]byte
// Put returns a buffer to the pool.
Put(*[]byte)
}
var defaultBufferPoolSizes = []int{
256,
4 << 10, // 4KB (go page size)
16 << 10, // 16KB (max HTTP/2 frame size used by gRPC)
32 << 10, // 32KB (default buffer size for io.Copy)
1 << 20, // 1MB
}
var defaultBufferPool BufferPool
func init() {
defaultBufferPool = NewTieredBufferPool(defaultBufferPoolSizes...)
internal.SetDefaultBufferPoolForTesting = func(pool BufferPool) {
defaultBufferPool = pool
}
internal.SetBufferPoolingThresholdForTesting = func(threshold int) {
bufferPoolingThreshold = threshold
}
}
// DefaultBufferPool returns the current default buffer pool. It is a BufferPool
// created with NewBufferPool that uses a set of default sizes optimized for
// expected workflows.
func DefaultBufferPool() BufferPool {
return defaultBufferPool
}
// NewTieredBufferPool returns a BufferPool implementation that uses multiple
// underlying pools of the given pool sizes.
func NewTieredBufferPool(poolSizes ...int) BufferPool {
sort.Ints(poolSizes)
pools := make([]*sizedBufferPool, len(poolSizes))
for i, s := range poolSizes {
pools[i] = newSizedBufferPool(s)
}
return &tieredBufferPool{
sizedPools: pools,
}
}
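// Illustrative usage sketch (the sizes here are arbitrary example values and
// payload is assumed to be an existing []byte):
//
//	pool := DefaultBufferPool()
//	buf := pool.Get(4096) // *[]byte with length 4096
//	copy(*buf, payload)   // use the buffer
//	pool.Put(buf)         // return it so it can be reused
//
// A custom tiering can be built the same way, e.g. NewTieredBufferPool(1<<10,
// 64<<10): Get is routed to the smallest sized pool that fits the request, and
// anything larger falls back to a simple pool.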
// tieredBufferPool implements the BufferPool interface with multiple tiers of
// buffer pools for different sizes of buffers.
type tieredBufferPool struct {
sizedPools []*sizedBufferPool
fallbackPool simpleBufferPool
}
func (p *tieredBufferPool) Get(size int) *[]byte {
return p.getPool(size).Get(size)
}
func (p *tieredBufferPool) Put(buf *[]byte) {
p.getPool(cap(*buf)).Put(buf)
}
func (p *tieredBufferPool) getPool(size int) BufferPool {
poolIdx := sort.Search(len(p.sizedPools), func(i int) bool {
return p.sizedPools[i].defaultSize >= size
})
if poolIdx == len(p.sizedPools) {
return &p.fallbackPool
}
return p.sizedPools[poolIdx]
}
// sizedBufferPool is a BufferPool implementation that is optimized for specific
// buffer sizes. For example, HTTP/2 frames within gRPC have a default max size
// of 16kb and a sizedBufferPool can be configured to only return buffers with a
// capacity of 16kb. Note, however, that it does not support returning larger
// buffers and in fact panics if such a buffer is requested. Because of this,
// this BufferPool implementation is not meant to be used on its own; rather, it
// is intended to be embedded in a tieredBufferPool such that Get is only
// invoked when the required size is smaller than or equal to defaultSize.
type sizedBufferPool struct {
pool sync.Pool
defaultSize int
}
func (p *sizedBufferPool) Get(size int) *[]byte {
buf := p.pool.Get().(*[]byte)
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
return buf
}
func (p *sizedBufferPool) Put(buf *[]byte) {
if cap(*buf) < p.defaultSize {
// Ignore buffers that are too small to fit in the pool. Otherwise, when
// Get is called it will panic as it tries to index outside the bounds
// of the buffer.
return
}
p.pool.Put(buf)
}
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
pool: sync.Pool{
New: func() any {
buf := make([]byte, size)
return &buf
},
},
defaultSize: size,
}
}
var _ BufferPool = (*simpleBufferPool)(nil)
// simpleBufferPool is an implementation of the BufferPool interface that
// attempts to pool buffers with a sync.Pool. When Get is invoked, it tries to
// acquire a buffer from the pool but if that buffer is too small, it returns it
// to the pool and creates a new one.
type simpleBufferPool struct {
pool sync.Pool
}
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
*bs = (*bs)[:size]
return bs
}
// A buffer was pulled from the pool, but it is too small. Put it back in
// the pool and create one large enough.
if ok {
p.pool.Put(bs)
}
b := make([]byte, size)
return &b
}
func (p *simpleBufferPool) Put(buf *[]byte) {
p.pool.Put(buf)
}
var _ BufferPool = NopBufferPool{}
// NopBufferPool is a buffer pool that returns new buffers without pooling.
type NopBufferPool struct{}
// Get returns a buffer with specified length from the pool.
func (NopBufferPool) Get(length int) *[]byte {
b := make([]byte, length)
return &b
}
// Put returns a buffer to the pool.
func (NopBufferPool) Put(*[]byte) {
}
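// Illustrative usage sketch: NopBufferPool satisfies BufferPool, so it can be
// plugged in wherever a pool is expected when reuse is not desired (for
// example, to simplify debugging):
//
//	var pool BufferPool = NopBufferPool{}
//	buf := pool.Get(1024) // freshly allocated, never reused
//	pool.Put(buf)         // no-op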

226
vendor/google.golang.org/grpc/mem/buffer_slice.go generated vendored Normal file
View File

@ -0,0 +1,226 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package mem
import (
"io"
)
// BufferSlice offers a means to represent data that spans one or more Buffer
// instances. A BufferSlice is meant to be immutable after creation, and methods
// like Ref create and return copies of the slice. This is why all methods have
// value receivers rather than pointer receivers.
//
// Note that any of the methods that read the underlying buffers such as Ref,
// Len or CopyTo etc., will panic if any underlying buffers have already been
// freed. It is recommended to not directly interact with any of the underlying
// buffers directly, rather such interactions should be mediated through the
// various methods on this type.
//
// By convention, any APIs that return (mem.BufferSlice, error) should reduce
// the burden on the caller by never returning a mem.BufferSlice that needs to
// be freed if the error is non-nil, unless explicitly stated.
type BufferSlice []Buffer
// Len returns the sum of the length of all the Buffers in this slice.
//
// # Warning
//
// Invoking the built-in len on a BufferSlice will return the number of buffers
// in the slice, and *not* the value returned by this function.
func (s BufferSlice) Len() int {
var length int
for _, b := range s {
length += b.Len()
}
return length
}
// Ref invokes Ref on each buffer in the slice.
func (s BufferSlice) Ref() {
for _, b := range s {
b.Ref()
}
}
// Free invokes Buffer.Free() on each Buffer in the slice.
func (s BufferSlice) Free() {
for _, b := range s {
b.Free()
}
}
// CopyTo copies each of the underlying Buffer's data into the given buffer,
// returning the number of bytes copied. Has the same semantics as the copy
// builtin in that it will copy as many bytes as it can, stopping when either dst
// is full or s runs out of data, returning the minimum of s.Len() and len(dst).
func (s BufferSlice) CopyTo(dst []byte) int {
off := 0
for _, b := range s {
off += copy(dst[off:], b.ReadOnlyData())
}
return off
}
// Materialize concatenates all the underlying Buffer's data into a single
// contiguous buffer using CopyTo.
func (s BufferSlice) Materialize() []byte {
l := s.Len()
if l == 0 {
return nil
}
out := make([]byte, l)
s.CopyTo(out)
return out
}
// MaterializeToBuffer functions like Materialize except that it writes the data
// to a single Buffer pulled from the given BufferPool.
//
// As a special case, if the input BufferSlice only actually has one Buffer, this
// function simply increases the refcount before returning said Buffer. Freeing this
// buffer won't release it until the BufferSlice is itself released.
func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
if len(s) == 1 {
s[0].Ref()
return s[0]
}
sLen := s.Len()
if sLen == 0 {
return emptyBuffer{}
}
buf := pool.Get(sLen)
s.CopyTo(*buf)
return NewBuffer(buf, pool)
}
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
func (s BufferSlice) Reader() Reader {
s.Ref()
return &sliceReader{
data: s,
len: s.Len(),
}
}
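// Illustrative usage sketch (the input strings and the pool are example
// values):
//
//	pool := DefaultBufferPool()
//	s := BufferSlice{Copy([]byte("hello "), pool), Copy([]byte("world"), pool)}
//	_ = s.Len() // 11: total bytes across both buffers
//
//	r := s.Reader()            // takes its own references to each buffer
//	out := make([]byte, 11)
//	_, _ = io.ReadFull(r, out) // buffers are freed as they are fully read
//	_ = r.Close()              // releases anything left unread
//	s.Free()                   // drop the slice's original references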
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
// with other parts of the system. It also provides an additional convenience method
// Remaining(), which returns the number of unread bytes remaining in the slice.
// Buffers will be freed as they are read.
type Reader interface {
io.Reader
io.ByteReader
// Close frees the underlying BufferSlice and never returns an error. Subsequent
// calls to Read will return (0, io.EOF).
Close() error
// Remaining returns the number of unread bytes remaining in the slice.
Remaining() int
}
type sliceReader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
func (r *sliceReader) Remaining() int {
return r.len
}
func (r *sliceReader) Close() error {
r.data.Free()
r.data = nil
r.len = 0
return nil
}
func (r *sliceReader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
r.data[0].Free()
r.data = r.data[1:]
r.bufferIdx = 0
return true
}
func (r *sliceReader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
for len(buf) != 0 && r.len != 0 {
// Copy as much as possible from the first Buffer in the slice into the
// given byte slice.
data := r.data[0].ReadOnlyData()
copied := copy(buf, data[r.bufferIdx:])
r.len -= copied // Reduce len by the number of bytes copied.
r.bufferIdx += copied // Increment the buffer index.
n += copied // Increment the total number of bytes read.
buf = buf[copied:] // Shrink the given byte slice.
// If we have copied all the data from the first Buffer, free it and advance to
// the next in the slice.
r.freeFirstBufferIfEmpty()
}
return n, nil
}
func (r *sliceReader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
// There may be any number of empty buffers in the slice; clear them all until a
// non-empty buffer is reached. This is guaranteed to exit since r.len is not 0.
for r.freeFirstBufferIfEmpty() {
}
b := r.data[0].ReadOnlyData()[r.bufferIdx]
r.len--
r.bufferIdx++
// Free the first buffer in the slice if the last byte was read
r.freeFirstBufferIfEmpty()
return b, nil
}
var _ io.Writer = (*writer)(nil)
type writer struct {
buffers *BufferSlice
pool BufferPool
}
func (w *writer) Write(p []byte) (n int, err error) {
b := Copy(p, w.pool)
*w.buffers = append(*w.buffers, b)
return b.Len(), nil
}
// NewWriter wraps the given BufferSlice and BufferPool to implement the
// io.Writer interface. Every call to Write copies the contents of the given
// buffer into a new Buffer pulled from the given pool and the Buffer is added to
// the given BufferSlice.
func NewWriter(buffers *BufferSlice, pool BufferPool) io.Writer {
return &writer{buffers: buffers, pool: pool}
}

268
vendor/google.golang.org/grpc/mem/buffers.go generated vendored Normal file
View File

@ -0,0 +1,268 @@
/*
*
* Copyright 2024 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package mem provides utilities that facilitate memory reuse in byte slices
// that are used as buffers.
//
// # Experimental
//
// Notice: All APIs in this package are EXPERIMENTAL and may be changed or
// removed in a later release.
package mem
import (
"fmt"
"sync"
"sync/atomic"
)
// A Buffer represents a reference counted piece of data (in bytes) that can be
// acquired by a call to NewBuffer() or Copy(). A reference to a Buffer may be
// released by calling Free(), which invokes the free function given at creation
// only after all references are released.
//
// Note that a Buffer is not safe for concurrent access and instead each
// goroutine should use its own reference to the data, which can be acquired via
// a call to Ref().
//
// Attempts to access the underlying data after releasing the reference to the
// Buffer will panic.
type Buffer interface {
// ReadOnlyData returns the underlying byte slice. Note that it is undefined
// behavior to modify the contents of this slice in any way.
ReadOnlyData() []byte
// Ref increases the reference counter for this Buffer.
Ref()
// Free decrements this Buffer's reference counter and frees the underlying
// byte slice if the counter reaches 0 as a result of this call.
Free()
// Len returns the Buffer's size.
Len() int
split(n int) (left, right Buffer)
read(buf []byte) (int, Buffer)
}
var (
bufferPoolingThreshold = 1 << 10
bufferObjectPool = sync.Pool{New: func() any { return new(buffer) }}
refObjectPool = sync.Pool{New: func() any { return new(atomic.Int32) }}
)
// IsBelowBufferPoolingThreshold returns true if the given size is less than or
// equal to the threshold for buffer pooling. This is used to determine whether
// to pool buffers or allocate them directly.
func IsBelowBufferPoolingThreshold(size int) bool {
return size <= bufferPoolingThreshold
}
type buffer struct {
origData *[]byte
data []byte
refs *atomic.Int32
pool BufferPool
}
func newBuffer() *buffer {
return bufferObjectPool.Get().(*buffer)
}
// NewBuffer creates a new Buffer from the given data, initializing the reference
// counter to 1. The data will then be returned to the given pool when all
// references to the returned Buffer are released. As a special case to avoid
// additional allocations, if the given buffer pool is nil, the returned buffer
// will be a "no-op" Buffer where invoking Buffer.Free() does nothing and the
// underlying data is never freed.
//
// Note that the backing array of the given data is not copied.
func NewBuffer(data *[]byte, pool BufferPool) Buffer {
// Use the buffer's capacity instead of the length, otherwise buffers may
// not be reused under certain conditions. For example, if a large buffer
// is acquired from the pool, but fewer bytes than the buffering threshold
// are written to it, the buffer will not be returned to the pool.
if pool == nil || IsBelowBufferPoolingThreshold(cap(*data)) {
return (SliceBuffer)(*data)
}
b := newBuffer()
b.origData = data
b.data = *data
b.pool = pool
b.refs = refObjectPool.Get().(*atomic.Int32)
b.refs.Add(1)
return b
}
// Copy creates a new Buffer from the given data, initializing the reference
// counter to 1.
//
// It acquires a []byte from the given pool and copies over the backing array
// of the given data. The []byte acquired from the pool is returned to the
// pool when all references to the returned Buffer are released.
func Copy(data []byte, pool BufferPool) Buffer {
if IsBelowBufferPoolingThreshold(len(data)) {
buf := make(SliceBuffer, len(data))
copy(buf, data)
return buf
}
buf := pool.Get(len(data))
copy(*buf, data)
return NewBuffer(buf, pool)
}
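// Illustrative usage sketch (the pool and size are example values):
//
//	pool := DefaultBufferPool()
//	data := pool.Get(32 << 10)
//	b := NewBuffer(data, pool) // reference count starts at 1
//	b.Ref()                    // take an extra reference before sharing
//	b.Free()                   // releasing one reference does not free the data
//	b.Free()                   // last reference released: *data returns to the pool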
func (b *buffer) ReadOnlyData() []byte {
if b.refs == nil {
panic("Cannot read freed buffer")
}
return b.data
}
func (b *buffer) Ref() {
if b.refs == nil {
panic("Cannot ref freed buffer")
}
b.refs.Add(1)
}
func (b *buffer) Free() {
if b.refs == nil {
panic("Cannot free freed buffer")
}
refs := b.refs.Add(-1)
switch {
case refs > 0:
return
case refs == 0:
if b.pool != nil {
b.pool.Put(b.origData)
}
refObjectPool.Put(b.refs)
b.origData = nil
b.data = nil
b.refs = nil
b.pool = nil
bufferObjectPool.Put(b)
default:
panic("Cannot free freed buffer")
}
}
func (b *buffer) Len() int {
return len(b.ReadOnlyData())
}
func (b *buffer) split(n int) (Buffer, Buffer) {
if b.refs == nil {
panic("Cannot split freed buffer")
}
b.refs.Add(1)
split := newBuffer()
split.origData = b.origData
split.data = b.data[n:]
split.refs = b.refs
split.pool = b.pool
b.data = b.data[:n]
return b, split
}
func (b *buffer) read(buf []byte) (int, Buffer) {
if b.refs == nil {
panic("Cannot read freed buffer")
}
n := copy(buf, b.data)
if n == len(b.data) {
b.Free()
return n, nil
}
b.data = b.data[n:]
return n, b
}
func (b *buffer) String() string {
return fmt.Sprintf("mem.Buffer(%p, data: %p, length: %d)", b, b.ReadOnlyData(), len(b.ReadOnlyData()))
}
// ReadUnsafe reads bytes from the given Buffer into the provided slice.
// It does not perform safety checks.
func ReadUnsafe(dst []byte, buf Buffer) (int, Buffer) {
return buf.read(dst)
}
// SplitUnsafe modifies the receiver to point to the first n bytes while it
// returns a new reference to the remaining bytes. The returned Buffer
// functions just like a normal reference acquired using Ref().
func SplitUnsafe(buf Buffer, n int) (left, right Buffer) {
return buf.split(n)
}
type emptyBuffer struct{}
func (e emptyBuffer) ReadOnlyData() []byte {
return nil
}
func (e emptyBuffer) Ref() {}
func (e emptyBuffer) Free() {}
func (e emptyBuffer) Len() int {
return 0
}
func (e emptyBuffer) split(int) (left, right Buffer) {
return e, e
}
func (e emptyBuffer) read([]byte) (int, Buffer) {
return 0, e
}
// SliceBuffer is a Buffer implementation that wraps a byte slice. It provides
// methods for reading, splitting, and managing the byte slice.
type SliceBuffer []byte
// ReadOnlyData returns the byte slice.
func (s SliceBuffer) ReadOnlyData() []byte { return s }
// Ref is a noop implementation of Ref.
func (s SliceBuffer) Ref() {}
// Free is a noop implementation of Free.
func (s SliceBuffer) Free() {}
// Len returns the length of the underlying byte slice.
func (s SliceBuffer) Len() int { return len(s) }
func (s SliceBuffer) split(n int) (left, right Buffer) {
return s[:n], s[n:]
}
func (s SliceBuffer) read(buf []byte) (int, Buffer) {
n := copy(buf, s)
if n == len(s) {
return n, nil
}
return n, s[n:]
}

View File

@ -213,11 +213,6 @@ func FromIncomingContext(ctx context.Context) (MD, bool) {
// ValueFromIncomingContext returns the metadata value corresponding to the metadata
// key from the incoming metadata if it exists. Keys are matched in a case insensitive
// manner.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func ValueFromIncomingContext(ctx context.Context, key string) []string {
md, ok := ctx.Value(mdIncomingKey{}).(MD)
if !ok {
@ -228,7 +223,7 @@ func ValueFromIncomingContext(ctx context.Context, key string) []string {
return copyOf(v)
}
for k, v := range md {
// Case insenitive comparison: MD is a map, and there's no guarantee
// Case insensitive comparison: MD is a map, and there's no guarantee
// that the MD attached to the context is created using our helper
// functions.
if strings.EqualFold(k, key) {

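// Illustrative usage sketch (the key "authorization" and the handler shape are
// assumptions for the example; ValueFromIncomingContext is the API shown
// above):
//
//	func handle(ctx context.Context) {
//		if vals := metadata.ValueFromIncomingContext(ctx, "authorization"); len(vals) > 0 {
//			// vals contains every value set for the key, matched case-insensitively.
//			_ = vals[0]
//		}
//	}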
View File

@ -20,6 +20,7 @@ package grpc
import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/status"
)
@ -31,9 +32,10 @@ import (
// later release.
type PreparedMsg struct {
// Struct for preparing msg before sending them
encodedData []byte
encodedData mem.BufferSlice
hdr []byte
payload []byte
payload mem.BufferSlice
pf payloadFormat
}
// Encode marshalls and compresses the message using the codec and compressor for the stream.
@ -57,11 +59,27 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
if err != nil {
return err
}
p.encodedData = data
compData, err := compress(data, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp)
materializedData := data.Materialize()
data.Free()
p.encodedData = mem.BufferSlice{mem.NewBuffer(&materializedData, nil)}
// TODO: it should be possible to grab the bufferPool from the underlying
// stream implementation with a type cast to its actual type (such as
// addrConnStream) and accessing the buffer pool directly.
var compData mem.BufferSlice
compData, p.pf, err = compress(p.encodedData, rpcInfo.preloaderInfo.cp, rpcInfo.preloaderInfo.comp, mem.DefaultBufferPool())
if err != nil {
return err
}
p.hdr, p.payload = msgHeader(data, compData)
if p.pf.isCompressed() {
materializedCompData := compData.Materialize()
compData.Free()
compData = mem.BufferSlice{mem.NewBuffer(&materializedCompData, nil)}
}
p.hdr, p.payload = msgHeader(p.encodedData, compData, p.pf)
return nil
}
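// Illustrative usage sketch (stream and msg are assumed to exist in the
// surrounding handler; SendMsg is expected to detect *PreparedMsg and reuse
// the pre-encoded bytes instead of marshaling again):
//
//	prepared := &grpc.PreparedMsg{}
//	if err := prepared.Encode(stream, msg); err != nil {
//		return err
//	}
//	return stream.SendMsg(prepared)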

View File

@ -1,123 +0,0 @@
#!/bin/bash
# Copyright 2020 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -eu -o pipefail
WORKDIR=$(mktemp -d)
function finish {
rm -rf "$WORKDIR"
}
trap finish EXIT
export GOBIN=${WORKDIR}/bin
export PATH=${GOBIN}:${PATH}
mkdir -p ${GOBIN}
echo "remove existing generated files"
# grpc_testing_not_regenerate/*.pb.go is not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')
echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
echo "go install cmd/protoc-gen-go-grpc"
(cd cmd/protoc-gen-go-grpc && go install .)
echo "git clone https://github.com/grpc/grpc-proto"
git clone --quiet https://github.com/grpc/grpc-proto ${WORKDIR}/grpc-proto
echo "git clone https://github.com/protocolbuffers/protobuf"
git clone --quiet https://github.com/protocolbuffers/protobuf ${WORKDIR}/protobuf
# Pull in code.proto as a proto dependency
mkdir -p ${WORKDIR}/googleapis/google/rpc
echo "curl https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto"
curl --silent https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/code.proto > ${WORKDIR}/googleapis/google/rpc/code.proto
mkdir -p ${WORKDIR}/out
# Generates sources without the embed requirement
LEGACY_SOURCES=(
${WORKDIR}/grpc-proto/grpc/binlog/v1/binarylog.proto
${WORKDIR}/grpc-proto/grpc/channelz/v1/channelz.proto
${WORKDIR}/grpc-proto/grpc/health/v1/health.proto
${WORKDIR}/grpc-proto/grpc/lb/v1/load_balancer.proto
profiling/proto/service.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1alpha/reflection.proto
${WORKDIR}/grpc-proto/grpc/reflection/v1/reflection.proto
)
# Generates only the new gRPC Service symbols
SOURCES=(
$(git ls-files --exclude-standard --cached --others "*.proto" | grep -v '^profiling/proto/service.proto$')
${WORKDIR}/grpc-proto/grpc/gcp/altscontext.proto
${WORKDIR}/grpc-proto/grpc/gcp/handshaker.proto
${WORKDIR}/grpc-proto/grpc/gcp/transport_security_common.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls.proto
${WORKDIR}/grpc-proto/grpc/lookup/v1/rls_config.proto
${WORKDIR}/grpc-proto/grpc/testing/*.proto
${WORKDIR}/grpc-proto/grpc/core/*.proto
)
# These options of the form 'Mfoo.proto=bar' instruct the codegen to use an
# import path of 'bar' in the generated code when 'foo.proto' is imported in
# one of the sources.
#
# Note that the protos listed here are all for testing purposes. All protos to
# be used externally should have a go_package option (and they don't need to be
# listed here).
OPTS=Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\
Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\
Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing
for src in ${SOURCES[@]}; do
echo "protoc ${src}"
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},use_generic_streams_experimental=true:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
-I${WORKDIR}/protobuf/src \
${src}
done
for src in ${LEGACY_SOURCES[@]}; do
echo "protoc ${src}"
protoc --go_out=${OPTS}:${WORKDIR}/out --go-grpc_out=${OPTS},require_unimplemented_servers=false:${WORKDIR}/out \
-I"." \
-I${WORKDIR}/grpc-proto \
-I${WORKDIR}/googleapis \
-I${WORKDIR}/protobuf/src \
${src}
done
# The go_package option in grpc/lookup/v1/rls.proto doesn't match the
# current location. Move it into the right place.
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
# grpc_testing_not_regenerate/*.pb.go are not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/test/grpc_testing_not_regenerate/*.pb.go
cp -R ${WORKDIR}/out/google.golang.org/grpc/* .

View File

@ -66,7 +66,7 @@ func newCCResolverWrapper(cc *ClientConn) *ccResolverWrapper {
// any newly created ccResolverWrapper, except that close may be called instead.
func (ccr *ccResolverWrapper) start() error {
errCh := make(chan error)
ccr.serializer.Schedule(func(ctx context.Context) {
ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil {
return
}
@ -85,7 +85,7 @@ func (ccr *ccResolverWrapper) start() error {
}
func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOptions) {
ccr.serializer.Schedule(func(ctx context.Context) {
ccr.serializer.TrySchedule(func(ctx context.Context) {
if ctx.Err() != nil || ccr.resolver == nil {
return
}
@ -102,7 +102,7 @@ func (ccr *ccResolverWrapper) close() {
ccr.closed = true
ccr.mu.Unlock()
ccr.serializer.Schedule(func(context.Context) {
ccr.serializer.TrySchedule(func(context.Context) {
if ccr.resolver == nil {
return
}
@ -177,6 +177,9 @@ func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.P
// addChannelzTraceEvent adds a channelz trace event containing the new
// state received from resolver implementations.
func (ccr *ccResolverWrapper) addChannelzTraceEvent(s resolver.State) {
if !logger.V(0) && !channelz.IsOn() {
return
}
var updates []string
var oldSC, newSC *ServiceConfig
var oldOK, newOK bool

View File

@ -19,7 +19,6 @@
package grpc
import (
"bytes"
"compress/gzip"
"context"
"encoding/binary"
@ -35,6 +34,7 @@ import (
"google.golang.org/grpc/encoding"
"google.golang.org/grpc/encoding/proto"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@ -220,8 +220,8 @@ type HeaderCallOption struct {
HeaderAddr *metadata.MD
}
func (o HeaderCallOption) before(c *callInfo) error { return nil }
func (o HeaderCallOption) after(c *callInfo, attempt *csAttempt) {
func (o HeaderCallOption) before(*callInfo) error { return nil }
func (o HeaderCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.HeaderAddr, _ = attempt.s.Header()
}
@ -242,8 +242,8 @@ type TrailerCallOption struct {
TrailerAddr *metadata.MD
}
func (o TrailerCallOption) before(c *callInfo) error { return nil }
func (o TrailerCallOption) after(c *callInfo, attempt *csAttempt) {
func (o TrailerCallOption) before(*callInfo) error { return nil }
func (o TrailerCallOption) after(_ *callInfo, attempt *csAttempt) {
*o.TrailerAddr = attempt.s.Trailer()
}
@ -264,24 +264,20 @@ type PeerCallOption struct {
PeerAddr *peer.Peer
}
func (o PeerCallOption) before(c *callInfo) error { return nil }
func (o PeerCallOption) after(c *callInfo, attempt *csAttempt) {
func (o PeerCallOption) before(*callInfo) error { return nil }
func (o PeerCallOption) after(_ *callInfo, attempt *csAttempt) {
if x, ok := peer.FromContext(attempt.s.Context()); ok {
*o.PeerAddr = *x
}
}
// WaitForReady configures the action to take when an RPC is attempted on broken
// connections or unreachable servers. If waitForReady is false and the
// connection is in the TRANSIENT_FAILURE state, the RPC will fail
// immediately. Otherwise, the RPC client will block the call until a
// connection is available (or the call is canceled or times out) and will
// retry the call if it fails due to a transient error. gRPC will not retry if
// data was written to the wire unless the server indicates it did not process
// the data. Please refer to
// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md.
// WaitForReady configures the RPC's behavior when the client is in
// TRANSIENT_FAILURE, which occurs when all addresses fail to connect. If
// waitForReady is false, the RPC will fail immediately. Otherwise, the client
// will wait until a connection becomes available or the RPC's deadline is
// reached.
//
// By default, RPCs don't "wait for ready".
// By default, RPCs do not "wait for ready".
func WaitForReady(waitForReady bool) CallOption {
return FailFastCallOption{FailFast: !waitForReady}
}
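// Illustrative usage sketch (client, req and the RPC name are assumptions for
// the example):
//
//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
//	defer cancel()
//	resp, err := client.SomeRPC(ctx, req, grpc.WaitForReady(true))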
@ -308,7 +304,7 @@ func (o FailFastCallOption) before(c *callInfo) error {
c.failFast = o.FailFast
return nil
}
func (o FailFastCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o FailFastCallOption) after(*callInfo, *csAttempt) {}
// OnFinish returns a CallOption that configures a callback to be called when
// the call completes. The error passed to the callback is the status of the
@ -343,7 +339,7 @@ func (o OnFinishCallOption) before(c *callInfo) error {
return nil
}
func (o OnFinishCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o OnFinishCallOption) after(*callInfo, *csAttempt) {}
// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can receive. If this is not set, gRPC uses the default
@ -367,7 +363,7 @@ func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error {
c.maxReceiveMessageSize = &o.MaxRecvMsgSize
return nil
}
func (o MaxRecvMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxRecvMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// MaxCallSendMsgSize returns a CallOption which sets the maximum message size
// in bytes the client can send. If this is not set, gRPC uses the default
@ -391,7 +387,7 @@ func (o MaxSendMsgSizeCallOption) before(c *callInfo) error {
c.maxSendMessageSize = &o.MaxSendMsgSize
return nil
}
func (o MaxSendMsgSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxSendMsgSizeCallOption) after(*callInfo, *csAttempt) {}
// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials
// for a call.
@ -414,7 +410,7 @@ func (o PerRPCCredsCallOption) before(c *callInfo) error {
c.creds = o.Creds
return nil
}
func (o PerRPCCredsCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o PerRPCCredsCallOption) after(*callInfo, *csAttempt) {}
// UseCompressor returns a CallOption which sets the compressor used when
// sending the request. If WithCompressor is also set, UseCompressor has
@ -442,7 +438,7 @@ func (o CompressorCallOption) before(c *callInfo) error {
c.compressorType = o.CompressorType
return nil
}
func (o CompressorCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o CompressorCallOption) after(*callInfo, *csAttempt) {}
// CallContentSubtype returns a CallOption that will set the content-subtype
// for a call. For example, if content-subtype is "json", the Content-Type over
@ -479,7 +475,7 @@ func (o ContentSubtypeCallOption) before(c *callInfo) error {
c.contentSubtype = o.ContentSubtype
return nil
}
func (o ContentSubtypeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o ContentSubtypeCallOption) after(*callInfo, *csAttempt) {}
// ForceCodec returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
@ -515,10 +511,50 @@ type ForceCodecCallOption struct {
}
func (o ForceCodecCallOption) before(c *callInfo) error {
c.codec = o.Codec
c.codec = newCodecV1Bridge(o.Codec)
return nil
}
func (o ForceCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o ForceCodecCallOption) after(*callInfo, *csAttempt) {}
// ForceCodecV2 returns a CallOption that will set codec to be used for all
// request and response messages for a call. The result of calling Name() will
// be used as the content-subtype after converting to lowercase, unless
// CallContentSubtype is also used.
//
// See Content-Type on
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for
// more details. Also see the documentation on RegisterCodec and
// CallContentSubtype for more details on the interaction between Codec and
// content-subtype.
//
// This function is provided for advanced users; prefer to use only
// CallContentSubtype to select a registered codec instead.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func ForceCodecV2(codec encoding.CodecV2) CallOption {
return ForceCodecV2CallOption{CodecV2: codec}
}
// ForceCodecV2CallOption is a CallOption that indicates the codec used for
// marshaling messages.
//
// # Experimental
//
// Notice: This type is EXPERIMENTAL and may be changed or removed in a
// later release.
type ForceCodecV2CallOption struct {
CodecV2 encoding.CodecV2
}
func (o ForceCodecV2CallOption) before(c *callInfo) error {
c.codec = o.CodecV2
return nil
}
func (o ForceCodecV2CallOption) after(*callInfo, *csAttempt) {}
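To make the new CodecV2 surface concrete, here is a minimal sketch of an encoding.CodecV2 built on protobuf and passed via ForceCodecV2. Wrapping the marshaled bytes with mem.NewBuffer mirrors what this diff does in recvAndDecompress; the codec itself is illustrative and assumes the three-method CodecV2 interface (Marshal, Unmarshal, Name).

```go
package example

import (
	"fmt"

	"google.golang.org/grpc"
	"google.golang.org/grpc/mem"
	"google.golang.org/protobuf/proto"
)

// protoCodecV2 is a sketch of an encoding.CodecV2 that marshals protobuf
// messages into a single-buffer mem.BufferSlice.
type protoCodecV2 struct{}

func (protoCodecV2) Marshal(v any) (mem.BufferSlice, error) {
	m, ok := v.(proto.Message)
	if !ok {
		return nil, fmt.Errorf("not a proto.Message: %T", v)
	}
	b, err := proto.Marshal(m)
	if err != nil {
		return nil, err
	}
	// Wrap the bytes without pooling; a real codec could draw from a mem.BufferPool.
	return mem.BufferSlice{mem.NewBuffer(&b, nil)}, nil
}

func (protoCodecV2) Unmarshal(data mem.BufferSlice, v any) error {
	m, ok := v.(proto.Message)
	if !ok {
		return fmt.Errorf("not a proto.Message: %T", v)
	}
	// Materialize flattens the buffer slice into one contiguous []byte.
	return proto.Unmarshal(data.Materialize(), m)
}

func (protoCodecV2) Name() string { return "proto" }

// Usage on a single call (the option can also be applied as a default call option).
var _ grpc.CallOption = grpc.ForceCodecV2(protoCodecV2{})
```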
// CallCustomCodec behaves like ForceCodec, but accepts a grpc.Codec instead of
// an encoding.Codec.
@ -540,10 +576,10 @@ type CustomCodecCallOption struct {
}
func (o CustomCodecCallOption) before(c *callInfo) error {
c.codec = o.Codec
c.codec = newCodecV0Bridge(o.Codec)
return nil
}
func (o CustomCodecCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o CustomCodecCallOption) after(*callInfo, *csAttempt) {}
// MaxRetryRPCBufferSize returns a CallOption that limits the amount of memory
// used for buffering this RPC's requests for retry purposes.
@ -571,7 +607,7 @@ func (o MaxRetryRPCBufferSizeCallOption) before(c *callInfo) error {
c.maxRetryRPCBufferSize = o.MaxRetryRPCBufferSize
return nil
}
func (o MaxRetryRPCBufferSizeCallOption) after(c *callInfo, attempt *csAttempt) {}
func (o MaxRetryRPCBufferSizeCallOption) after(*callInfo, *csAttempt) {}
// The format of the payload: compressed or not?
type payloadFormat uint8
@ -581,19 +617,28 @@ const (
compressionMade payloadFormat = 1 // compressed
)
func (pf payloadFormat) isCompressed() bool {
return pf == compressionMade
}
type streamReader interface {
ReadHeader(header []byte) error
Read(n int) (mem.BufferSlice, error)
}
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
r io.Reader
r streamReader
// The header of a gRPC message. Find more detail at
// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md
header [5]byte
// recvBufferPool is the pool of shared receive buffers.
recvBufferPool SharedBufferPool
// bufferPool is the pool of shared receive buffers.
bufferPool mem.BufferPool
}
// recvMsg reads a complete gRPC message from the stream.
@ -608,14 +653,15 @@ type parser struct {
// - an error from the status package
//
// No other error values or types must be returned, which also means
// that the underlying io.Reader must not return an incompatible
// that the underlying streamReader must not return an incompatible
// error.
func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) {
if _, err := p.r.Read(p.header[:]); err != nil {
func (p *parser) recvMsg(maxReceiveMessageSize int) (payloadFormat, mem.BufferSlice, error) {
err := p.r.ReadHeader(p.header[:])
if err != nil {
return 0, nil, err
}
pf = payloadFormat(p.header[0])
pf := payloadFormat(p.header[0])
length := binary.BigEndian.Uint32(p.header[1:])
if length == 0 {
@ -627,20 +673,21 @@ func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byt
if int(length) > maxReceiveMessageSize {
return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize)
}
msg = p.recvBufferPool.Get(int(length))
if _, err := p.r.Read(msg); err != nil {
data, err := p.r.Read(int(length))
if err != nil {
if err == io.EOF {
err = io.ErrUnexpectedEOF
}
return 0, nil, err
}
return pf, msg, nil
return pf, data, nil
}
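For reference, the 5-byte prefix parsed above is the standard gRPC message frame: one compressed-flag byte followed by a big-endian uint32 payload length (see PROTOCOL-HTTP2.md). A standalone sketch of that framing, independent of this diff and using only the standard library:

```go
package example

import (
	"encoding/binary"
	"errors"
	"io"
)

// readFrame reads one length-prefixed gRPC message from r, enforcing maxSize.
// It mirrors the wire format handled by parser.recvMsg: 1 flag byte + 4-byte
// big-endian length + payload.
func readFrame(r io.Reader, maxSize uint32) (compressed bool, payload []byte, err error) {
	var hdr [5]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return false, nil, err
	}
	compressed = hdr[0] == 1
	length := binary.BigEndian.Uint32(hdr[1:])
	if length > maxSize {
		return false, nil, errors.New("message exceeds maximum receive size")
	}
	payload = make([]byte, length)
	if _, err := io.ReadFull(r, payload); err != nil {
		return false, nil, err
	}
	return compressed, payload, nil
}
```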
// encode serializes msg and returns a buffer containing the message, or an
// error if it is too large to be transmitted by grpc. If msg is nil, it
// generates an empty message.
func encode(c baseCodec, msg any) ([]byte, error) {
func encode(c baseCodec, msg any) (mem.BufferSlice, error) {
if msg == nil { // NOTE: typed nils will not be caught by this check
return nil, nil
}
@ -648,7 +695,8 @@ func encode(c baseCodec, msg any) ([]byte, error) {
if err != nil {
return nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error())
}
if uint(len(b)) > math.MaxUint32 {
if uint(b.Len()) > math.MaxUint32 {
b.Free()
return nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b))
}
return b, nil
@ -659,34 +707,41 @@ func encode(c baseCodec, msg any) ([]byte, error) {
// indicating no compression was done.
//
// TODO(dfawley): eliminate cp parameter by wrapping Compressor in an encoding.Compressor.
func compress(in []byte, cp Compressor, compressor encoding.Compressor) ([]byte, error) {
if compressor == nil && cp == nil {
return nil, nil
}
if len(in) == 0 {
return nil, nil
func compress(in mem.BufferSlice, cp Compressor, compressor encoding.Compressor, pool mem.BufferPool) (mem.BufferSlice, payloadFormat, error) {
if (compressor == nil && cp == nil) || in.Len() == 0 {
return nil, compressionNone, nil
}
var out mem.BufferSlice
w := mem.NewWriter(&out, pool)
wrapErr := func(err error) error {
out.Free()
return status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error())
}
cbuf := &bytes.Buffer{}
if compressor != nil {
z, err := compressor.Compress(cbuf)
z, err := compressor.Compress(w)
if err != nil {
return nil, wrapErr(err)
return nil, 0, wrapErr(err)
}
if _, err := z.Write(in); err != nil {
return nil, wrapErr(err)
for _, b := range in {
if _, err := z.Write(b.ReadOnlyData()); err != nil {
return nil, 0, wrapErr(err)
}
}
if err := z.Close(); err != nil {
return nil, wrapErr(err)
return nil, 0, wrapErr(err)
}
} else {
if err := cp.Do(cbuf, in); err != nil {
return nil, wrapErr(err)
// This is obviously really inefficient since it fully materializes the data, but
// there is no way around this with the old Compressor API. At least it attempts
// to return the buffer to the provider, in the hopes it can be reused (maybe
// even by a subsequent call to this very function).
buf := in.MaterializeToBuffer(pool)
defer buf.Free()
if err := cp.Do(w, buf.ReadOnlyData()); err != nil {
return nil, 0, wrapErr(err)
}
}
return cbuf.Bytes(), nil
return out, compressionMade, nil
}
const (
@ -697,33 +752,36 @@ const (
// msgHeader returns a 5-byte header for the message being transmitted and the
// payload, which is compData if non-nil or data otherwise.
func msgHeader(data, compData []byte) (hdr []byte, payload []byte) {
func msgHeader(data, compData mem.BufferSlice, pf payloadFormat) (hdr []byte, payload mem.BufferSlice) {
hdr = make([]byte, headerLen)
if compData != nil {
hdr[0] = byte(compressionMade)
data = compData
hdr[0] = byte(pf)
var length uint32
if pf.isCompressed() {
length = uint32(compData.Len())
payload = compData
} else {
hdr[0] = byte(compressionNone)
length = uint32(data.Len())
payload = data
}
// Write length of payload into buf
binary.BigEndian.PutUint32(hdr[payloadLen:], uint32(len(data)))
return hdr, data
binary.BigEndian.PutUint32(hdr[payloadLen:], length)
return hdr, payload
}
func outPayload(client bool, msg any, data, payload []byte, t time.Time) *stats.OutPayload {
func outPayload(client bool, msg any, dataLength, payloadLength int, t time.Time) *stats.OutPayload {
return &stats.OutPayload{
Client: client,
Payload: msg,
Data: data,
Length: len(data),
WireLength: len(payload) + headerLen,
CompressedLength: len(payload),
Length: dataLength,
WireLength: payloadLength + headerLen,
CompressedLength: payloadLength,
SentTime: t,
}
}
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status {
func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool, isServer bool) *status.Status {
switch pf {
case compressionNone:
case compressionMade:
@ -731,7 +789,10 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding")
}
if !haveCompressor {
return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
if isServer {
return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
return status.Newf(codes.Internal, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress)
}
default:
return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf)
@ -741,104 +802,129 @@ func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool
type payloadInfo struct {
compressedLength int // The compressed length got from wire.
uncompressedBytes []byte
uncompressedBytes mem.BufferSlice
}
func (p *payloadInfo) free() {
if p != nil && p.uncompressedBytes != nil {
p.uncompressedBytes.Free()
}
}
// recvAndDecompress reads a message from the stream, decompressing it if necessary.
//
// Cancelling the returned cancel function releases the buffer back to the pool. So the caller should cancel as soon as
// the buffer is no longer needed.
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor,
) (uncompressedBuf []byte, cancel func(), err error) {
pf, compressedBuf, err := p.recvMsg(maxReceiveMessageSize)
// TODO: Refactor this function to reduce the number of arguments.
// See: https://google.github.io/styleguide/go/best-practices.html#function-argument-lists
func recvAndDecompress(p *parser, s *transport.Stream, dc Decompressor, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool,
) (out mem.BufferSlice, err error) {
pf, compressed, err := p.recvMsg(maxReceiveMessageSize)
if err != nil {
return nil, nil, err
return nil, err
}
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil {
return nil, nil, st.Err()
compressedLength := compressed.Len()
if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil, isServer); st != nil {
compressed.Free()
return nil, st.Err()
}
var size int
if pf == compressionMade {
if pf.isCompressed() {
defer compressed.Free()
// To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor,
// use this decompressor as the default.
if dc != nil {
uncompressedBuf, err = dc.Do(bytes.NewReader(compressedBuf))
var uncompressedBuf []byte
uncompressedBuf, err = dc.Do(compressed.Reader())
if err == nil {
out = mem.BufferSlice{mem.NewBuffer(&uncompressedBuf, nil)}
}
size = len(uncompressedBuf)
} else {
uncompressedBuf, size, err = decompress(compressor, compressedBuf, maxReceiveMessageSize)
out, size, err = decompress(compressor, compressed, maxReceiveMessageSize, p.bufferPool)
}
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
return nil, status.Errorf(codes.Internal, "grpc: failed to decompress the received message: %v", err)
}
if size > maxReceiveMessageSize {
out.Free()
// TODO: Revisit the error code. Currently keep it consistent with java
// implementation.
return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
return nil, status.Errorf(codes.ResourceExhausted, "grpc: received message after decompression larger than max (%d vs. %d)", size, maxReceiveMessageSize)
}
} else {
uncompressedBuf = compressedBuf
out = compressed
}
if payInfo != nil {
payInfo.compressedLength = len(compressedBuf)
payInfo.uncompressedBytes = uncompressedBuf
cancel = func() {}
} else {
cancel = func() {
p.recvBufferPool.Put(&compressedBuf)
}
payInfo.compressedLength = compressedLength
out.Ref()
payInfo.uncompressedBytes = out
}
return uncompressedBuf, cancel, nil
return out, nil
}
// Using compressor, decompress d, returning data and size.
// Optionally, if data will be over maxReceiveMessageSize, just return the size.
func decompress(compressor encoding.Compressor, d []byte, maxReceiveMessageSize int) ([]byte, int, error) {
dcReader, err := compressor.Decompress(bytes.NewReader(d))
func decompress(compressor encoding.Compressor, d mem.BufferSlice, maxReceiveMessageSize int, pool mem.BufferPool) (mem.BufferSlice, int, error) {
dcReader, err := compressor.Decompress(d.Reader())
if err != nil {
return nil, 0, err
}
if sizer, ok := compressor.(interface {
DecompressedSize(compressedBytes []byte) int
}); ok {
if size := sizer.DecompressedSize(d); size >= 0 {
if size > maxReceiveMessageSize {
return nil, size, nil
}
// size is used as an estimate to size the buffer, but we
// will read more data if available.
// +MinRead so ReadFrom will not reallocate if size is correct.
//
// TODO: If we ensure that the buffer size is the same as the DecompressedSize,
// we can also utilize the recv buffer pool here.
buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
return buf.Bytes(), int(bytesRead), err
}
// TODO: Can/should this still be preserved with the new BufferSlice API? Are
// there any actual benefits to allocating a single large buffer instead of
// multiple smaller ones?
//if sizer, ok := compressor.(interface {
// DecompressedSize(compressedBytes []byte) int
//}); ok {
// if size := sizer.DecompressedSize(d); size >= 0 {
// if size > maxReceiveMessageSize {
// return nil, size, nil
// }
// // size is used as an estimate to size the buffer, but we
// // will read more data if available.
// // +MinRead so ReadFrom will not reallocate if size is correct.
// //
// // TODO: If we ensure that the buffer size is the same as the DecompressedSize,
// // we can also utilize the recv buffer pool here.
// buf := bytes.NewBuffer(make([]byte, 0, size+bytes.MinRead))
// bytesRead, err := buf.ReadFrom(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
// return buf.Bytes(), int(bytesRead), err
// }
//}
var out mem.BufferSlice
_, err = io.Copy(mem.NewWriter(&out, pool), io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
if err != nil {
out.Free()
return nil, 0, err
}
// Read from LimitReader with limit max+1. So if the underlying
// reader is over limit, the result will be bigger than max.
d, err = io.ReadAll(io.LimitReader(dcReader, int64(maxReceiveMessageSize)+1))
return d, len(d), err
return out, out.Len(), nil
}
// For the two compressor parameters, both should not be set, but if they are,
// dc takes precedence over compressor.
// TODO(dfawley): wrap the old compressor/decompressor using the new API?
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor) error {
buf, cancel, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor)
func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m any, maxReceiveMessageSize int, payInfo *payloadInfo, compressor encoding.Compressor, isServer bool) error {
data, err := recvAndDecompress(p, s, dc, maxReceiveMessageSize, payInfo, compressor, isServer)
if err != nil {
return err
}
defer cancel()
if err := c.Unmarshal(buf, m); err != nil {
// If the codec wants its own reference to the data, it can get it. Otherwise, always
// free the buffers.
defer data.Free()
if err := c.Unmarshal(data, m); err != nil {
return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message: %v", err)
}
return nil
}
@ -941,7 +1027,7 @@ func setCallInfoCodec(c *callInfo) error {
// encoding.Codec (Name vs. String method name). We only support
// setting content subtype from encoding.Codec to avoid a behavior
// change with the deprecated version.
if ec, ok := c.codec.(encoding.Codec); ok {
if ec, ok := c.codec.(encoding.CodecV2); ok {
c.contentSubtype = strings.ToLower(ec.Name())
}
}
@ -950,12 +1036,12 @@ func setCallInfoCodec(c *callInfo) error {
if c.contentSubtype == "" {
// No codec specified in CallOptions; use proto by default.
c.codec = encoding.GetCodec(proto.Name)
c.codec = getCodec(proto.Name)
return nil
}
// c.contentSubtype is already lowercased in CallContentSubtype
c.codec = encoding.GetCodec(c.contentSubtype)
c.codec = getCodec(c.contentSubtype)
if c.codec == nil {
return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype)
}


@ -45,6 +45,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@ -80,7 +81,7 @@ func init() {
}
internal.BinaryLogger = binaryLogger
internal.JoinServerOptions = newJoinServerOption
internal.RecvBufferPool = recvBufferPool
internal.BufferPool = bufferPool
}
var statusOK = status.New(codes.OK, "")
@ -170,7 +171,7 @@ type serverOptions struct {
maxHeaderListSize *uint32
headerTableSize *uint32
numServerWorkers uint32
recvBufferPool SharedBufferPool
bufferPool mem.BufferPool
waitForHandlers bool
}
@ -181,7 +182,7 @@ var defaultServerOptions = serverOptions{
connectionTimeout: 120 * time.Second,
writeBufferSize: defaultWriteBufSize,
readBufferSize: defaultReadBufSize,
recvBufferPool: nopBufferPool{},
bufferPool: mem.DefaultBufferPool(),
}
var globalServerOptions []ServerOption
@ -313,7 +314,7 @@ func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption {
// Will be supported throughout 1.x.
func CustomCodec(codec Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
o.codec = newCodecV0Bridge(codec)
})
}
@ -342,7 +343,22 @@ func CustomCodec(codec Codec) ServerOption {
// later release.
func ForceServerCodec(codec encoding.Codec) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codec
o.codec = newCodecV1Bridge(codec)
})
}
// ForceServerCodecV2 is the equivalent of ForceServerCodec, but for the new
// CodecV2 interface.
//
// Will be supported throughout 1.x.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.codec = codecV2
})
}
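A short usage sketch for the server-side option, wiring in any CodecV2 implementation (for example, the protoCodecV2 sketched earlier for ForceCodecV2); other server options are omitted for brevity.

```go
package example

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding"
)

// newServerWithCodecV2 forces every RPC on the server to use the given CodecV2.
func newServerWithCodecV2(codec encoding.CodecV2) *grpc.Server {
	return grpc.NewServer(grpc.ForceServerCodecV2(codec))
}
```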
@ -592,26 +608,9 @@ func WaitForHandlers(w bool) ServerOption {
})
}
// RecvBufferPool returns a ServerOption that configures the server
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
//
// If you are unsure about how to implement a memory pool but want to utilize one,
// begin with grpc.NewSharedBufferPool.
//
// Note: The shared buffer pool feature will not be active if any of the following
// options are used: StatsHandler, EnableTracing, or binary logging. In such
// cases, the shared buffer pool will be ignored.
//
// Deprecated: use experimental.WithRecvBufferPool instead. Will be deleted in
// v1.60.0 or later.
func RecvBufferPool(bufferPool SharedBufferPool) ServerOption {
return recvBufferPool(bufferPool)
}
func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
func bufferPool(bufferPool mem.BufferPool) ServerOption {
return newFuncServerOption(func(o *serverOptions) {
o.recvBufferPool = bufferPool
o.bufferPool = bufferPool
})
}
@ -622,7 +621,7 @@ func recvBufferPool(bufferPool SharedBufferPool) ServerOption {
// workload (assuming a QPS of a few thousand requests/sec).
const serverWorkerResetThreshold = 1 << 16
// serverWorkers blocks on a *transport.Stream channel forever and waits for
// serverWorker blocks on a *transport.Stream channel forever and waits for
// data to be fed by serveStreams. This allows multiple requests to be
// processed by the same goroutine, removing the need for expensive stack
// re-allocations (see the runtime.morestack problem [1]).
@ -980,6 +979,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ChannelzParent: s.channelz,
MaxHeaderListSize: s.opts.maxHeaderListSize,
HeaderTableSize: s.opts.headerTableSize,
BufferPool: s.opts.bufferPool,
}
st, err := transport.NewServerTransport(c, config)
if err != nil {
@ -1072,7 +1072,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers)
st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@ -1142,20 +1142,35 @@ func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport,
channelz.Error(logger, s.channelz, "grpc: server failed to encode response: ", err)
return err
}
compData, err := compress(data, cp, comp)
compData, pf, err := compress(data, cp, comp, s.opts.bufferPool)
if err != nil {
data.Free()
channelz.Error(logger, s.channelz, "grpc: server failed to compress response: ", err)
return err
}
hdr, payload := msgHeader(data, compData)
hdr, payload := msgHeader(data, compData, pf)
defer func() {
compData.Free()
data.Free()
// payload does not need to be freed here, it is either data or compData, both of
// which are already freed.
}()
dataLen := data.Len()
payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > s.opts.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", len(payload), s.opts.maxSendMessageSize)
if payloadLen > s.opts.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = t.Write(stream, hdr, payload, opts)
if err == nil {
for _, sh := range s.opts.statsHandlers {
sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now()))
if len(s.opts.statsHandlers) != 0 {
for _, sh := range s.opts.statsHandlers {
sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
}
}
return err
@ -1334,37 +1349,37 @@ func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTranspor
var payInfo *payloadInfo
if len(shs) != 0 || len(binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
d, cancel, err := recvAndDecompress(&parser{r: stream, recvBufferPool: s.opts.recvBufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp)
d, err := recvAndDecompress(&parser{r: stream, bufferPool: s.opts.bufferPool}, stream, dc, s.opts.maxReceiveMessageSize, payInfo, decomp, true)
if err != nil {
if e := t.WriteStatus(stream, status.Convert(err)); e != nil {
channelz.Warningf(logger, s.channelz, "grpc: Server.processUnaryRPC failed to write status: %v", e)
}
return err
}
defer d.Free()
if channelz.IsOn() {
t.IncrMsgRecv()
}
df := func(v any) error {
defer cancel()
if err := s.getCodec(stream.ContentSubtype()).Unmarshal(d, v); err != nil {
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
for _, sh := range shs {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
Length: len(d),
Length: d.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
Data: d,
})
}
if len(binlogs) != 0 {
cm := &binarylog.ClientMessage{
Message: d,
Message: d.Materialize(),
}
for _, binlog := range binlogs {
binlog.Log(ctx, cm)
@ -1548,7 +1563,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTran
ctx: ctx,
t: t,
s: stream,
p: &parser{r: stream, recvBufferPool: s.opts.recvBufferPool},
p: &parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
@ -1963,12 +1978,12 @@ func (s *Server) getCodec(contentSubtype string) baseCodec {
return s.opts.codec
}
if contentSubtype == "" {
return encoding.GetCodec(proto.Name)
return getCodec(proto.Name)
}
codec := encoding.GetCodec(contentSubtype)
codec := getCodec(contentSubtype)
if codec == nil {
logger.Warningf("Unsupported codec %q. Defaulting to %q for now. This will start to fail in future releases.", contentSubtype, proto.Name)
return encoding.GetCodec(proto.Name)
return getCodec(proto.Name)
}
return codec
}


@ -1,154 +0,0 @@
/*
*
* Copyright 2023 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package grpc
import "sync"
// SharedBufferPool is a pool of buffers that can be shared, resulting in
// decreased memory allocation. Currently, in gRPC-go, it is only utilized
// for parsing incoming messages.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
type SharedBufferPool interface {
// Get returns a buffer with specified length from the pool.
//
// The returned byte slice may be not zero initialized.
Get(length int) []byte
// Put returns a buffer to the pool.
Put(*[]byte)
}
// NewSharedBufferPool creates a simple SharedBufferPool with buckets
// of different sizes to optimize memory usage. This prevents the pool from
// wasting large amounts of memory, even when handling messages of varying sizes.
//
// # Experimental
//
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func NewSharedBufferPool() SharedBufferPool {
return &simpleSharedBufferPool{
pools: [poolArraySize]simpleSharedBufferChildPool{
newBytesPool(level0PoolMaxSize),
newBytesPool(level1PoolMaxSize),
newBytesPool(level2PoolMaxSize),
newBytesPool(level3PoolMaxSize),
newBytesPool(level4PoolMaxSize),
newBytesPool(0),
},
}
}
// simpleSharedBufferPool is a simple implementation of SharedBufferPool.
type simpleSharedBufferPool struct {
pools [poolArraySize]simpleSharedBufferChildPool
}
func (p *simpleSharedBufferPool) Get(size int) []byte {
return p.pools[p.poolIdx(size)].Get(size)
}
func (p *simpleSharedBufferPool) Put(bs *[]byte) {
p.pools[p.poolIdx(cap(*bs))].Put(bs)
}
func (p *simpleSharedBufferPool) poolIdx(size int) int {
switch {
case size <= level0PoolMaxSize:
return level0PoolIdx
case size <= level1PoolMaxSize:
return level1PoolIdx
case size <= level2PoolMaxSize:
return level2PoolIdx
case size <= level3PoolMaxSize:
return level3PoolIdx
case size <= level4PoolMaxSize:
return level4PoolIdx
default:
return levelMaxPoolIdx
}
}
const (
level0PoolMaxSize = 16 // 16 B
level1PoolMaxSize = level0PoolMaxSize * 16 // 256 B
level2PoolMaxSize = level1PoolMaxSize * 16 // 4 KB
level3PoolMaxSize = level2PoolMaxSize * 16 // 64 KB
level4PoolMaxSize = level3PoolMaxSize * 16 // 1 MB
)
const (
level0PoolIdx = iota
level1PoolIdx
level2PoolIdx
level3PoolIdx
level4PoolIdx
levelMaxPoolIdx
poolArraySize
)
type simpleSharedBufferChildPool interface {
Get(size int) []byte
Put(any)
}
type bufferPool struct {
sync.Pool
defaultSize int
}
func (p *bufferPool) Get(size int) []byte {
bs := p.Pool.Get().(*[]byte)
if cap(*bs) < size {
p.Pool.Put(bs)
return make([]byte, size)
}
return (*bs)[:size]
}
func newBytesPool(size int) simpleSharedBufferChildPool {
return &bufferPool{
Pool: sync.Pool{
New: func() any {
bs := make([]byte, size)
return &bs
},
},
defaultSize: size,
}
}
// nopBufferPool is a buffer pool just makes new buffer without pooling.
type nopBufferPool struct {
}
func (nopBufferPool) Get(length int) []byte {
return make([]byte, length)
}
func (nopBufferPool) Put(*[]byte) {
}


@ -77,9 +77,6 @@ type InPayload struct {
// the call to HandleRPC which provides the InPayload returns and must be
// copied if needed later.
Payload any
// Data is the serialized message payload.
// Deprecated: Data will be removed in the next release.
Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
@ -150,9 +147,6 @@ type OutPayload struct {
// the call to HandleRPC which provides the OutPayload returns and must be
// copied if needed later.
Payload any
// Data is the serialized message payload.
// Deprecated: Data will be removed in the next release.
Data []byte
// Length is the size of the uncompressed payload data. Does not include any
// framing (gRPC or HTTP/2).
Length int


@ -41,6 +41,7 @@ import (
"google.golang.org/grpc/internal/serviceconfig"
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/mem"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/peer"
"google.golang.org/grpc/stats"
@ -359,7 +360,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs.attempt = a
return nil
}
if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) }); err != nil {
if err := cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) }); err != nil {
return nil, err
}
@ -517,7 +518,7 @@ func (a *csAttempt) newStream() error {
}
a.s = s
a.ctx = s.Context()
a.p = &parser{r: s, recvBufferPool: a.cs.cc.dopts.recvBufferPool}
a.p = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@ -566,10 +567,15 @@ type clientStream struct {
// place where we need to check if the attempt is nil.
attempt *csAttempt
// TODO(hedging): hedging will have multiple attempts simultaneously.
committed bool // active attempt committed for retry?
onCommit func()
buffer []func(a *csAttempt) error // operations to replay on retry
bufferSize int // current size of buffer
committed bool // active attempt committed for retry?
onCommit func()
replayBuffer []replayOp // operations to replay on retry
replayBufferSize int // current size of replayBuffer
}
type replayOp struct {
op func(a *csAttempt) error
cleanup func()
}
// csAttempt implements a single transport stream attempt within a
@ -607,7 +613,12 @@ func (cs *clientStream) commitAttemptLocked() {
cs.onCommit()
}
cs.committed = true
cs.buffer = nil
for _, op := range cs.replayBuffer {
if op.cleanup != nil {
op.cleanup()
}
}
cs.replayBuffer = nil
}
func (cs *clientStream) commitAttempt() {
@ -732,7 +743,7 @@ func (cs *clientStream) retryLocked(attempt *csAttempt, lastErr error) error {
// the stream is canceled.
return err
}
// Note that the first op in the replay buffer always sets cs.attempt
// Note that the first op in replayBuffer always sets cs.attempt
// if it is able to pick a transport and create a stream.
if lastErr = cs.replayBufferLocked(attempt); lastErr == nil {
return nil
@ -761,7 +772,7 @@ func (cs *clientStream) withRetry(op func(a *csAttempt) error, onSuccess func())
// already be status errors.
return toRPCErr(op(cs.attempt))
}
if len(cs.buffer) == 0 {
if len(cs.replayBuffer) == 0 {
// For the first op, which controls creation of the stream and
// assigns cs.attempt, we need to create a new attempt inline
// before executing the first op. On subsequent ops, the attempt
@ -851,25 +862,26 @@ func (cs *clientStream) Trailer() metadata.MD {
}
func (cs *clientStream) replayBufferLocked(attempt *csAttempt) error {
for _, f := range cs.buffer {
if err := f(attempt); err != nil {
for _, f := range cs.replayBuffer {
if err := f.op(attempt); err != nil {
return err
}
}
return nil
}
func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error) {
func (cs *clientStream) bufferForRetryLocked(sz int, op func(a *csAttempt) error, cleanup func()) {
// Note: we still will buffer if retry is disabled (for transparent retries).
if cs.committed {
return
}
cs.bufferSize += sz
if cs.bufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.replayBufferSize += sz
if cs.replayBufferSize > cs.callInfo.maxRetryRPCBufferSize {
cs.commitAttemptLocked()
cleanup()
return
}
cs.buffer = append(cs.buffer, op)
cs.replayBuffer = append(cs.replayBuffer, replayOp{op: op, cleanup: cleanup})
}
func (cs *clientStream) SendMsg(m any) (err error) {
@ -891,23 +903,50 @@ func (cs *clientStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, cs.codec, cs.cp, cs.comp)
hdr, data, payload, pf, err := prepareMsg(m, cs.codec, cs.cp, cs.comp, cs.cc.dopts.copts.BufferPool)
if err != nil {
return err
}
defer func() {
data.Free()
// only free payload if compression was made, and therefore it is a different set
// of buffers from data.
if pf.isCompressed() {
payload.Free()
}
}()
dataLen := data.Len()
payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), *cs.callInfo.maxSendMessageSize)
if payloadLen > *cs.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, *cs.callInfo.maxSendMessageSize)
}
// always take an extra ref in case data == payload (i.e. when the data isn't
// compressed). The original ref will always be freed by the deferred free above.
payload.Ref()
op := func(a *csAttempt) error {
return a.sendMsg(m, hdr, payload, data)
return a.sendMsg(m, hdr, payload, dataLen, payloadLen)
}
// onSuccess is invoked when the op is captured for a subsequent retry. If the
// stream was established by a previous message and therefore retries are
// disabled, onSuccess will not be invoked, and payloadRef can be freed
// immediately.
onSuccessCalled := false
err = cs.withRetry(op, func() {
cs.bufferForRetryLocked(len(hdr)+payloadLen, op, payload.Free)
onSuccessCalled = true
})
if !onSuccessCalled {
payload.Free()
}
err = cs.withRetry(op, func() { cs.bufferForRetryLocked(len(hdr)+len(payload), op) })
if len(cs.binlogs) != 0 && err == nil {
cm := &binarylog.ClientMessage{
OnClientSide: true,
Message: data,
Message: data.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, cm)
@ -924,6 +963,7 @@ func (cs *clientStream) RecvMsg(m any) error {
var recvInfo *payloadInfo
if len(cs.binlogs) != 0 {
recvInfo = &payloadInfo{}
defer recvInfo.free()
}
err := cs.withRetry(func(a *csAttempt) error {
return a.recvMsg(m, recvInfo)
@ -931,7 +971,7 @@ func (cs *clientStream) RecvMsg(m any) error {
if len(cs.binlogs) != 0 && err == nil {
sm := &binarylog.ServerMessage{
OnClientSide: true,
Message: recvInfo.uncompressedBytes,
Message: recvInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range cs.binlogs {
binlog.Log(cs.ctx, sm)
@ -958,7 +998,7 @@ func (cs *clientStream) CloseSend() error {
// RecvMsg. This also matches historical behavior.
return nil
}
cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op) })
cs.withRetry(op, func() { cs.bufferForRetryLocked(0, op, nil) })
if len(cs.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{
OnClientSide: true,
@ -1034,7 +1074,7 @@ func (cs *clientStream) finish(err error) {
cs.cancel()
}
func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength, payloadLength int) error {
cs := a.cs
if a.trInfo != nil {
a.mu.Lock()
@ -1052,8 +1092,10 @@ func (a *csAttempt) sendMsg(m any, hdr, payld, data []byte) error {
}
return io.EOF
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, outPayload(true, m, data, payld, time.Now()))
if len(a.statsHandlers) != 0 {
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
}
if channelz.IsOn() {
a.t.IncrMsgSent()
@ -1065,6 +1107,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
if len(a.statsHandlers) != 0 && payInfo == nil {
payInfo = &payloadInfo{}
defer payInfo.free()
}
if !a.decompSet {
@ -1083,8 +1126,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompSet = true
}
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp)
if err != nil {
if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decomp, false); err != nil {
if err == io.EOF {
if statusErr := a.s.Status().Err(); statusErr != nil {
return statusErr
@ -1103,14 +1145,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
for _, sh := range a.statsHandlers {
sh.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
Client: true,
RecvTime: time.Now(),
Payload: m,
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
Length: len(payInfo.uncompressedBytes),
Length: payInfo.uncompressedBytes.Len(),
})
}
if channelz.IsOn() {
@ -1122,14 +1162,12 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
if err := recv(a.p, cs.codec, a.s, a.dc, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decomp, false); err == io.EOF {
return a.s.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
return toRPCErr(err)
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
func (a *csAttempt) finish(err error) {
@ -1185,12 +1223,12 @@ func (a *csAttempt) finish(err error) {
a.mu.Unlock()
}
// newClientStream creates a ClientStream with the specified transport, on the
// newNonRetryClientStream creates a ClientStream with the specified transport, on the
// given addrConn.
//
// It's expected that the given transport is either the same one in addrConn, or
// is already closed. To avoid race, transport is specified separately, instead
// of using ac.transpot.
// of using ac.transport.
//
// Main difference between this and ClientConn.NewStream:
// - no retry
@ -1276,7 +1314,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.s = s
as.p = &parser{r: s, recvBufferPool: ac.dopts.recvBufferPool}
as.p = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@ -1373,17 +1411,26 @@ func (as *addrConnStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
hdr, payld, _, err := prepareMsg(m, as.codec, as.cp, as.comp)
hdr, data, payload, pf, err := prepareMsg(m, as.codec, as.cp, as.comp, as.ac.dopts.copts.BufferPool)
if err != nil {
return err
}
defer func() {
data.Free()
// only free payload if compression was made, and therefore it is a different set
// of buffers from data.
if pf.isCompressed() {
payload.Free()
}
}()
// TODO(dfawley): should we be checking len(data) instead?
if len(payld) > *as.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payld), *as.callInfo.maxSendMessageSize)
if payload.Len() > *as.callInfo.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payload.Len(), *as.callInfo.maxSendMessageSize)
}
if err := as.t.Write(as.s, hdr, payld, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
if err := as.t.Write(as.s, hdr, payload, &transport.Options{Last: !as.desc.ClientStreams}); err != nil {
if !as.desc.ClientStreams {
// For non-client-streaming RPCs, we return nil instead of EOF on error
// because the generated code requires it. finish is not called; RecvMsg()
@ -1423,8 +1470,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompSet = true
}
err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
if err != nil {
if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err != nil {
if err == io.EOF {
if statusErr := as.s.Status().Err(); statusErr != nil {
return statusErr
@ -1444,14 +1490,12 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
err = recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp)
if err == nil {
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
if err == io.EOF {
if err := recv(as.p, as.codec, as.s, as.dc, m, *as.callInfo.maxReceiveMessageSize, nil, as.decomp, false); err == io.EOF {
return as.s.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
}
return toRPCErr(err)
return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
}
func (as *addrConnStream) finish(err error) {
@ -1645,18 +1689,31 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
// load hdr, payload, data
hdr, payload, data, err := prepareMsg(m, ss.codec, ss.cp, ss.comp)
hdr, data, payload, pf, err := prepareMsg(m, ss.codec, ss.cp, ss.comp, ss.p.bufferPool)
if err != nil {
return err
}
defer func() {
data.Free()
// only free payload if compression was made, and therefore it is a different set
// of buffers from data.
if pf.isCompressed() {
payload.Free()
}
}()
dataLen := data.Len()
payloadLen := payload.Len()
// TODO(dfawley): should we be checking len(data) instead?
if len(payload) > ss.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(payload), ss.maxSendMessageSize)
if payloadLen > ss.maxSendMessageSize {
return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", payloadLen, ss.maxSendMessageSize)
}
if err := ss.t.Write(ss.s, hdr, payload, &transport.Options{Last: false}); err != nil {
return toRPCErr(err)
}
if len(ss.binlogs) != 0 {
if !ss.serverHeaderBinlogged {
h, _ := ss.s.Header()
@ -1669,7 +1726,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
}
sm := &binarylog.ServerMessage{
Message: data,
Message: data.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, sm)
@ -1677,7 +1734,7 @@ func (ss *serverStream) SendMsg(m any) (err error) {
}
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), outPayload(false, m, data, payload, time.Now()))
sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
}
return nil
@ -1714,8 +1771,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
var payInfo *payloadInfo
if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp); err != nil {
if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, payInfo, ss.decomp, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@ -1733,11 +1791,9 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
if len(ss.statsHandler) != 0 {
for _, sh := range ss.statsHandler {
sh.HandleRPC(ss.s.Context(), &stats.InPayload{
RecvTime: time.Now(),
Payload: m,
// TODO truncate large payload.
Data: payInfo.uncompressedBytes,
Length: len(payInfo.uncompressedBytes),
RecvTime: time.Now(),
Payload: m,
Length: payInfo.uncompressedBytes.Len(),
WireLength: payInfo.compressedLength + headerLen,
CompressedLength: payInfo.compressedLength,
})
@ -1745,7 +1801,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
Message: payInfo.uncompressedBytes,
Message: payInfo.uncompressedBytes.Materialize(),
}
for _, binlog := range ss.binlogs {
binlog.Log(ss.ctx, cm)
@ -1760,23 +1816,26 @@ func MethodFromServerStream(stream ServerStream) (string, bool) {
return Method(stream.Context())
}
// prepareMsg returns the hdr, payload and data
// using the compressors passed or using the
// passed preparedmsg
func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor) (hdr, payload, data []byte, err error) {
// prepareMsg returns the hdr, payload and data using the compressors passed or
// using the passed preparedmsg. The returned boolean indicates whether
// compression was made and therefore whether the payload needs to be freed in
// addition to the returned data. Freeing the payload if the returned boolean is
// false can lead to undefined behavior.
func prepareMsg(m any, codec baseCodec, cp Compressor, comp encoding.Compressor, pool mem.BufferPool) (hdr []byte, data, payload mem.BufferSlice, pf payloadFormat, err error) {
if preparedMsg, ok := m.(*PreparedMsg); ok {
return preparedMsg.hdr, preparedMsg.payload, preparedMsg.encodedData, nil
return preparedMsg.hdr, preparedMsg.encodedData, preparedMsg.payload, preparedMsg.pf, nil
}
// The input interface is not a prepared msg.
// Marshal and Compress the data at this point
data, err = encode(codec, m)
if err != nil {
return nil, nil, nil, err
return nil, nil, nil, 0, err
}
compData, err := compress(data, cp, comp)
compData, pf, err := compress(data, cp, comp, pool)
if err != nil {
return nil, nil, nil, err
data.Free()
return nil, nil, nil, 0, err
}
hdr, payload = msgHeader(data, compData)
return hdr, payload, data, nil
hdr, payload = msgHeader(data, compData, pf)
return hdr, data, payload, pf, nil
}


@ -22,15 +22,35 @@ package grpc
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
type ServerStreamingClient[Res any] interface {
// Recv receives the next response message from the server. The client may
// repeatedly call Recv to read messages from the response stream. If
// io.EOF is returned, the stream has terminated with an OK status. Any
// other error is compatible with the status package and indicates the
// RPC's status code and message.
Recv() (*Res, error)
// ClientStream is embedded to provide Context, Header, and Trailer
// functionality. No other methods in the ClientStream should be called
// directly.
ClientStream
}
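As an illustration of the documented contract, a generic helper that drains a server stream until io.EOF; only the interface shown above is assumed.

```go
package example

import (
	"errors"
	"io"

	"google.golang.org/grpc"
)

// collect reads every response from a server-streaming RPC. io.EOF marks a
// clean end of stream; any other error carries the RPC status.
func collect[Res any](stream grpc.ServerStreamingClient[Res]) ([]*Res, error) {
	var out []*Res
	for {
		msg, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return out, nil // stream finished with an OK status
		}
		if err != nil {
			return nil, err // status error from the RPC
		}
		out = append(out, msg)
	}
}
```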
// ServerStreamingServer represents the server side of a server-streaming (one
// request, many responses) RPC. It is generic over the type of the response
// message. It is used in generated code.
//
// To terminate the response stream, return from the handler method and return
// an error from the status package, or use nil to indicate an OK status code.
type ServerStreamingServer[Res any] interface {
// Send sends a response message to the client. The server handler may
// call Send multiple times to send multiple messages to the client. An
// error is returned if the stream was terminated unexpectedly, and the
// handler method should return, as the stream is no longer usable.
Send(*Res) error
// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
// SetTrailer functionality. No other methods in the ServerStream should
// be called directly.
ServerStream
}
@ -39,8 +59,22 @@ type ServerStreamingServer[Res any] interface {
// message stream and the type of the unary response message. It is used in
// generated code.
type ClientStreamingClient[Req any, Res any] interface {
// Send sends a request message to the server. The client may call Send
// multiple times to send multiple messages to the server. On error, Send
// aborts the stream. If the error was generated by the client, the status
// is returned directly. Otherwise, io.EOF is returned, and the status of
// the stream may be discovered using CloseAndRecv().
Send(*Req) error
// CloseAndRecv closes the request stream and waits for the server's
// response. This method must be called once and only once after sending
// all request messages. Any error returned is implemented by the status
// package.
CloseAndRecv() (*Res, error)
// ClientStream is embedded to provide Context, Header, and Trailer
// functionality. No other methods in the ClientStream should be called
// directly.
ClientStream
}
@ -48,9 +82,28 @@ type ClientStreamingClient[Req any, Res any] interface {
// requests, one response) RPC. It is generic over both the type of the request
// message stream and the type of the unary response message. It is used in
// generated code.
//
// To terminate the RPC, call SendAndClose and return nil from the method
// handler or do not call SendAndClose and return an error from the status
// package.
type ClientStreamingServer[Req any, Res any] interface {
// Recv receives the next request message from the client. The server may
// repeatedly call Recv to read messages from the request stream. If
// io.EOF is returned, it indicates the client called CloseAndRecv on its
// ClientStreamingClient. Any other error indicates the stream was
// terminated unexpectedly, and the handler method should return, as the
// stream is no longer usable.
Recv() (*Req, error)
// SendAndClose sends a single response message to the client and closes
// the stream. This method must be called once and only once after all
// request messages have been processed. Recv should not be called after
// calling SendAndClose.
SendAndClose(*Res) error
// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
// SetTrailer functionality. No other methods in the ServerStream should
// be called directly.
ServerStream
}
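A minimal handler sketch for the client-streaming server side described above: Recv until io.EOF, then SendAndClose exactly once. The Sum and Total types are hypothetical stand-ins for generated messages.

```go
package example

import (
	"errors"
	"io"

	"google.golang.org/grpc"
)

// Sum and Total are stand-ins for generated request/response message types.
type Sum struct{ Value int64 }
type Total struct{ Value int64 }

// sumHandler accumulates client messages and replies once when the client
// calls CloseAndRecv (observed here as io.EOF from Recv).
func sumHandler(stream grpc.ClientStreamingServer[Sum, Total]) error {
	var total int64
	for {
		req, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			// End of the request stream: send the single response and finish.
			return stream.SendAndClose(&Total{Value: total})
		}
		if err != nil {
			return err // stream broken; return so the RPC terminates
		}
		total += req.Value
	}
}
```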
@ -59,8 +112,23 @@ type ClientStreamingServer[Req any, Res any] interface {
// request message stream and the type of the response message stream. It is
// used in generated code.
type BidiStreamingClient[Req any, Res any] interface {
// Send sends a request message to the server. The client may call Send
// multiple times to send multiple messages to the server. On error, Send
// aborts the stream. If the error was generated by the client, the status
// is returned directly. Otherwise, io.EOF is returned, and the status of
// the stream may be discovered using Recv().
Send(*Req) error
// Recv receives the next response message from the server. The client may
// repeatedly call Recv to read messages from the response stream. If
// io.EOF is returned, the stream has terminated with an OK status. Any
// other error is compatible with the status package and indicates the
// RPC's status code and message.
Recv() (*Res, error)
// ClientStream is embedded to provide Context, Header, Trailer, and
// CloseSend functionality. No other methods in the ClientStream should be
// called directly.
ClientStream
}
@ -68,9 +136,27 @@ type BidiStreamingClient[Req any, Res any] interface {
// (many requests, many responses) RPC. It is generic over both the type of the
// request message stream and the type of the response message stream. It is
// used in generated code.
//
// To terminate the stream, return from the handler method and return
// an error from the status package, or use nil to indicate an OK status code.
type BidiStreamingServer[Req any, Res any] interface {
// Recv receives the next request message from the client. The server may
// repeatedly call Recv to read messages from the request stream. If
// io.EOF is returned, it indicates the client called CloseSend on its
// BidiStreamingClient. Any other error indicates the stream was
// terminated unexpectedly, and the handler method should return, as the
// stream is no longer usable.
Recv() (*Req, error)
// Send sends a response message to the client. The server handler may
// call Send multiple times to send multiple messages to the client. An
// error is returned if the stream was terminated unexpectedly, and the
// handler method should return, as the stream is no longer usable.
Send(*Res) error
// ServerStream is embedded to provide Context, SetHeader, SendHeader, and
// SetTrailer functionality. No other methods in the ServerStream should
// be called directly.
ServerStream
}
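And a corresponding bidirectional echo handler sketch: each received message is sent straight back, with io.EOF from Recv signalling that the client called CloseSend. Msg is a hypothetical message type.

```go
package example

import (
	"errors"
	"io"

	"google.golang.org/grpc"
)

// Msg is a stand-in for a generated message type.
type Msg struct{ Body string }

// echoHandler streams every request back to the client until the client
// half-closes (io.EOF) or the stream fails.
func echoHandler(stream grpc.BidiStreamingServer[Msg, Msg]) error {
	for {
		in, err := stream.Recv()
		if errors.Is(err, io.EOF) {
			return nil // client called CloseSend; finish with an OK status
		}
		if err != nil {
			return err
		}
		if err := stream.Send(in); err != nil {
			return err
		}
	}
}
```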


@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
const Version = "1.65.0"
const Version = "1.68.0"


@ -351,7 +351,7 @@ func (d decoder) unmarshalScalar(fd protoreflect.FieldDescriptor) (protoreflect.
panic(fmt.Sprintf("unmarshalScalar: invalid scalar kind %v", kind))
}
return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString())
return protoreflect.Value{}, d.newError(tok.Pos(), "invalid value for %v field %v: %v", kind, fd.JSONName(), tok.RawString())
}
func unmarshalInt(tok json.Token, bitSize int) (protoreflect.Value, bool) {


@ -216,9 +216,7 @@ func (m unpopulatedFieldRanger) Range(f func(protoreflect.FieldDescriptor, proto
}
v := m.Get(fd)
isProto2Scalar := fd.Syntax() == protoreflect.Proto2 && fd.Default().IsValid()
isSingularMessage := fd.Cardinality() != protoreflect.Repeated && fd.Message() != nil
if isProto2Scalar || isSingularMessage {
if fd.HasPresence() {
if m.skipNull {
continue
}


@ -348,7 +348,11 @@ func (d decoder) unmarshalAnyValue(unmarshal unmarshalFunc, m protoreflect.Messa
switch tok.Kind() {
case json.ObjectClose:
if !found {
return d.newError(tok.Pos(), `missing "value" field`)
// We tolerate an omitted `value` field with the google.protobuf.Empty Well-Known-Type,
// for compatibility with other proto runtimes that have interpreted the spec differently.
if m.Descriptor().FullName() != genid.Empty_message_fullname {
return d.newError(tok.Pos(), `missing "value" field`)
}
}
return nil
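In JSON terms, the tolerance added above means an Any wrapping google.protobuf.Empty may omit the "value" member. A small sketch, assuming the Empty type is linked into the binary so its type URL resolves through the global registry:

```go
package example

import (
	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/anypb"
	_ "google.golang.org/protobuf/types/known/emptypb" // registers google.protobuf.Empty
)

// unmarshalEmptyAny accepts `{"@type":".../google.protobuf.Empty","value":{}}`
// and, with this change, the same object without the "value" member.
func unmarshalEmptyAny(js []byte) (*anypb.Any, error) {
	a := new(anypb.Any)
	if err := protojson.Unmarshal(js, a); err != nil {
		return nil, err
	}
	return a, nil
}
```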


@ -9,7 +9,7 @@
// dependency on the descriptor proto package).
package descopts
import pref "google.golang.org/protobuf/reflect/protoreflect"
import "google.golang.org/protobuf/reflect/protoreflect"
// These variables are set by the init function in descriptor.pb.go via logic
// in internal/filetype. In other words, so long as the descriptor proto package
@ -17,13 +17,13 @@ import pref "google.golang.org/protobuf/reflect/protoreflect"
//
// Each variable is populated with a nil pointer to the options struct.
var (
File pref.ProtoMessage
Enum pref.ProtoMessage
EnumValue pref.ProtoMessage
Message pref.ProtoMessage
Field pref.ProtoMessage
Oneof pref.ProtoMessage
ExtensionRange pref.ProtoMessage
Service pref.ProtoMessage
Method pref.ProtoMessage
File protoreflect.ProtoMessage
Enum protoreflect.ProtoMessage
EnumValue protoreflect.ProtoMessage
Message protoreflect.ProtoMessage
Field protoreflect.ProtoMessage
Oneof protoreflect.ProtoMessage
ExtensionRange protoreflect.ProtoMessage
Service protoreflect.ProtoMessage
Method protoreflect.ProtoMessage
)


@ -32,6 +32,7 @@ const (
EditionProto2 Edition = 998
EditionProto3 Edition = 999
Edition2023 Edition = 1000
Edition2024 Edition = 1001
EditionUnsupported Edition = 100000
)
@ -77,28 +78,42 @@ type (
Locations SourceLocations
}
// EditionFeatures is a frequently-instantiated struct, so please take care
// to minimize padding when adding new fields to this struct (add them in
// the right place/order).
EditionFeatures struct {
// StripEnumPrefix determines if the plugin generates enum value
// constants as-is, with their prefix stripped, or both variants.
StripEnumPrefix int
// IsFieldPresence is true if field_presence is EXPLICIT
// https://protobuf.dev/editions/features/#field_presence
IsFieldPresence bool
// IsFieldPresence is true if field_presence is LEGACY_REQUIRED
// https://protobuf.dev/editions/features/#field_presence
IsLegacyRequired bool
// IsOpenEnum is true if enum_type is OPEN
// https://protobuf.dev/editions/features/#enum_type
IsOpenEnum bool
// IsPacked is true if repeated_field_encoding is PACKED
// https://protobuf.dev/editions/features/#repeated_field_encoding
IsPacked bool
// IsUTF8Validated is true if utf8_validation is VERIFY
// https://protobuf.dev/editions/features/#utf8_validation
IsUTF8Validated bool
// IsDelimitedEncoded is true if message_encoding is DELIMITED
// https://protobuf.dev/editions/features/#message_encoding
IsDelimitedEncoded bool
// IsJSONCompliant is true if json_format is ALLOW
// https://protobuf.dev/editions/features/#json_format
IsJSONCompliant bool
// GenerateLegacyUnmarshalJSON determines if the plugin generates the
// UnmarshalJSON([]byte) error method for enums.
GenerateLegacyUnmarshalJSON bool
@ -258,6 +273,7 @@ type (
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
IsWeak bool // promoted from google.protobuf.FieldOptions
IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
Enum protoreflect.EnumDescriptor
@ -351,6 +367,7 @@ func (fd *Field) IsPacked() bool {
}
func (fd *Field) IsExtension() bool { return false }
func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
func (fd *Field) MapKey() protoreflect.FieldDescriptor {
@ -425,6 +442,7 @@ type (
Extendee protoreflect.MessageDescriptor
Cardinality protoreflect.Cardinality
Kind protoreflect.Kind
IsLazy bool
EditionFeatures EditionFeatures
}
ExtensionL2 struct {
@ -465,6 +483,7 @@ func (xd *Extension) IsPacked() bool {
}
func (xd *Extension) IsExtension() bool { return true }
func (xd *Extension) IsWeak() bool { return false }
func (xd *Extension) IsLazy() bool { return xd.L1.IsLazy }
func (xd *Extension) IsList() bool { return xd.Cardinality() == protoreflect.Repeated }
func (xd *Extension) IsMap() bool { return false }
func (xd *Extension) MapKey() protoreflect.FieldDescriptor { return nil }

View File

@ -495,6 +495,8 @@ func (xd *Extension) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
xd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Lazy_field_number:
xd.L1.IsLazy = protowire.DecodeBool(v)
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)

View File

@ -504,6 +504,8 @@ func (fd *Field) unmarshalOptions(b []byte) {
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
case genid.FieldOptions_Weak_field_number:
fd.L1.IsWeak = protowire.DecodeBool(v)
case genid.FieldOptions_Lazy_field_number:
fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
fd.L1.EditionFeatures.IsUTF8Validated = protowire.DecodeBool(v)
}

View File

@ -32,6 +32,10 @@ func unmarshalGoFeature(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeVarint(b)
b = b[m:]
parent.GenerateLegacyUnmarshalJSON = protowire.DecodeBool(v)
case genid.GoFeatures_StripEnumPrefix_field_number:
v, m := protowire.ConsumeVarint(b)
b = b[m:]
parent.StripEnumPrefix = int(v)
default:
panic(fmt.Sprintf("unkown field number %d while unmarshalling GoFeatures", num))
}
@ -68,7 +72,7 @@ func unmarshalFeatureSet(b []byte, parent EditionFeatures) EditionFeatures {
v, m := protowire.ConsumeBytes(b)
b = b[m:]
switch num {
case genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number:
case genid.FeatureSet_Go_ext_number:
parent = unmarshalGoFeature(v, parent)
}
}

View File

@ -6,6 +6,6 @@
// and the well-known types.
package genid
import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
import "google.golang.org/protobuf/reflect/protoreflect"
const GoogleProtobuf_package protoreflect.FullName = "google.protobuf"

View File

@ -12,20 +12,42 @@ import (
const File_google_protobuf_go_features_proto = "google/protobuf/go_features.proto"
// Names for google.protobuf.GoFeatures.
// Names for pb.GoFeatures.
const (
GoFeatures_message_name protoreflect.Name = "GoFeatures"
GoFeatures_message_fullname protoreflect.FullName = "google.protobuf.GoFeatures"
GoFeatures_message_fullname protoreflect.FullName = "pb.GoFeatures"
)
// Field names for google.protobuf.GoFeatures.
// Field names for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_name protoreflect.Name = "legacy_unmarshal_json_enum"
GoFeatures_StripEnumPrefix_field_name protoreflect.Name = "strip_enum_prefix"
GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "google.protobuf.GoFeatures.legacy_unmarshal_json_enum"
GoFeatures_LegacyUnmarshalJsonEnum_field_fullname protoreflect.FullName = "pb.GoFeatures.legacy_unmarshal_json_enum"
GoFeatures_StripEnumPrefix_field_fullname protoreflect.FullName = "pb.GoFeatures.strip_enum_prefix"
)
// Field numbers for google.protobuf.GoFeatures.
// Field numbers for pb.GoFeatures.
const (
GoFeatures_LegacyUnmarshalJsonEnum_field_number protoreflect.FieldNumber = 1
GoFeatures_StripEnumPrefix_field_number protoreflect.FieldNumber = 3
)
// Full and short names for pb.GoFeatures.StripEnumPrefix.
const (
GoFeatures_StripEnumPrefix_enum_fullname = "pb.GoFeatures.StripEnumPrefix"
GoFeatures_StripEnumPrefix_enum_name = "StripEnumPrefix"
)
// Enum values for pb.GoFeatures.StripEnumPrefix.
const (
GoFeatures_STRIP_ENUM_PREFIX_UNSPECIFIED_enum_value = 0
GoFeatures_STRIP_ENUM_PREFIX_KEEP_enum_value = 1
GoFeatures_STRIP_ENUM_PREFIX_GENERATE_BOTH_enum_value = 2
GoFeatures_STRIP_ENUM_PREFIX_STRIP_enum_value = 3
)
// Extension numbers
const (
FeatureSet_Go_ext_number protoreflect.FieldNumber = 1002
)

View File

@ -4,7 +4,7 @@
package genid
import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field names and numbers for synthetic map entry messages.
const (

View File

@ -4,7 +4,7 @@
package genid
import protoreflect "google.golang.org/protobuf/reflect/protoreflect"
import "google.golang.org/protobuf/reflect/protoreflect"
// Generic field name and number for messages in wrappers.proto.
const (

View File

@ -67,7 +67,6 @@ type lazyExtensionValue struct {
xi *extensionFieldInfo
value protoreflect.Value
b []byte
fn func() protoreflect.Value
}
type ExtensionField struct {
@ -158,10 +157,9 @@ func (f *ExtensionField) lazyInit() {
}
f.lazy.value = val
} else {
f.lazy.value = f.lazy.fn()
panic("No support for lazy fns for ExtensionField")
}
f.lazy.xi = nil
f.lazy.fn = nil
f.lazy.b = nil
atomic.StoreUint32(&f.lazy.atomicOnce, 1)
}
@ -174,13 +172,6 @@ func (f *ExtensionField) Set(t protoreflect.ExtensionType, v protoreflect.Value)
f.lazy = nil
}
// SetLazy sets the type and a value that is to be lazily evaluated upon first use.
// This must not be called concurrently.
func (f *ExtensionField) SetLazy(t protoreflect.ExtensionType, fn func() protoreflect.Value) {
f.typ = t
f.lazy = &lazyExtensionValue{fn: fn}
}
// Value returns the value of the extension field.
// This may be called concurrently.
func (f *ExtensionField) Value() protoreflect.Value {

View File

@ -65,6 +65,9 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
if err != nil {
return out, err
}
if cf.funcs.isInit == nil {
out.initialized = true
}
vi.Set(vw)
return out, nil
}

View File

@ -189,6 +189,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
if mi.methods.Merge == nil {
mi.methods.Merge = mi.merge
}
if mi.methods.Equal == nil {
mi.methods.Equal = equal
}
}
// getUnknownBytes returns a *[]byte for the unknown fields.

View File

@ -1,210 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build purego || appengine
// +build purego appengine
package impl
import (
"reflect"
"google.golang.org/protobuf/encoding/protowire"
)
func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) {
v := p.v.Elem().Int()
return f.tagsize + protowire.SizeVarint(uint64(v))
}
func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
v := p.v.Elem().Int()
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(v))
return b, nil
}
func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
p.v.Elem().SetInt(int64(v))
out.n = n
return out, nil
}
func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(src.v.Elem())
}
var coderEnum = pointerCoderFuncs{
size: sizeEnum,
marshal: appendEnum,
unmarshal: consumeEnum,
merge: mergeEnum,
}
func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
if p.v.Elem().Int() == 0 {
return 0
}
return sizeEnum(p, f, opts)
}
func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
if p.v.Elem().Int() == 0 {
return b, nil
}
return appendEnum(b, p, f, opts)
}
func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if src.v.Elem().Int() != 0 {
dst.v.Elem().Set(src.v.Elem())
}
}
var coderEnumNoZero = pointerCoderFuncs{
size: sizeEnumNoZero,
marshal: appendEnumNoZero,
unmarshal: consumeEnum,
merge: mergeEnumNoZero,
}
func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
return sizeEnum(pointer{p.v.Elem()}, f, opts)
}
func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
return appendEnum(b, pointer{p.v.Elem()}, f, opts)
}
func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
if wtyp != protowire.VarintType {
return out, errUnknown
}
if p.v.Elem().IsNil() {
p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem()))
}
return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts)
}
func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
if !src.v.Elem().IsNil() {
v := reflect.New(dst.v.Type().Elem().Elem())
v.Elem().Set(src.v.Elem().Elem())
dst.v.Elem().Set(v)
}
}
var coderEnumPtr = pointerCoderFuncs{
size: sizeEnumPtr,
marshal: appendEnumPtr,
unmarshal: consumeEnumPtr,
merge: mergeEnumPtr,
}
func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize
}
return size
}
func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
for i, llen := 0, s.Len(); i < llen; i++ {
b = protowire.AppendVarint(b, f.wiretag)
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) {
s := p.v.Elem()
if wtyp == protowire.BytesType {
b, n := protowire.ConsumeBytes(b)
if n < 0 {
return out, errDecode
}
for len(b) > 0 {
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
b = b[n:]
}
out.n = n
return out, nil
}
if wtyp != protowire.VarintType {
return out, errUnknown
}
v, n := protowire.ConsumeVarint(b)
if n < 0 {
return out, errDecode
}
rv := reflect.New(s.Type().Elem()).Elem()
rv.SetInt(int64(v))
s.Set(reflect.Append(s, rv))
out.n = n
return out, nil
}
func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) {
dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem()))
}
var coderEnumSlice = pointerCoderFuncs{
size: sizeEnumSlice,
marshal: appendEnumSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}
func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return 0
}
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
return f.tagsize + protowire.SizeBytes(n)
}
func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
s := p.v.Elem()
llen := s.Len()
if llen == 0 {
return b, nil
}
b = protowire.AppendVarint(b, f.wiretag)
n := 0
for i := 0; i < llen; i++ {
n += protowire.SizeVarint(uint64(s.Index(i).Int()))
}
b = protowire.AppendVarint(b, uint64(n))
for i := 0; i < llen; i++ {
b = protowire.AppendVarint(b, uint64(s.Index(i).Int()))
}
return b, nil
}
var coderEnumPackedSlice = pointerCoderFuncs{
size: sizeEnumPackedSlice,
marshal: appendEnumPackedSlice,
unmarshal: consumeEnumSlice,
merge: mergeEnumSlice,
}

View File

@ -2,9 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !purego && !appengine
// +build !purego,!appengine
package impl
// When using unsafe pointers, we can just treat enum values as int32s.

View File

@ -322,7 +322,7 @@ func (c *stringConverter) PBValueOf(v reflect.Value) protoreflect.Value {
return protoreflect.ValueOfString(v.Convert(stringType).String())
}
func (c *stringConverter) GoValueOf(v protoreflect.Value) reflect.Value {
// pref.Value.String never panics, so we go through an interface
// protoreflect.Value.String never panics, so we go through an interface
// conversion here to check the type.
s := v.Interface().(string)
if c.goType.Kind() == reflect.Slice && s == "" {

View File

@ -10,7 +10,7 @@ import (
"sync/atomic"
"google.golang.org/protobuf/internal/flags"
proto "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/proto"
piface "google.golang.org/protobuf/runtime/protoiface"
)

View File

@ -0,0 +1,224 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package impl
import (
"bytes"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/runtime/protoiface"
)
func equal(in protoiface.EqualInput) protoiface.EqualOutput {
return protoiface.EqualOutput{Equal: equalMessage(in.MessageA, in.MessageB)}
}
// equalMessage is a fast-path variant of protoreflect.equalMessage.
// It takes advantage of the internal messageState type to avoid
// unnecessary allocations and type assertions.
func equalMessage(mx, my protoreflect.Message) bool {
if mx == nil || my == nil {
return mx == my
}
if mx.Descriptor() != my.Descriptor() {
return false
}
msx, ok := mx.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
msy, ok := my.(*messageState)
if !ok {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi := msx.messageInfo()
miy := msy.messageInfo()
if mi != miy {
return protoreflect.ValueOfMessage(mx).Equal(protoreflect.ValueOfMessage(my))
}
mi.init()
// Compares regular fields
// Modified Message.Range code that compares two messages of the same type
// while going over the fields.
for _, ri := range mi.rangeInfos {
var fd protoreflect.FieldDescriptor
var vx, vy protoreflect.Value
switch ri := ri.(type) {
case *fieldInfo:
hx := ri.has(msx.pointer())
hy := ri.has(msy.pointer())
if hx != hy {
return false
}
if !hx {
continue
}
fd = ri.fieldDesc
vx = ri.get(msx.pointer())
vy = ri.get(msy.pointer())
case *oneofInfo:
fnx := ri.which(msx.pointer())
fny := ri.which(msy.pointer())
if fnx != fny {
return false
}
if fnx <= 0 {
continue
}
fi := mi.fields[fnx]
fd = fi.fieldDesc
vx = fi.get(msx.pointer())
vy = fi.get(msy.pointer())
}
if !equalValue(fd, vx, vy) {
return false
}
}
// Compare extensions.
// This is more complicated because mx or my could have empty/nil extension maps,
// however some populated extension map values are equal to nil extension maps.
emx := mi.extensionMap(msx.pointer())
emy := mi.extensionMap(msy.pointer())
if emx != nil {
for k, x := range *emx {
xd := x.Type().TypeDescriptor()
xv := x.Value()
var y ExtensionField
ok := false
if emy != nil {
y, ok = (*emy)[k]
}
// We need to treat empty lists as equal to nil values
if emy == nil || !ok {
if xd.IsList() && xv.List().Len() == 0 {
continue
}
return false
}
if !equalValue(xd, xv, y.Value()) {
return false
}
}
}
if emy != nil {
// emy may have extensions emx does not have, need to check them as well
for k, y := range *emy {
if emx != nil {
// emx has the field, so we already checked it
if _, ok := (*emx)[k]; ok {
continue
}
}
// Empty lists are equal to nil
if y.Type().TypeDescriptor().IsList() && y.Value().List().Len() == 0 {
continue
}
// Can't be equal if the extension is populated
return false
}
}
return equalUnknown(mx.GetUnknown(), my.GetUnknown())
}
func equalValue(fd protoreflect.FieldDescriptor, vx, vy protoreflect.Value) bool {
// slow path
if fd.Kind() != protoreflect.MessageKind {
return vx.Equal(vy)
}
// fast path special cases
if fd.IsMap() {
if fd.MapValue().Kind() == protoreflect.MessageKind {
return equalMessageMap(vx.Map(), vy.Map())
}
return vx.Equal(vy)
}
if fd.IsList() {
return equalMessageList(vx.List(), vy.List())
}
return equalMessage(vx.Message(), vy.Message())
}
// Mostly copied from protoreflect.equalMap.
// This variant only works for messages as map types.
// All other map types should be handled via Value.Equal.
func equalMessageMap(mx, my protoreflect.Map) bool {
if mx.Len() != my.Len() {
return false
}
equal := true
mx.Range(func(k protoreflect.MapKey, vx protoreflect.Value) bool {
if !my.Has(k) {
equal = false
return false
}
vy := my.Get(k)
equal = equalMessage(vx.Message(), vy.Message())
return equal
})
return equal
}
// Mostly copied from protoreflect.equalList.
// The only change is the usage of equalImpl instead of protoreflect.equalValue.
func equalMessageList(lx, ly protoreflect.List) bool {
if lx.Len() != ly.Len() {
return false
}
for i := 0; i < lx.Len(); i++ {
// We only operate on messages here since equalImpl will not call us in any other case.
if !equalMessage(lx.Get(i).Message(), ly.Get(i).Message()) {
return false
}
}
return true
}
// equalUnknown compares unknown fields by direct comparison on the raw bytes
// of each individual field number.
// Copied from protoreflect.equalUnknown.
func equalUnknown(x, y protoreflect.RawFields) bool {
if len(x) != len(y) {
return false
}
if bytes.Equal([]byte(x), []byte(y)) {
return true
}
mx := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
my := make(map[protoreflect.FieldNumber]protoreflect.RawFields)
for len(x) > 0 {
fnum, _, n := protowire.ConsumeField(x)
mx[fnum] = append(mx[fnum], x[:n]...)
x = x[n:]
}
for len(y) > 0 {
fnum, _, n := protowire.ConsumeField(y)
my[fnum] = append(my[fnum], y[:n]...)
y = y[n:]
}
if len(mx) != len(my) {
return false
}
for k, v1 := range mx {
if v2, ok := my[k]; !ok || !bytes.Equal([]byte(v1), []byte(v2)) {
return false
}
}
return true
}
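A brief, hedged usage sketch: the fast path above is registered as the message's Equal method (see the makeCoderMethods hunk earlier), which proto.Equal consults before falling back to reflection, so public callers are unchanged:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// Two separately built messages with the same contents compare equal;
	// with this change proto.Equal can take the allocation-free fast path.
	x := durationpb.New(3 * time.Second)
	y := durationpb.New(3 * time.Second)
	fmt.Println(proto.Equal(x, y)) // true
}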

Some files were not shown because too many files have changed in this diff