chore: go mod tidy / vendor / make deps

vendor/golang.org/x/net/context/context.go (generated, vendored): 35 changed lines

@@ -6,7 +6,7 @@
 // cancellation signals, and other request-scoped values across API boundaries
 // and between processes.
 // As of Go 1.7 this package is available in the standard library under the
-// name [context], and migrating to it can be done automatically with [go fix].
+// name [context].
 //
 // Incoming requests to a server should create a [Context], and outgoing
 // calls to servers should accept a Context. The chain of function
@@ -38,8 +38,6 @@
 //
 // See https://go.dev/blog/context for example code for a server that uses
 // Contexts.
-//
-// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
 package context
 
 import (
@@ -51,36 +49,37 @@ import (
 // API boundaries.
 //
 // Context's methods may be called by multiple goroutines simultaneously.
+//
+//go:fix inline
 type Context = context.Context
 
 // Canceled is the error returned by [Context.Err] when the context is canceled
 // for some reason other than its deadline passing.
+//
+//go:fix inline
 var Canceled = context.Canceled
 
 // DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
 // due to its deadline passing.
+//
+//go:fix inline
 var DeadlineExceeded = context.DeadlineExceeded
 
 // Background returns a non-nil, empty Context. It is never canceled, has no
 // values, and has no deadline. It is typically used by the main function,
 // initialization, and tests, and as the top-level Context for incoming
 // requests.
-func Background() Context {
-	return background
-}
+//
+//go:fix inline
+func Background() Context { return context.Background() }
 
 // TODO returns a non-nil, empty Context. Code should use context.TODO when
 // it's unclear which Context to use or it is not yet available (because the
 // surrounding function has not yet been extended to accept a Context
 // parameter).
-func TODO() Context {
-	return todo
-}
-
-var (
-	background = context.Background()
-	todo       = context.TODO()
-)
+//
+//go:fix inline
+func TODO() Context { return context.TODO() }
 
 // A CancelFunc tells an operation to abandon its work.
 // A CancelFunc does not wait for the work to stop.
@@ -95,6 +94,8 @@ type CancelFunc = context.CancelFunc
 //
 // Canceling this context releases resources associated with it, so code should
 // call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
 func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
 	return context.WithCancel(parent)
 }
@@ -108,6 +109,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
 //
 // Canceling this context releases resources associated with it, so code should
 // call cancel as soon as the operations running in this [Context] complete.
+//
+//go:fix inline
 func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
 	return context.WithDeadline(parent, d)
 }
@@ -122,6 +125,8 @@ func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
 //		defer cancel() // releases resources if slowOperation completes before timeout elapses
 //		return slowOperation(ctx)
 //	}
+//
+//go:fix inline
 func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
 	return context.WithTimeout(parent, timeout)
 }
@@ -139,6 +144,8 @@ func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
 // interface{}, context keys often have concrete type
 // struct{}. Alternatively, exported context key variables' static
 // type should be a pointer or interface.
+//
+//go:fix inline
 func WithValue(parent Context, key, val interface{}) Context {
 	return context.WithValue(parent, key, val)
 }
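
Note on the //go:fix inline directives added above: they mark these aliases and wrappers so that tooling which understands the directive (such as the gofix inliner in x/tools) can rewrite callers to use the standard library's context package directly. A minimal illustrative sketch of the pattern, using a hypothetical wrapper of our own rather than code from this commit:

	// Package compat re-exports a stdlib function under an old name.
	package compat

	import "context"

	// Background forwards to the standard library.
	//
	//go:fix inline
	func Background() context.Context { return context.Background() }

A tool applying the directive can replace compat.Background() call sites with context.Background() and then drop the compat import.
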

vendor/golang.org/x/net/http2/config.go (generated, vendored): 46 changed lines

@@ -55,7 +55,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
 		PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites,
 		CountError:                   h2.CountError,
 	}
-	fillNetHTTPServerConfig(&conf, h1)
+	fillNetHTTPConfig(&conf, h1.HTTP2)
 	setConfigDefaults(&conf, true)
 	return conf
 }
@@ -81,7 +81,7 @@ func configFromTransport(h2 *Transport) http2Config {
 	}
 
 	if h2.t1 != nil {
-		fillNetHTTPTransportConfig(&conf, h2.t1)
+		fillNetHTTPConfig(&conf, h2.t1.HTTP2)
 	}
 	setConfigDefaults(&conf, false)
 	return conf
@@ -120,3 +120,45 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 {
 	const typicalHeaders = 10 // conservative
 	return n + typicalHeaders*perFieldOverhead
 }
+
+func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
+	if h2 == nil {
+		return
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxEncoderHeaderTableSize != 0 {
+		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
+	}
+	if h2.MaxDecoderHeaderTableSize != 0 {
+		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
+	}
+	if h2.MaxConcurrentStreams != 0 {
+		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
+	}
+	if h2.MaxReadFrameSize != 0 {
+		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
+	}
+	if h2.MaxReceiveBufferPerConnection != 0 {
+		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
+	}
+	if h2.MaxReceiveBufferPerStream != 0 {
+		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
+	}
+	if h2.SendPingTimeout != 0 {
+		conf.SendPingTimeout = h2.SendPingTimeout
+	}
+	if h2.PingTimeout != 0 {
+		conf.PingTimeout = h2.PingTimeout
+	}
+	if h2.WriteByteTimeout != 0 {
+		conf.WriteByteTimeout = h2.WriteByteTimeout
+	}
+	if h2.PermitProhibitedCipherSuites {
+		conf.PermitProhibitedCipherSuites = true
+	}
+	if h2.CountError != nil {
+		conf.CountError = h2.CountError
+	}
+}
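
For orientation: the fillNetHTTPConfig function added above copies any non-zero field from net/http's HTTP2Config (new in Go 1.24) into this package's internal config. A minimal usage sketch, assuming Go 1.24+; the address and certificate paths are placeholders:

	package main

	import (
		"net/http"
		"time"
	)

	func main() {
		srv := &http.Server{
			Addr: ":8443",
			// These values flow into x/net/http2 via fillNetHTTPConfig.
			HTTP2: &http.HTTP2Config{
				MaxConcurrentStreams: 250,
				PingTimeout:          15 * time.Second,
			},
		}
		_ = srv.ListenAndServeTLS("cert.pem", "key.pem")
	}
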

vendor/golang.org/x/net/http2/config_go124.go (generated, vendored, deleted): 61 changed lines

@@ -1,61 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.24
-
-package http2
-
-import "net/http"
-
-// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2.
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
-	fillNetHTTPConfig(conf, srv.HTTP2)
-}
-
-// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
-	fillNetHTTPConfig(conf, tr.HTTP2)
-}
-
-func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) {
-	if h2 == nil {
-		return
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxEncoderHeaderTableSize != 0 {
-		conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize)
-	}
-	if h2.MaxDecoderHeaderTableSize != 0 {
-		conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize)
-	}
-	if h2.MaxConcurrentStreams != 0 {
-		conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams)
-	}
-	if h2.MaxReadFrameSize != 0 {
-		conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize)
-	}
-	if h2.MaxReceiveBufferPerConnection != 0 {
-		conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection)
-	}
-	if h2.MaxReceiveBufferPerStream != 0 {
-		conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream)
-	}
-	if h2.SendPingTimeout != 0 {
-		conf.SendPingTimeout = h2.SendPingTimeout
-	}
-	if h2.PingTimeout != 0 {
-		conf.PingTimeout = h2.PingTimeout
-	}
-	if h2.WriteByteTimeout != 0 {
-		conf.WriteByteTimeout = h2.WriteByteTimeout
-	}
-	if h2.PermitProhibitedCipherSuites {
-		conf.PermitProhibitedCipherSuites = true
-	}
-	if h2.CountError != nil {
-		conf.CountError = h2.CountError
-	}
-}

vendor/golang.org/x/net/http2/config_pre_go124.go (generated, vendored, deleted): 16 changed lines

@@ -1,16 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.24
-
-package http2
-
-import "net/http"
-
-// Pre-Go 1.24 fallback.
-// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24.
-
-func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {}
-
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {}

vendor/golang.org/x/net/http2/gotrack.go (generated, vendored): 17 changed lines

@@ -15,21 +15,32 @@ import (
 	"runtime"
 	"strconv"
 	"sync"
+	"sync/atomic"
 )
 
 var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1"
 
+// Setting DebugGoroutines to false during a test to disable goroutine debugging
+// results in race detector complaints when a test leaves goroutines running before
+// returning. Tests shouldn't do this, of course, but when they do it generally shows
+// up as infrequent, hard-to-debug flakes. (See #66519.)
+//
+// Disable goroutine debugging during individual tests with an atomic bool.
+// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition
+// here is harmless.)
+var disableDebugGoroutines atomic.Bool
+
 type goroutineLock uint64
 
 func newGoroutineLock() goroutineLock {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return 0
 	}
 	return goroutineLock(curGoroutineID())
 }
 
 func (g goroutineLock) check() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() != uint64(g) {
@@ -38,7 +49,7 @@ func (g goroutineLock) check() {
 }
 
 func (g goroutineLock) checkNotOn() {
-	if !DebugGoroutines {
+	if !DebugGoroutines || disableDebugGoroutines.Load() {
 		return
 	}
 	if curGoroutineID() == uint64(g) {
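
The new disableDebugGoroutines knob is an atomic.Bool, so tests can flip it mid-run without a data race on the flag itself. A generic sketch of that pattern (illustrative, not code from this commit):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var disabled atomic.Bool

	func debugEnabled() bool { return !disabled.Load() }

	func main() {
		fmt.Println(debugEnabled()) // true
		disabled.Store(true)        // e.g. done from a test
		fmt.Println(debugEnabled()) // false
	}
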

vendor/golang.org/x/net/http2/http2.go (generated, vendored): 34 changed lines

@@ -15,7 +15,6 @@ package http2 // import "golang.org/x/net/http2"
 
 import (
 	"bufio"
-	"context"
 	"crypto/tls"
 	"errors"
 	"fmt"
@@ -255,15 +254,13 @@ func (cw closeWaiter) Wait() {
 // idle memory usage with many connections.
 type bufferedWriter struct {
 	_           incomparable
-	group       synctestGroupInterface // immutable
-	conn        net.Conn               // immutable
-	bw          *bufio.Writer          // non-nil when data is buffered
-	byteTimeout time.Duration          // immutable, WriteByteTimeout
+	conn        net.Conn      // immutable
+	bw          *bufio.Writer // non-nil when data is buffered
+	byteTimeout time.Duration // immutable, WriteByteTimeout
 }
 
-func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter {
+func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter {
 	return &bufferedWriter{
-		group:       group,
 		conn:        conn,
 		byteTimeout: timeout,
 	}
@@ -314,24 +311,18 @@ func (w *bufferedWriter) Flush() error {
 type bufferedWriterTimeoutWriter bufferedWriter
 
 func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) {
-	return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p)
+	return writeWithByteTimeout(w.conn, w.byteTimeout, p)
 }
 
 // writeWithByteTimeout writes to conn.
 // If more than timeout passes without any bytes being written to the connection,
 // the write fails.
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
+func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) {
 	if timeout <= 0 {
 		return conn.Write(p)
 	}
 	for {
-		var now time.Time
-		if group == nil {
-			now = time.Now()
-		} else {
-			now = group.Now()
-		}
-		conn.SetWriteDeadline(now.Add(timeout))
+		conn.SetWriteDeadline(time.Now().Add(timeout))
 		nn, err := conn.Write(p[n:])
 		n += nn
 		if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) {
@@ -417,14 +408,3 @@ func (s *sorter) SortStrings(ss []string) {
 // makes that struct also non-comparable, and generally doesn't add
 // any size (as long as it's first).
 type incomparable [0]func()
-
-// synctestGroupInterface is the methods of synctestGroup used by Server and Transport.
-// It's defined as an interface here to let us keep synctestGroup entirely test-only
-// and not a part of non-test builds.
-type synctestGroupInterface interface {
-	Join()
-	Now() time.Time
-	NewTimer(d time.Duration) timer
-	AfterFunc(d time.Duration, f func()) timer
-	ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc)
-}

vendor/golang.org/x/net/http2/server.go (generated, vendored): 124 changed lines

@@ -176,39 +176,6 @@ type Server struct {
 	// so that we don't embed a Mutex in this struct, which will make the
 	// struct non-copyable, which might break some callers.
 	state *serverInternalState
-
-	// Synchronization group used for testing.
-	// Outside of tests, this is nil.
-	group synctestGroupInterface
 }
-
-func (s *Server) markNewGoroutine() {
-	if s.group != nil {
-		s.group.Join()
-	}
-}
-
-func (s *Server) now() time.Time {
-	if s.group != nil {
-		return s.group.Now()
-	}
-	return time.Now()
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (s *Server) newTimer(d time.Duration) timer {
-	if s.group != nil {
-		return s.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (s *Server) afterFunc(d time.Duration, f func()) timer {
-	if s.group != nil {
-		return s.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
-}
 
 type serverInternalState struct {
@@ -423,6 +390,9 @@ func (o *ServeConnOpts) handler() http.Handler {
 //
 // The opts parameter is optional. If nil, default values are used.
 func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
+	if opts == nil {
+		opts = &ServeConnOpts{}
+	}
 	s.serveConn(c, opts, nil)
 }
 
@@ -438,7 +408,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverConn)) {
 		conn:              c,
 		baseCtx:           baseCtx,
 		remoteAddrStr:     c.RemoteAddr().String(),
-		bw:                newBufferedWriter(s.group, c, conf.WriteByteTimeout),
+		bw:                newBufferedWriter(c, conf.WriteByteTimeout),
 		handler:           opts.handler(),
 		streams:           make(map[uint32]*stream),
 		readFrameCh:       make(chan readFrameResult),
@@ -638,11 +608,11 @@ type serverConn struct {
 	pingSent        bool
 	sentPingData    [8]byte
 	goAwayCode      ErrCode
-	shutdownTimer   timer // nil until used
-	idleTimer       timer // nil if unused
+	shutdownTimer   *time.Timer // nil until used
+	idleTimer       *time.Timer // nil if unused
 	readIdleTimeout time.Duration
 	pingTimeout     time.Duration
-	readIdleTimer   timer // nil if unused
+	readIdleTimer   *time.Timer // nil if unused
 
 	// Owned by the writeFrameAsync goroutine:
 	headerWriteBuf bytes.Buffer
@@ -687,12 +657,12 @@ type stream struct {
 	flow             outflow // limits writing from Handler to client
 	inflow           inflow  // what the client is allowed to POST/etc to us
 	state            streamState
-	resetQueued      bool  // RST_STREAM queued for write; set by sc.resetStream
-	gotTrailerHeader bool  // HEADER frame for trailers was seen
-	wroteHeaders     bool  // whether we wrote headers (not status 100)
-	readDeadline     timer // nil if unused
-	writeDeadline    timer // nil if unused
-	closeErr         error // set before cw is closed
+	resetQueued      bool        // RST_STREAM queued for write; set by sc.resetStream
+	gotTrailerHeader bool        // HEADER frame for trailers was seen
+	wroteHeaders     bool        // whether we wrote headers (not status 100)
+	readDeadline     *time.Timer // nil if unused
+	writeDeadline    *time.Timer // nil if unused
+	closeErr         error       // set before cw is closed
 
 	trailer    http.Header // accumulated trailers
 	reqTrailer http.Header // handler's Request.Trailer
@@ -848,7 +818,6 @@ type readFrameResult struct {
 // consumer is done with the frame.
 // It's run on its own goroutine.
 func (sc *serverConn) readFrames() {
-	sc.srv.markNewGoroutine()
 	gate := make(chan struct{})
 	gateDone := func() { gate <- struct{}{} }
 	for {
@@ -881,7 +850,6 @@ type frameWriteResult struct {
 // At most one goroutine can be running writeFrameAsync at a time per
 // serverConn.
 func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) {
-	sc.srv.markNewGoroutine()
 	var err error
 	if wd == nil {
 		err = wr.write.writeFrame(sc)
@@ -965,22 +933,22 @@ func (sc *serverConn) serve(conf http2Config) {
 	sc.setConnState(http.StateIdle)
 
 	if sc.srv.IdleTimeout > 0 {
-		sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
+		sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer)
 		defer sc.idleTimer.Stop()
 	}
 
 	if conf.SendPingTimeout > 0 {
 		sc.readIdleTimeout = conf.SendPingTimeout
-		sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
+		sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer)
 		defer sc.readIdleTimer.Stop()
 	}
 
 	go sc.readFrames() // closed by defer sc.conn.Close above
 
-	settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer)
+	settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer)
 	defer settingsTimer.Stop()
 
-	lastFrameTime := sc.srv.now()
+	lastFrameTime := time.Now()
 	loopNum := 0
 	for {
 		loopNum++
@@ -994,7 +962,7 @@ func (sc *serverConn) serve(conf http2Config) {
 		case res := <-sc.wroteFrameCh:
 			sc.wroteFrame(res)
 		case res := <-sc.readFrameCh:
-			lastFrameTime = sc.srv.now()
+			lastFrameTime = time.Now()
 			// Process any written frames before reading new frames from the client since a
 			// written frame could have triggered a new stream to be started.
 			if sc.writingFrameAsync {
@@ -1077,7 +1045,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
 	}
 
 	pingAt := lastFrameReadTime.Add(sc.readIdleTimeout)
-	now := sc.srv.now()
+	now := time.Now()
 	if pingAt.After(now) {
 		// We received frames since arming the ping timer.
 		// Reset it for the next possible timeout.
@@ -1141,10 +1109,10 @@ func (sc *serverConn) readPreface() error {
 			errc <- nil
 		}
 	}()
-	timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server?
+	timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server?
 	defer timer.Stop()
 	select {
-	case <-timer.C():
+	case <-timer.C:
 		return errPrefaceTimeout
 	case err := <-errc:
 		if err == nil {
@@ -1160,6 +1128,21 @@ var errChanPool = sync.Pool{
 	New: func() interface{} { return make(chan error, 1) },
 }
 
+func getErrChan() chan error {
+	if inTests {
+		// Channels cannot be reused across synctest tests.
+		return make(chan error, 1)
+	} else {
+		return errChanPool.Get().(chan error)
+	}
+}
+
+func putErrChan(ch chan error) {
+	if !inTests {
+		errChanPool.Put(ch)
+	}
+}
+
 var writeDataPool = sync.Pool{
 	New: func() interface{} { return new(writeData) },
 }
@@ -1167,7 +1150,7 @@ var writeDataPool = sync.Pool{
 // writeDataFromHandler writes DATA response frames from a handler on
 // the given stream.
 func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
-	ch := errChanPool.Get().(chan error)
+	ch := getErrChan()
 	writeArg := writeDataPool.Get().(*writeData)
 	*writeArg = writeData{stream.id, data, endStream}
 	err := sc.writeFrameFromHandler(FrameWriteRequest{
@@ -1199,7 +1182,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error {
 			return errStreamClosed
 		}
 	}
-	errChanPool.Put(ch)
+	putErrChan(ch)
 	if frameWriteDone {
 		writeDataPool.Put(writeArg)
 	}
@@ -1513,7 +1496,7 @@ func (sc *serverConn) goAway(code ErrCode) {
 
 func (sc *serverConn) shutDownIn(d time.Duration) {
 	sc.serveG.check()
-	sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer)
+	sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer)
 }
 
 func (sc *serverConn) resetStream(se StreamError) {
@@ -2118,7 +2101,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
 	// (in Go 1.8), though. That's a more sane option anyway.
 	if sc.hs.ReadTimeout > 0 {
 		sc.conn.SetReadDeadline(time.Time{})
-		st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
+		st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout)
 	}
 
 	return sc.scheduleHandler(id, rw, req, handler)
@@ -2216,7 +2199,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream {
 	st.flow.add(sc.initialStreamSendWindowSize)
 	st.inflow.init(sc.initialStreamRecvWindowSize)
 	if sc.hs.WriteTimeout > 0 {
-		st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
+		st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout)
 	}
 
 	sc.streams[id] = st
@@ -2405,7 +2388,6 @@ func (sc *serverConn) handlerDone() {
 
 // Run on its own goroutine.
 func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) {
-	sc.srv.markNewGoroutine()
 	defer sc.sendServeMsg(handlerDoneMsg)
 	didPanic := true
 	defer func() {
@@ -2454,7 +2436,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
 		// waiting for this frame to be written, so an http.Flush mid-handler
 		// writes out the correct value of keys, before a handler later potentially
 		// mutates it.
-		errc = errChanPool.Get().(chan error)
+		errc = getErrChan()
 	}
 	if err := sc.writeFrameFromHandler(FrameWriteRequest{
 		write:     headerData,
@@ -2466,7 +2448,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) error {
 	if errc != nil {
 		select {
 		case err := <-errc:
-			errChanPool.Put(errc)
+			putErrChan(errc)
 			return err
 		case <-sc.doneServing:
 			return errClientDisconnected
@@ -2573,7 +2555,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) {
 	if err == io.EOF {
 		b.sawEOF = true
 	}
-	if b.conn == nil && inTests {
+	if b.conn == nil {
 		return
 	}
 	b.conn.noteBodyReadFromHandler(b.stream, n, err)
@@ -2702,7 +2684,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
 		var date string
 		if _, ok := rws.snapHeader["Date"]; !ok {
 			// TODO(bradfitz): be faster here, like net/http? measure.
-			date = rws.conn.srv.now().UTC().Format(http.TimeFormat)
+			date = time.Now().UTC().Format(http.TimeFormat)
 		}
 
 		for _, v := range rws.snapHeader["Trailer"] {
@@ -2824,7 +2806,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() {
 
 func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onReadTimeout()
@@ -2840,9 +2822,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
 		if deadline.IsZero() {
 			st.readDeadline = nil
 		} else if st.readDeadline == nil {
-			st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout)
+			st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout)
 		} else {
-			st.readDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.readDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -2850,7 +2832,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error {
 
 func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
 	st := w.rws.stream
-	if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) {
+	if !deadline.IsZero() && deadline.Before(time.Now()) {
 		// If we're setting a deadline in the past, reset the stream immediately
 		// so writes after SetWriteDeadline returns will fail.
 		st.onWriteTimeout()
@@ -2866,9 +2848,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
 		if deadline.IsZero() {
 			st.writeDeadline = nil
 		} else if st.writeDeadline == nil {
-			st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout)
+			st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout)
 		} else {
-			st.writeDeadline.Reset(deadline.Sub(sc.srv.now()))
+			st.writeDeadline.Reset(deadline.Sub(time.Now()))
 		}
 	})
 	return nil
@@ -3147,7 +3129,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
 		method: opts.Method,
 		url:    u,
 		header: cloneHeader(opts.Header),
-		done:   errChanPool.Get().(chan error),
+		done:   getErrChan(),
 	}
 
 	select {
@@ -3164,7 +3146,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error {
 	case <-st.cw:
 		return errStreamClosed
 	case err := <-msg.done:
-		errChanPool.Put(msg.done)
+		putErrChan(msg.done)
 		return err
 	}
 }

vendor/golang.org/x/net/http2/timer.go (generated, vendored, deleted): 20 changed lines

@@ -1,20 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-package http2
-
-import "time"
-
-// A timer is a time.Timer, as an interface which can be replaced in tests.
-type timer = interface {
-	C() <-chan time.Time
-	Reset(d time.Duration) bool
-	Stop() bool
-}
-
-// timeTimer adapts a time.Timer to the timer interface.
-type timeTimer struct {
-	*time.Timer
-}
-
-func (t timeTimer) C() <-chan time.Time { return t.Timer.C }
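
With this test-only timer interface deleted, the server and transport code above holds *time.Timer directly, so C is a channel field rather than a method (hence the timer.C() -> timer.C changes in server.go and transport.go). A minimal sketch of the resulting pattern:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		tm := time.NewTimer(50 * time.Millisecond)
		defer tm.Stop()
		select {
		case <-tm.C: // field access; the deleted interface exposed this as C()
			fmt.Println("timer fired")
		}
	}
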

vendor/golang.org/x/net/http2/transport.go (generated, vendored): 94 changed lines

@@ -193,50 +193,6 @@ type Transport struct {
 
 type transportTestHooks struct {
 	newclientconn func(*ClientConn)
-	group         synctestGroupInterface
 }
-
-func (t *Transport) markNewGoroutine() {
-	if t != nil && t.transportTestHooks != nil {
-		t.transportTestHooks.group.Join()
-	}
-}
-
-func (t *Transport) now() time.Time {
-	if t != nil && t.transportTestHooks != nil {
-		return t.transportTestHooks.group.Now()
-	}
-	return time.Now()
-}
-
-func (t *Transport) timeSince(when time.Time) time.Duration {
-	if t != nil && t.transportTestHooks != nil {
-		return t.now().Sub(when)
-	}
-	return time.Since(when)
-}
-
-// newTimer creates a new time.Timer, or a synthetic timer in tests.
-func (t *Transport) newTimer(d time.Duration) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.NewTimer(d)
-	}
-	return timeTimer{time.NewTimer(d)}
-}
-
-// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests.
-func (t *Transport) afterFunc(d time.Duration, f func()) timer {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.AfterFunc(d, f)
-	}
-	return timeTimer{time.AfterFunc(d, f)}
-}
-
-func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) {
-	if t.transportTestHooks != nil {
-		return t.transportTestHooks.group.ContextWithTimeout(ctx, d)
-	}
-	return context.WithTimeout(ctx, d)
-}
 
 func (t *Transport) maxHeaderListSize() uint32 {
@@ -366,7 +322,7 @@ type ClientConn struct {
 	readerErr  error         // set before readerDone is closed
 
 	idleTimeout time.Duration // or 0 for never
-	idleTimer   timer
+	idleTimer   *time.Timer
 
 	mu    sync.Mutex // guards following
 	cond  *sync.Cond // hold mu; broadcast on flow/closed changes
@@ -534,14 +490,12 @@ func (cs *clientStream) closeReqBodyLocked() {
 	cs.reqBodyClosed = make(chan struct{})
 	reqBodyClosed := cs.reqBodyClosed
 	go func() {
-		cs.cc.t.markNewGoroutine()
 		cs.reqBody.Close()
 		close(reqBodyClosed)
 	}()
 }
 
 type stickyErrWriter struct {
-	group   synctestGroupInterface
 	conn    net.Conn
 	timeout time.Duration
 	err     *error
@@ -551,7 +505,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
 	if *sew.err != nil {
 		return 0, *sew.err
 	}
-	n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p)
+	n, err = writeWithByteTimeout(sew.conn, sew.timeout, p)
 	*sew.err = err
 	return n, err
 }
@@ -650,9 +604,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
 			backoff := float64(uint(1) << (uint(retry) - 1))
 			backoff += backoff * (0.1 * mathrand.Float64())
 			d := time.Second * time.Duration(backoff)
-			tm := t.newTimer(d)
+			tm := time.NewTimer(d)
 			select {
-			case <-tm.C():
+			case <-tm.C:
 				t.vlogf("RoundTrip retrying after failure: %v", roundTripErr)
 				continue
 			case <-req.Context().Done():
@@ -699,6 +653,7 @@ var (
 	errClientConnUnusable       = errors.New("http2: client conn not usable")
 	errClientConnNotEstablished = errors.New("http2: client conn could not be established")
 	errClientConnGotGoAway      = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+	errClientConnForceClosed    = errors.New("http2: client connection force closed via ClientConn.Close")
 )
 
 // shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -838,14 +793,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
 		pingTimeout:           conf.PingTimeout,
 		pings:                 make(map[[8]byte]chan struct{}),
 		reqHeaderMu:           make(chan struct{}, 1),
-		lastActive:            t.now(),
+		lastActive:            time.Now(),
 	}
-	var group synctestGroupInterface
 	if t.transportTestHooks != nil {
-		t.markNewGoroutine()
 		t.transportTestHooks.newclientconn(cc)
 		c = cc.tconn
-		group = t.group
 	}
 	if VerboseLogs {
 		t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr())
@@ -857,7 +809,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
 	// TODO: adjust this writer size to account for frame size +
 	// MTU + crypto/tls record padding.
 	cc.bw = bufio.NewWriter(stickyErrWriter{
-		group:   group,
 		conn:    c,
 		timeout: conf.WriteByteTimeout,
 		err:     &cc.werr,
@@ -906,7 +857,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
 	// Start the idle timer after the connection is fully initialized.
 	if d := t.idleConnTimeout(); d != 0 {
 		cc.idleTimeout = d
-		cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout)
+		cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout)
 	}
 
 	go cc.readLoop()
@@ -917,7 +868,7 @@ func (cc *ClientConn) healthCheck() {
 	pingTimeout := cc.pingTimeout
 	// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
 	// trigger the healthCheck again if there is no frame received.
-	ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout)
+	ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
 	defer cancel()
 	cc.vlogf("http2: Transport sending health check")
 	err := cc.Ping(ctx)
@@ -1120,7 +1071,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
 	// times are compared based on their wall time. We don't want
 	// to reuse a connection that's been sitting idle during
 	// VM/laptop suspend if monotonic time was also frozen.
-	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
+	return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
 }
 
 // onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1186,7 +1137,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error {
 	done := make(chan struct{})
 	cancelled := false // guarded by cc.mu
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.mu.Lock()
 		defer cc.mu.Unlock()
 		for {
@@ -1257,8 +1207,7 @@ func (cc *ClientConn) closeForError(err error) {
 //
 // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
 func (cc *ClientConn) Close() error {
-	err := errors.New("http2: client connection force closed via ClientConn.Close")
-	cc.closeForError(err)
+	cc.closeForError(errClientConnForceClosed)
 	return nil
 }
 
@@ -1427,7 +1376,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) {
 //
 // It sends the request and performs post-request cleanup (closing Request.Body, etc.).
 func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) {
-	cs.cc.t.markNewGoroutine()
 	err := cs.writeRequest(req, streamf)
 	cs.cleanupWriteRequest(err)
 }
@@ -1558,9 +1506,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStream)) (err error) {
 	var respHeaderTimer <-chan time.Time
 	var respHeaderRecv chan struct{}
 	if d := cc.responseHeaderTimeout(); d != 0 {
-		timer := cc.t.newTimer(d)
+		timer := time.NewTimer(d)
 		defer timer.Stop()
-		respHeaderTimer = timer.C()
+		respHeaderTimer = timer.C
 		respHeaderRecv = cs.respHeaderRecv
 	}
 	// Wait until the peer half-closes its end of the stream,
@@ -1753,7 +1701,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
 			// Return a fatal error which aborts the retry loop.
 			return errClientConnNotEstablished
 		}
-		cc.lastActive = cc.t.now()
+		cc.lastActive = time.Now()
 		if cc.closed || !cc.canTakeNewRequestLocked() {
 			return errClientConnUnusable
 		}
@@ -2092,10 +2040,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
 	if len(cc.streams) != slen-1 {
 		panic("forgetting unknown stream id")
 	}
-	cc.lastActive = cc.t.now()
+	cc.lastActive = time.Now()
 	if len(cc.streams) == 0 && cc.idleTimer != nil {
 		cc.idleTimer.Reset(cc.idleTimeout)
-		cc.lastIdle = cc.t.now()
+		cc.lastIdle = time.Now()
 	}
 	// Wake up writeRequestBody via clientStream.awaitFlowControl and
 	// wake up RoundTrip if there is a pending request.
@@ -2121,7 +2069,6 @@ type clientConnReadLoop struct {
 
 // readLoop runs in its own goroutine and reads and dispatches frames.
 func (cc *ClientConn) readLoop() {
-	cc.t.markNewGoroutine()
 	rl := &clientConnReadLoop{cc: cc}
 	defer rl.cleanup()
 	cc.readerErr = rl.run()
@@ -2188,9 +2135,9 @@ func (rl *clientConnReadLoop) cleanup() {
 	if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
 		unusedWaitTime = cc.idleTimeout
 	}
-	idleTime := cc.t.now().Sub(cc.lastActive)
+	idleTime := time.Now().Sub(cc.lastActive)
 	if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
-		cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+		cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() {
 			cc.t.connPool().MarkDead(cc)
 		})
 	} else {
@@ -2250,9 +2197,9 @@ func (rl *clientConnReadLoop) run() error {
 	cc := rl.cc
 	gotSettings := false
 	readIdleTimeout := cc.readIdleTimeout
-	var t timer
+	var t *time.Timer
 	if readIdleTimeout != 0 {
-		t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck)
+		t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
 	}
 	for {
 		f, err := cc.fr.ReadFrame()
@@ -2998,7 +2945,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
 	var pingError error
 	errc := make(chan struct{})
 	go func() {
-		cc.t.markNewGoroutine()
 		cc.wmu.Lock()
 		defer cc.wmu.Unlock()
 		if pingError = cc.fr.WritePing(false, p); pingError != nil {
@@ -3228,7 +3174,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
 	cc.mu.Lock()
 	ci.WasIdle = len(cc.streams) == 0 && reused
 	if ci.WasIdle && !cc.lastActive.IsZero() {
-		ci.IdleTime = cc.t.timeSince(cc.lastActive)
+		ci.IdleTime = time.Since(cc.lastActive)
 	}
 	cc.mu.Unlock()