forked from toolshed/abra
chore: bump deps
vendor/github.com/cenkalti/backoff/v4/context.go (generated, vendored; 62 lines deleted)
@@ -1,62 +0,0 @@
package backoff

import (
    "context"
    "time"
)

// BackOffContext is a backoff policy that stops retrying after the context
// is canceled.
type BackOffContext interface { // nolint: golint
    BackOff
    Context() context.Context
}

type backOffContext struct {
    BackOff
    ctx context.Context
}

// WithContext returns a BackOffContext with context ctx
//
// ctx must not be nil
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
    if ctx == nil {
        panic("nil context")
    }

    if b, ok := b.(*backOffContext); ok {
        return &backOffContext{
            BackOff: b.BackOff,
            ctx:     ctx,
        }
    }

    return &backOffContext{
        BackOff: b,
        ctx:     ctx,
    }
}

func getContext(b BackOff) context.Context {
    if cb, ok := b.(BackOffContext); ok {
        return cb.Context()
    }
    if tb, ok := b.(*backOffTries); ok {
        return getContext(tb.delegate)
    }
    return context.Background()
}

func (b *backOffContext) Context() context.Context {
    return b.ctx
}

func (b *backOffContext) NextBackOff() time.Duration {
    select {
    case <-b.ctx.Done():
        return Stop
    default:
        return b.BackOff.NextBackOff()
    }
}
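Not part of the diff: a minimal sketch of how the removed v4 WithContext wrapper is typically combined with Retry (defined in the v4 retry.go further below). The doSomething operation and the 5-second timeout are illustrative assumptions.

package main

import (
    "context"
    "errors"
    "time"

    backoff "github.com/cenkalti/backoff/v4"
)

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    // doSomething is a hypothetical operation that may fail transiently.
    doSomething := func() error { return errors.New("temporary failure") }

    // WithContext wraps the policy so Retry stops as soon as ctx is done.
    b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
    if err := backoff.Retry(doSomething, b); err != nil {
        // err is ctx.Err() when the context expires before the operation succeeds.
        _ = err
    }
}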
vendor/github.com/cenkalti/backoff/v4/exponential.go (generated, vendored; 216 lines deleted)
@@ -1,216 +0,0 @@
package backoff

import (
    "math/rand"
    "time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

    randomized interval =
        RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

    RetryInterval = 2
    RandomizationFactor = 0.5
    Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

If the time elapsed since an ExponentialBackOff instance is created goes past the
MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop.

The elapsed time can be reset by calling Reset().

Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:

    Request #  RetryInterval (seconds)  Randomized Interval (seconds)

     1          0.5                     [0.25,   0.75]
     2          0.75                    [0.375,  1.125]
     3          1.125                   [0.562,  1.687]
     4          1.687                   [0.8435, 2.53]
     5          2.53                    [1.265,  3.795]
     6          3.795                   [1.897,  5.692]
     7          5.692                   [2.846,  8.538]
     8          8.538                   [4.269, 12.807]
     9         12.807                   [6.403, 19.210]
    10         19.210                   backoff.Stop

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
    InitialInterval     time.Duration
    RandomizationFactor float64
    Multiplier          float64
    MaxInterval         time.Duration
    // After MaxElapsedTime the ExponentialBackOff returns Stop.
    // It never stops if MaxElapsedTime == 0.
    MaxElapsedTime time.Duration
    Stop           time.Duration
    Clock          Clock

    currentInterval time.Duration
    startTime       time.Time
}

// Clock is an interface that returns current time for BackOff.
type Clock interface {
    Now() time.Time
}

// ExponentialBackOffOpts is a function type used to configure ExponentialBackOff options.
type ExponentialBackOffOpts func(*ExponentialBackOff)

// Default values for ExponentialBackOff.
const (
    DefaultInitialInterval     = 500 * time.Millisecond
    DefaultRandomizationFactor = 0.5
    DefaultMultiplier          = 1.5
    DefaultMaxInterval         = 60 * time.Second
    DefaultMaxElapsedTime      = 15 * time.Minute
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff(opts ...ExponentialBackOffOpts) *ExponentialBackOff {
    b := &ExponentialBackOff{
        InitialInterval:     DefaultInitialInterval,
        RandomizationFactor: DefaultRandomizationFactor,
        Multiplier:          DefaultMultiplier,
        MaxInterval:         DefaultMaxInterval,
        MaxElapsedTime:      DefaultMaxElapsedTime,
        Stop:                Stop,
        Clock:               SystemClock,
    }
    for _, fn := range opts {
        fn(b)
    }
    b.Reset()
    return b
}

// WithInitialInterval sets the initial interval between retries.
func WithInitialInterval(duration time.Duration) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.InitialInterval = duration
    }
}

// WithRandomizationFactor sets the randomization factor to add jitter to intervals.
func WithRandomizationFactor(randomizationFactor float64) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.RandomizationFactor = randomizationFactor
    }
}

// WithMultiplier sets the multiplier for increasing the interval after each retry.
func WithMultiplier(multiplier float64) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.Multiplier = multiplier
    }
}

// WithMaxInterval sets the maximum interval between retries.
func WithMaxInterval(duration time.Duration) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.MaxInterval = duration
    }
}

// WithMaxElapsedTime sets the maximum total time for retries.
func WithMaxElapsedTime(duration time.Duration) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.MaxElapsedTime = duration
    }
}

// WithRetryStopDuration sets the duration after which retries should stop.
func WithRetryStopDuration(duration time.Duration) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.Stop = duration
    }
}

// WithClockProvider sets the clock used to measure time.
func WithClockProvider(clock Clock) ExponentialBackOffOpts {
    return func(ebo *ExponentialBackOff) {
        ebo.Clock = clock
    }
}

type systemClock struct{}

func (t systemClock) Now() time.Time {
    return time.Now()
}

// SystemClock implements Clock interface that uses time.Now().
var SystemClock = systemClock{}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
    b.currentInterval = b.InitialInterval
    b.startTime = b.Clock.Now()
}

// NextBackOff calculates the next backoff interval using the formula:
//  Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
    // Make sure we have not gone over the maximum elapsed time.
    elapsed := b.GetElapsedTime()
    next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
    b.incrementCurrentInterval()
    if b.MaxElapsedTime != 0 && elapsed+next > b.MaxElapsedTime {
        return b.Stop
    }
    return next
}

// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance
// is created and is reset when Reset() is called.
//
// The elapsed time is computed using time.Now().UnixNano(). It is
// safe to call even while the backoff policy is used by a running
// ticker.
func (b *ExponentialBackOff) GetElapsedTime() time.Duration {
    return b.Clock.Now().Sub(b.startTime)
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
    // Check for overflow, if overflow is detected set the current interval to the max interval.
    if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
        b.currentInterval = b.MaxInterval
    } else {
        b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
    }
}

// Returns a random value from the following interval:
//  [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
    if randomizationFactor == 0 {
        return currentInterval // make sure no randomness is used when randomizationFactor is 0.
    }
    var delta = randomizationFactor * float64(currentInterval)
    var minInterval = float64(currentInterval) - delta
    var maxInterval = float64(currentInterval) + delta

    // Get a random value from the range [minInterval, maxInterval].
    // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
    // we want a 33% chance for selecting either 1, 2 or 3.
    return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
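Not part of the diff: a short sketch of the v4 functional options being removed here, paired with RetryNotify from the v4 retry.go below. The fetch operation and the chosen durations are assumptions.

package main

import (
    "errors"
    "log"
    "time"

    backoff "github.com/cenkalti/backoff/v4"
)

func main() {
    // Tune the policy with the option functions defined above.
    b := backoff.NewExponentialBackOff(
        backoff.WithInitialInterval(200*time.Millisecond),
        backoff.WithMultiplier(2.0),
        backoff.WithMaxInterval(10*time.Second),
        backoff.WithMaxElapsedTime(2*time.Minute),
    )

    // fetch is a hypothetical flaky operation.
    fetch := func() error { return errors.New("upstream not ready") }

    // RetryNotify reports every failed attempt together with the next wait time.
    err := backoff.RetryNotify(fetch, b, func(err error, next time.Duration) {
        log.Printf("attempt failed: %v, retrying in %s", err, next)
    })
    _ = err
}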
vendor/github.com/cenkalti/backoff/v4/retry.go (generated, vendored; 146 lines deleted)
@@ -1,146 +0,0 @@
package backoff

import (
    "errors"
    "time"
)

// An OperationWithData is executing by RetryWithData() or RetryNotifyWithData().
// The operation will be retried using a backoff policy if it returns an error.
type OperationWithData[T any] func() (T, error)

// An Operation is executing by Retry() or RetryNotify().
// The operation will be retried using a backoff policy if it returns an error.
type Operation func() error

func (o Operation) withEmptyData() OperationWithData[struct{}] {
    return func() (struct{}, error) {
        return struct{}{}, o()
    }
}

// Notify is a notify-on-error function. It receives an operation error and
// backoff delay if the operation failed (with an error).
//
// NOTE that if the backoff policy stated to stop retrying,
// the notify function isn't called.
type Notify func(error, time.Duration)

// Retry the operation o until it does not return error or BackOff stops.
// o is guaranteed to be run at least once.
//
// If o returns a *PermanentError, the operation is not retried, and the
// wrapped error is returned.
//
// Retry sleeps the goroutine for the duration returned by BackOff after a
// failed operation returns.
func Retry(o Operation, b BackOff) error {
    return RetryNotify(o, b, nil)
}

// RetryWithData is like Retry but returns data in the response too.
func RetryWithData[T any](o OperationWithData[T], b BackOff) (T, error) {
    return RetryNotifyWithData(o, b, nil)
}

// RetryNotify calls notify function with the error and wait duration
// for each failed attempt before sleep.
func RetryNotify(operation Operation, b BackOff, notify Notify) error {
    return RetryNotifyWithTimer(operation, b, notify, nil)
}

// RetryNotifyWithData is like RetryNotify but returns data in the response too.
func RetryNotifyWithData[T any](operation OperationWithData[T], b BackOff, notify Notify) (T, error) {
    return doRetryNotify(operation, b, notify, nil)
}

// RetryNotifyWithTimer calls notify function with the error and wait duration using the given Timer
// for each failed attempt before sleep.
// A default timer that uses system timer is used when nil is passed.
func RetryNotifyWithTimer(operation Operation, b BackOff, notify Notify, t Timer) error {
    _, err := doRetryNotify(operation.withEmptyData(), b, notify, t)
    return err
}

// RetryNotifyWithTimerAndData is like RetryNotifyWithTimer but returns data in the response too.
func RetryNotifyWithTimerAndData[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
    return doRetryNotify(operation, b, notify, t)
}

func doRetryNotify[T any](operation OperationWithData[T], b BackOff, notify Notify, t Timer) (T, error) {
    var (
        err  error
        next time.Duration
        res  T
    )
    if t == nil {
        t = &defaultTimer{}
    }

    defer func() {
        t.Stop()
    }()

    ctx := getContext(b)

    b.Reset()
    for {
        res, err = operation()
        if err == nil {
            return res, nil
        }

        var permanent *PermanentError
        if errors.As(err, &permanent) {
            return res, permanent.Err
        }

        if next = b.NextBackOff(); next == Stop {
            if cerr := ctx.Err(); cerr != nil {
                return res, cerr
            }

            return res, err
        }

        if notify != nil {
            notify(err, next)
        }

        t.Start(next)

        select {
        case <-ctx.Done():
            return res, ctx.Err()
        case <-t.C():
        }
    }
}

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
    Err error
}

func (e *PermanentError) Error() string {
    return e.Err.Error()
}

func (e *PermanentError) Unwrap() error {
    return e.Err
}

func (e *PermanentError) Is(target error) bool {
    _, ok := target.(*PermanentError)
    return ok
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
    if err == nil {
        return nil
    }
    return &PermanentError{
        Err: err,
    }
}
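Not part of the diff: a sketch of the generic v4 helpers above, returning typed data and aborting early with Permanent. The operation bodies and attempt counts are assumptions.

package main

import (
    "fmt"

    backoff "github.com/cenkalti/backoff/v4"
)

func main() {
    attempts := 0
    // The operation returns data; wrapping an error with Permanent stops retrying immediately.
    op := func() (string, error) {
        attempts++
        if attempts < 3 {
            return "", fmt.Errorf("transient error on attempt %d", attempts)
        }
        return "payload", nil
    }

    result, err := backoff.RetryWithData(op, backoff.NewExponentialBackOff())
    fmt.Println(result, err) // "payload <nil>" after the third attempt

    // A permanent failure is returned unwrapped without further retries:
    _, err = backoff.RetryWithData(func() (string, error) {
        return "", backoff.Permanent(fmt.Errorf("bad credentials"))
    }, backoff.NewExponentialBackOff())
    fmt.Println(err) // "bad credentials"
}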
vendor/github.com/cenkalti/backoff/v4/tries.go (generated, vendored; 38 lines deleted)
@@ -1,38 +0,0 @@
package backoff

import "time"

/*
WithMaxRetries creates a wrapper around another BackOff, which will
return Stop if NextBackOff() has been called too many times since
the last time Reset() was called

Note: Implementation is not thread-safe.
*/
func WithMaxRetries(b BackOff, max uint64) BackOff {
    return &backOffTries{delegate: b, maxTries: max}
}

type backOffTries struct {
    delegate BackOff
    maxTries uint64
    numTries uint64
}

func (b *backOffTries) NextBackOff() time.Duration {
    if b.maxTries == 0 {
        return Stop
    }
    if b.maxTries > 0 {
        if b.maxTries <= b.numTries {
            return Stop
        }
        b.numTries++
    }
    return b.delegate.NextBackOff()
}

func (b *backOffTries) Reset() {
    b.numTries = 0
    b.delegate.Reset()
}
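Not part of the diff: a sketch of the WithMaxRetries wrapper above; the failing operation is an assumption.

package main

import (
    "errors"
    "fmt"

    backoff "github.com/cenkalti/backoff/v4"
)

func main() {
    // Allow at most 4 calls to NextBackOff before the policy reports Stop.
    policy := backoff.WithMaxRetries(backoff.NewExponentialBackOff(), 4)

    calls := 0
    err := backoff.Retry(func() error {
        calls++
        return errors.New("still failing")
    }, policy)

    // The first try plus 4 retries: 5 attempts in total, then the last error is returned.
    fmt.Println(calls, err)
}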
vendor/github.com/cenkalti/backoff/v5/CHANGELOG.md (generated, vendored; new file, 29 lines)
@@ -0,0 +1,29 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [5.0.0] - 2024-12-19

### Added

- RetryAfterError can be returned from an operation to indicate how long to wait before the next retry.

### Changed

- Retry function now accepts additional options for specifying max number of tries and max elapsed time.
- Retry function now accepts a context.Context.
- Operation function signature changed to return result (any type) and error.

### Removed

- RetryNotify* and RetryWithData functions. Only single Retry function remains.
- Optional arguments from ExponentialBackoff constructor.
- Clock and Timer interfaces.

### Fixed

- The original error is returned from Retry if there's a PermanentError. (#144)
- The Retry function respects the wrapped PermanentError. (#140)
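Not part of the diff: a minimal sketch of what the Changed entries above mean for callers migrating from v4 to v5; fetchToken and the limits are illustrative assumptions.

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    backoff "github.com/cenkalti/backoff/v5"
)

// fetchToken is a hypothetical operation; in v5 operations return a result and an error.
func fetchToken() (string, error) { return "", errors.New("not ready") }

func main() {
    ctx := context.Background()

    // v4 was roughly:
    //   err := backoff.Retry(op, backoff.WithContext(backoff.NewExponentialBackOff(), ctx))
    // v5 takes the context directly, returns the operation's result, and moves limits into options.
    token, err := backoff.Retry(ctx, fetchToken,
        backoff.WithMaxTries(5),
        backoff.WithMaxElapsedTime(2*time.Minute))
    fmt.Println(token, err)
}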
vendor/github.com/cenkalti/backoff/v5/README.md
@@ -1,4 +1,4 @@
-# Exponential Backoff [![GoDoc][godoc image]][godoc] [![Coverage Status][coveralls image]][coveralls]
+# Exponential Backoff [![GoDoc][godoc image]][godoc]
 
 This is a Go port of the exponential backoff algorithm from [Google's HTTP Client Library for Java][google-http-java-client].
 
@@ -9,9 +9,11 @@ The retries exponentially increase and stop increasing when a certain threshold
 
 ## Usage
 
-Import path is `github.com/cenkalti/backoff/v4`. Please note the version part at the end.
+Import path is `github.com/cenkalti/backoff/v5`. Please note the version part at the end.
 
-Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
+For most cases, use `Retry` function. See [example_test.go][example] for an example.
+
+If you have specific needs, copy `Retry` function (from [retry.go][retry-src]) into your code and modify it as needed.
 
 ## Contributing
 
@@ -19,12 +21,11 @@ Use https://pkg.go.dev/github.com/cenkalti/backoff/v4 to view the documentation.
 * Please don't send a PR without opening an issue and discussing it first.
 * If proposed change is not a common use case, I will probably not accept it.
 
-[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v4
+[godoc]: https://pkg.go.dev/github.com/cenkalti/backoff/v5
 [godoc image]: https://godoc.org/github.com/cenkalti/backoff?status.png
-[coveralls]: https://coveralls.io/github/cenkalti/backoff?branch=master
-[coveralls image]: https://coveralls.io/repos/github/cenkalti/backoff/badge.svg?branch=master
 
 [google-http-java-client]: https://github.com/google/google-http-java-client/blob/da1aa993e90285ec18579f1553339b00e19b3ab5/google-http-client/src/main/java/com/google/api/client/util/ExponentialBackOff.java
 [exponential backoff wiki]: http://en.wikipedia.org/wiki/Exponential_backoff
 
-[advanced example]: https://pkg.go.dev/github.com/cenkalti/backoff/v4?tab=doc#pkg-examples
+[retry-src]: https://github.com/cenkalti/backoff/blob/v5/retry.go
+[example]: https://github.com/cenkalti/backoff/blob/v5/example_test.go
vendor/github.com/cenkalti/backoff/v5/backoff.go
@@ -15,16 +15,16 @@ import "time"
 // BackOff is a backoff policy for retrying an operation.
 type BackOff interface {
     // NextBackOff returns the duration to wait before retrying the operation,
-    // or backoff.Stop to indicate that no more retries should be made.
+    // backoff.Stop to indicate that no more retries should be made.
     //
     // Example usage:
     //
-    //  duration := backoff.NextBackOff();
-    //  if (duration == backoff.Stop) {
-    //      // Do not retry operation.
-    //  } else {
-    //      // Sleep for duration and retry operation.
-    //  }
+    //  duration := backoff.NextBackOff()
+    //  if duration == backoff.Stop {
+    //      // Do not retry operation.
+    //  } else {
+    //      // Sleep for duration and retry operation.
+    //  }
     //
     NextBackOff() time.Duration
 
vendor/github.com/cenkalti/backoff/v5/error.go (generated, vendored; new file, 46 lines)
@@ -0,0 +1,46 @@
package backoff

import (
    "fmt"
    "time"
)

// PermanentError signals that the operation should not be retried.
type PermanentError struct {
    Err error
}

// Permanent wraps the given err in a *PermanentError.
func Permanent(err error) error {
    if err == nil {
        return nil
    }
    return &PermanentError{
        Err: err,
    }
}

// Error returns a string representation of the Permanent error.
func (e *PermanentError) Error() string {
    return e.Err.Error()
}

// Unwrap returns the wrapped error.
func (e *PermanentError) Unwrap() error {
    return e.Err
}

// RetryAfterError signals that the operation should be retried after the given duration.
type RetryAfterError struct {
    Duration time.Duration
}

// RetryAfter returns a RetryAfter error that specifies how long to wait before retrying.
func RetryAfter(seconds int) error {
    return &RetryAfterError{Duration: time.Duration(seconds) * time.Second}
}

// Error returns a string representation of the RetryAfter error.
func (e *RetryAfterError) Error() string {
    return fmt.Sprintf("retry after %s", e.Duration)
}
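Not part of the diff: a sketch of returning the error types above from an operation so v5 Retry treats rate limiting and client errors differently. The URL and the status-code mapping are assumptions.

package main

import (
    "context"
    "fmt"
    "net/http"

    backoff "github.com/cenkalti/backoff/v5"
)

func main() {
    // callAPI maps HTTP status codes onto the error types defined above.
    callAPI := func() (int, error) {
        resp, err := http.Get("https://example.com/api") // placeholder URL
        if err != nil {
            return 0, err // transient: retried on the normal backoff schedule
        }
        defer resp.Body.Close()
        switch {
        case resp.StatusCode == http.StatusTooManyRequests:
            return 0, backoff.RetryAfter(30) // wait 30 seconds before the next try
        case resp.StatusCode >= 400 && resp.StatusCode < 500:
            return 0, backoff.Permanent(fmt.Errorf("client error: %d", resp.StatusCode))
        }
        return resp.StatusCode, nil
    }

    status, err := backoff.Retry(context.Background(), callAPI)
    fmt.Println(status, err)
}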
vendor/github.com/cenkalti/backoff/v5/exponential.go (generated, vendored; new file, 118 lines)
@@ -0,0 +1,118 @@
package backoff

import (
    "math/rand/v2"
    "time"
)

/*
ExponentialBackOff is a backoff implementation that increases the backoff
period for each retry attempt using a randomization function that grows exponentially.

NextBackOff() is calculated using the following formula:

    randomized interval =
        RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])

In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.

For example, given the following parameters:

    RetryInterval = 2
    RandomizationFactor = 0.5
    Multiplier = 2

the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.

Note: MaxInterval caps the RetryInterval and not the randomized interval.

Example: Given the following default arguments, for 9 tries the sequence will be:

    Request #  RetryInterval (seconds)  Randomized Interval (seconds)

     1          0.5                     [0.25,   0.75]
     2          0.75                    [0.375,  1.125]
     3          1.125                   [0.562,  1.687]
     4          1.687                   [0.8435, 2.53]
     5          2.53                    [1.265,  3.795]
     6          3.795                   [1.897,  5.692]
     7          5.692                   [2.846,  8.538]
     8          8.538                   [4.269, 12.807]
     9         12.807                   [6.403, 19.210]

Note: Implementation is not thread-safe.
*/
type ExponentialBackOff struct {
    InitialInterval     time.Duration
    RandomizationFactor float64
    Multiplier          float64
    MaxInterval         time.Duration

    currentInterval time.Duration
}

// Default values for ExponentialBackOff.
const (
    DefaultInitialInterval     = 500 * time.Millisecond
    DefaultRandomizationFactor = 0.5
    DefaultMultiplier          = 1.5
    DefaultMaxInterval         = 60 * time.Second
)

// NewExponentialBackOff creates an instance of ExponentialBackOff using default values.
func NewExponentialBackOff() *ExponentialBackOff {
    return &ExponentialBackOff{
        InitialInterval:     DefaultInitialInterval,
        RandomizationFactor: DefaultRandomizationFactor,
        Multiplier:          DefaultMultiplier,
        MaxInterval:         DefaultMaxInterval,
    }
}

// Reset the interval back to the initial retry interval and restarts the timer.
// Reset must be called before using b.
func (b *ExponentialBackOff) Reset() {
    b.currentInterval = b.InitialInterval
}

// NextBackOff calculates the next backoff interval using the formula:
//
//  Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
    if b.currentInterval == 0 {
        b.currentInterval = b.InitialInterval
    }

    next := getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval)
    b.incrementCurrentInterval()
    return next
}

// Increments the current interval by multiplying it with the multiplier.
func (b *ExponentialBackOff) incrementCurrentInterval() {
    // Check for overflow, if overflow is detected set the current interval to the max interval.
    if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier {
        b.currentInterval = b.MaxInterval
    } else {
        b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier)
    }
}

// Returns a random value from the following interval:
//
//  [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
    if randomizationFactor == 0 {
        return currentInterval // make sure no randomness is used when randomizationFactor is 0.
    }
    var delta = randomizationFactor * float64(currentInterval)
    var minInterval = float64(currentInterval) - delta
    var maxInterval = float64(currentInterval) + delta

    // Get a random value from the range [minInterval, maxInterval].
    // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then
    // we want a 33% chance for selecting either 1, 2 or 3.
    return time.Duration(minInterval + (random * (maxInterval - minInterval + 1)))
}
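Not part of the diff: a sketch of tuning the v5 ExponentialBackOff directly on the struct (the options constructor and the MaxElapsedTime field no longer exist here) and passing it to Retry via WithBackOff. The values and the operation are assumptions.

package main

import (
    "context"
    "errors"
    "time"

    backoff "github.com/cenkalti/backoff/v5"
)

func main() {
    // In v5 elapsed-time limits live in the Retry options rather than on the policy.
    policy := &backoff.ExponentialBackOff{
        InitialInterval:     200 * time.Millisecond,
        RandomizationFactor: backoff.DefaultRandomizationFactor,
        Multiplier:          2.0,
        MaxInterval:         10 * time.Second,
    }

    op := func() (struct{}, error) { return struct{}{}, errors.New("not yet") }

    _, err := backoff.Retry(context.Background(), op,
        backoff.WithBackOff(policy),
        backoff.WithMaxElapsedTime(time.Minute))
    _ = err
}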
vendor/github.com/cenkalti/backoff/v5/retry.go (generated, vendored; new file, 139 lines)
@@ -0,0 +1,139 @@
package backoff

import (
    "context"
    "errors"
    "time"
)

// DefaultMaxElapsedTime sets a default limit for the total retry duration.
const DefaultMaxElapsedTime = 15 * time.Minute

// Operation is a function that attempts an operation and may be retried.
type Operation[T any] func() (T, error)

// Notify is a function called on operation error with the error and backoff duration.
type Notify func(error, time.Duration)

// retryOptions holds configuration settings for the retry mechanism.
type retryOptions struct {
    BackOff        BackOff       // Strategy for calculating backoff periods.
    Timer          timer         // Timer to manage retry delays.
    Notify         Notify        // Optional function to notify on each retry error.
    MaxTries       uint          // Maximum number of retry attempts.
    MaxElapsedTime time.Duration // Maximum total time for all retries.
}

type RetryOption func(*retryOptions)

// WithBackOff configures a custom backoff strategy.
func WithBackOff(b BackOff) RetryOption {
    return func(args *retryOptions) {
        args.BackOff = b
    }
}

// withTimer sets a custom timer for managing delays between retries.
func withTimer(t timer) RetryOption {
    return func(args *retryOptions) {
        args.Timer = t
    }
}

// WithNotify sets a notification function to handle retry errors.
func WithNotify(n Notify) RetryOption {
    return func(args *retryOptions) {
        args.Notify = n
    }
}

// WithMaxTries limits the number of all attempts.
func WithMaxTries(n uint) RetryOption {
    return func(args *retryOptions) {
        args.MaxTries = n
    }
}

// WithMaxElapsedTime limits the total duration for retry attempts.
func WithMaxElapsedTime(d time.Duration) RetryOption {
    return func(args *retryOptions) {
        args.MaxElapsedTime = d
    }
}

// Retry attempts the operation until success, a permanent error, or backoff completion.
// It ensures the operation is executed at least once.
//
// Returns the operation result or error if retries are exhausted or context is cancelled.
func Retry[T any](ctx context.Context, operation Operation[T], opts ...RetryOption) (T, error) {
    // Initialize default retry options.
    args := &retryOptions{
        BackOff:        NewExponentialBackOff(),
        Timer:          &defaultTimer{},
        MaxElapsedTime: DefaultMaxElapsedTime,
    }

    // Apply user-provided options to the default settings.
    for _, opt := range opts {
        opt(args)
    }

    defer args.Timer.Stop()

    startedAt := time.Now()
    args.BackOff.Reset()
    for numTries := uint(1); ; numTries++ {
        // Execute the operation.
        res, err := operation()
        if err == nil {
            return res, nil
        }

        // Stop retrying if maximum tries exceeded.
        if args.MaxTries > 0 && numTries >= args.MaxTries {
            return res, err
        }

        // Handle permanent errors without retrying.
        var permanent *PermanentError
        if errors.As(err, &permanent) {
            return res, permanent.Unwrap()
        }

        // Stop retrying if context is cancelled.
        if cerr := context.Cause(ctx); cerr != nil {
            return res, cerr
        }

        // Calculate next backoff duration.
        next := args.BackOff.NextBackOff()
        if next == Stop {
            return res, err
        }

        // Reset backoff if RetryAfterError is encountered.
        var retryAfter *RetryAfterError
        if errors.As(err, &retryAfter) {
            next = retryAfter.Duration
            args.BackOff.Reset()
        }

        // Stop retrying if maximum elapsed time exceeded.
        if args.MaxElapsedTime > 0 && time.Since(startedAt)+next > args.MaxElapsedTime {
            return res, err
        }

        // Notify on error if a notifier function is provided.
        if args.Notify != nil {
            args.Notify(err, next)
        }

        // Wait for the next backoff period or context cancellation.
        args.Timer.Start(next)
        select {
        case <-args.Timer.C():
        case <-ctx.Done():
            return res, context.Cause(ctx)
        }
    }
}
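Not part of the diff: a sketch of the new single Retry entry point with a bounded number of tries and a notify callback; the operation and the limits are assumptions.

package main

import (
    "context"
    "errors"
    "log"
    "time"

    backoff "github.com/cenkalti/backoff/v5"
)

func main() {
    attempts := 0
    op := func() ([]byte, error) {
        attempts++
        if attempts < 3 {
            return nil, errors.New("transient failure")
        }
        return []byte("ok"), nil
    }

    data, err := backoff.Retry(context.Background(), op,
        backoff.WithMaxTries(10),
        backoff.WithNotify(func(err error, next time.Duration) {
            log.Printf("attempt %d failed: %v, next try in %s", attempts, err, next)
        }))
    log.Println(string(data), err)
}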
vendor/github.com/cenkalti/backoff/v5/ticker.go
@@ -1,7 +1,6 @@
 package backoff
 
 import (
-    "context"
     "sync"
     "time"
 )
@@ -14,8 +13,7 @@ type Ticker struct {
     C        <-chan time.Time
     c        chan time.Time
     b        BackOff
-    ctx      context.Context
-    timer    Timer
+    timer    timer
     stop     chan struct{}
     stopOnce sync.Once
 }
@@ -27,22 +25,12 @@ type Ticker struct {
 // provided backoff policy (notably calling NextBackOff or Reset)
 // while the ticker is running.
 func NewTicker(b BackOff) *Ticker {
-    return NewTickerWithTimer(b, &defaultTimer{})
-}
-
-// NewTickerWithTimer returns a new Ticker with a custom timer.
-// A default timer that uses system timer is used when nil is passed.
-func NewTickerWithTimer(b BackOff, timer Timer) *Ticker {
-    if timer == nil {
-        timer = &defaultTimer{}
-    }
     c := make(chan time.Time)
     t := &Ticker{
         C:     c,
         c:     c,
         b:     b,
-        ctx:   getContext(b),
-        timer: timer,
+        timer: &defaultTimer{},
         stop:  make(chan struct{}),
     }
     t.b.Reset()
@@ -73,8 +61,6 @@ func (t *Ticker) run() {
         case <-t.stop:
             t.c = nil // Prevent future ticks from being sent to the channel.
             return
-        case <-t.ctx.Done():
-            return
         }
     }
 }
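Not part of the diff: a sketch of the surviving NewTicker API after this change. Ticker.Stop and the exported C channel are assumed from the rest of ticker.go, which these hunks do not show; the operation is illustrative.

package main

import (
    "fmt"

    backoff "github.com/cenkalti/backoff/v5"
)

func main() {
    ticker := backoff.NewTicker(backoff.NewExponentialBackOff())
    defer ticker.Stop()

    attempts := 0
    // Each tick arrives after the next backoff interval; perform one attempt per tick.
    for range ticker.C {
        attempts++
        if attempts >= 3 {
            fmt.Println("succeeded on attempt", attempts)
            break
        }
        fmt.Println("attempt failed, waiting for the next tick")
    }
}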
vendor/github.com/cenkalti/backoff/v5/timer.go
@@ -2,7 +2,7 @@ package backoff
 
 import "time"
 
-type Timer interface {
+type timer interface {
     Start(duration time.Duration)
     Stop()
     C() <-chan time.Time