Work on #61: Add support for ICMP

+ Update dependencies
TwinProduction
2020-12-25 00:07:18 -05:00
parent c86173d46f
commit 83a5813daf
1004 changed files with 182274 additions and 64323 deletions


@ -28,7 +28,7 @@ import (
"io/ioutil"
"math/big"
"net"
"path"
"path/filepath"
"strings"
"time"
@ -96,8 +96,8 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
maxAge := time.Hour * 24 * 365 // one year self-signed certs
baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
if len(fixtureDirectory) > 0 {
cert, err := ioutil.ReadFile(certFixturePath)
if err == nil {
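
A note on the one-line change above: path.Join always produces forward-slash paths, while filepath.Join uses the platform separator, which matters once fixture paths are built on Windows. A minimal illustration:

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join is for slash-separated paths (URLs, archives) on every OS.
	fmt.Println(path.Join("fixtures", "example.crt")) // fixtures/example.crt

	// filepath.Join is for filesystem paths: fixtures/example.crt on Unix,
	// fixtures\example.crt on Windows.
	fmt.Println(filepath.Join("fixtures", "example.crt"))
}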


@ -72,7 +72,22 @@ func WriteCert(certPath string, data []byte) error {
// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPool(filename string) (*x509.CertPool, error) {
certs, err := CertsFromFile(filename)
pemBlock, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
pool, err := NewPoolFromBytes(pemBlock)
if err != nil {
return nil, fmt.Errorf("error creating pool from %s: %s", filename, err)
}
return pool, nil
}
// NewPoolFromBytes returns an x509.CertPool containing the certificates in the given PEM-encoded bytes.
// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates
func NewPoolFromBytes(pemBlock []byte) (*x509.CertPool, error) {
certs, err := ParseCertsPEM(pemBlock)
if err != nil {
return nil, err
}
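
The refactor above splits pool construction so callers that already hold PEM bytes in memory (from a secret or an API response, say) can skip the file read. A minimal standard-library sketch of the idea that NewPoolFromBytes wraps; the PEM value is a placeholder, not a real certificate:

package main

import (
	"crypto/x509"
	"fmt"
)

var caPEM = []byte("-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n")

func main() {
	// An x509.CertPool can be populated straight from bytes; NewPoolFromBytes
	// adds stricter error reporting on top via ParseCertsPEM.
	pool := x509.NewCertPool()
	ok := pool.AppendCertsFromPEM(caPEM)
	fmt.Println("certificates parsed:", ok) // false for this placeholder PEM
}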


@ -17,6 +17,7 @@ limitations under the License.
package cert
import (
"bytes"
"crypto/x509"
"encoding/pem"
"errors"
@ -59,3 +60,14 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) {
}
return certs, nil
}
// EncodeCertificates returns the PEM-encoded byte array that represents by the specified certs.
func EncodeCertificates(certs ...*x509.Certificate) ([]byte, error) {
b := bytes.Buffer{}
for _, cert := range certs {
if err := pem.Encode(&b, &pem.Block{Type: CertificateBlockType, Bytes: cert.Raw}); err != nil {
return []byte{}, err
}
}
return b.Bytes(), nil
}
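
EncodeCertificates is the inverse of ParseCertsPEM: one CERTIFICATE block per cert, concatenated into a single byte slice. A self-contained sketch of the same pem.Encode pattern, using a throwaway self-signed certificate and only the standard library:

package main

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"encoding/pem"
	"fmt"
	"math/big"
	"time"
)

func main() {
	// Create a throwaway self-signed certificate to encode.
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		panic(err)
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "example"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, err := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	if err != nil {
		panic(err)
	}
	cert, err := x509.ParseCertificate(der)
	if err != nil {
		panic(err)
	}

	// Same pattern as EncodeCertificates: one PEM block per certificate,
	// all written into a single buffer.
	var buf bytes.Buffer
	if err := pem.Encode(&buf, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}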

vendor/k8s.io/client-go/util/cert/server_inspection.go (generated, vendored, new file, 102 lines)

@ -0,0 +1,102 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cert
import (
"crypto/tls"
"crypto/x509"
"fmt"
"net/url"
"strings"
)
// GetClientCANames gets the CA names for client certs that a server accepts. This is useful when inspecting the
// state of particular servers. apiHost is "host:port"
func GetClientCANames(apiHost string) ([]string, error) {
// when we run this the second time, we know which one we are expecting
acceptableCAs := []string{}
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure to always get to the GetClientCertificate
GetClientCertificate: func(hello *tls.CertificateRequestInfo) (*tls.Certificate, error) {
acceptableCAs = []string{}
for _, curr := range hello.AcceptableCAs {
acceptableCAs = append(acceptableCAs, string(curr))
}
return &tls.Certificate{}, nil
},
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, err
}
if err := conn.Close(); err != nil {
return nil, err
}
return acceptableCAs, nil
}
// GetClientCANamesForURL is GetClientCANames against a URL string like we use in kubeconfigs
func GetClientCANamesForURL(kubeConfigURL string) ([]string, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, err
}
return GetClientCANames(apiserverURL.Host)
}
// GetServingCertificates returns the x509 certs used by a server as certificates and pem encoded bytes.
// The serverName is optional for specifying a different name to get SNI certificates. apiHost is "host:port"
func GetServingCertificates(apiHost, serverName string) ([]*x509.Certificate, [][]byte, error) {
tlsConfig := &tls.Config{
InsecureSkipVerify: true, // this is insecure so that we always get connected
}
// if a name is specified for SNI, set it.
if len(serverName) > 0 {
tlsConfig.ServerName = serverName
}
conn, err := tls.Dial("tcp", apiHost, tlsConfig)
if err != nil {
return nil, nil, err
}
if err = conn.Close(); err != nil {
return nil, nil, fmt.Errorf("failed to close connection : %v", err)
}
peerCerts := conn.ConnectionState().PeerCertificates
peerCertBytes := [][]byte{}
for _, a := range peerCerts {
actualCert, err := EncodeCertificates(a)
if err != nil {
return nil, nil, err
}
peerCertBytes = append(peerCertBytes, []byte(strings.TrimSpace(string(actualCert))))
}
return peerCerts, peerCertBytes, err
}
// GetServingCertificatesForURL is GetServingCertificates against a URL string like we use in kubeconfigs
func GetServingCertificatesForURL(kubeConfigURL, serverName string) ([]*x509.Certificate, [][]byte, error) {
apiserverURL, err := url.Parse(kubeConfigURL)
if err != nil {
return nil, nil, err
}
return GetServingCertificates(apiserverURL.Host, serverName)
}
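
A hypothetical usage sketch of the new inspection helpers, assuming the vendored package is imported at its usual path; the URL is a placeholder:

package main

import (
	"fmt"

	"k8s.io/client-go/util/cert"
)

func main() {
	// An empty serverName means no SNI override.
	certs, _, err := cert.GetServingCertificatesForURL("https://example.com:443", "")
	if err != nil {
		panic(err)
	}
	for _, c := range certs {
		fmt.Printf("subject=%s issuer=%s notAfter=%s\n", c.Subject, c.Issuer, c.NotAfter)
	}
}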


@ -77,11 +77,6 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
closable := &closableConn{Conn: conn}
// Start tracking the connection
d.mu.Lock()
d.conns[closable] = struct{}{}
d.mu.Unlock()
// When the connection is closed, remove it from the map. This will
// be no-op if the connection isn't in the map, e.g. if CloseAll()
// is called.
@ -91,6 +86,11 @@ func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.
d.mu.Unlock()
}
// Start tracking the connection
d.mu.Lock()
d.conns[closable] = struct{}{}
d.mu.Unlock()
return closable, nil
}
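
The hunk above reorders DialContext so the connection is published in the tracking map only after its removal hook is installed. A toy sketch of that track-and-self-remove pattern (invented types, not the library's code):

package main

import (
	"fmt"
	"sync"
)

// tracker mirrors the pattern above: a set of live resources guarded by a
// mutex, where each resource removes itself from the set on Close.
type tracker struct {
	mu   sync.Mutex
	live map[*resource]struct{}
}

type resource struct {
	onClose func()
}

func (r *resource) Close() {
	if r.onClose != nil {
		r.onClose()
	}
}

func (t *tracker) open() *resource {
	r := &resource{}
	// Install the cleanup first, then publish the resource in the map, the
	// ordering the diff adopts. Removal is a no-op if the map was already
	// emptied by a CloseAll-style sweep.
	r.onClose = func() {
		t.mu.Lock()
		delete(t.live, r)
		t.mu.Unlock()
	}
	t.mu.Lock()
	t.live[r] = struct{}{}
	t.mu.Unlock()
	return r
}

func main() {
	t := &tracker{live: map[*resource]struct{}{}}
	r := t.open()
	r.Close()
	fmt.Println("live:", len(t.live)) // live: 0
}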


@ -30,7 +30,7 @@ type backoffEntry struct {
}
type Backoff struct {
sync.Mutex
sync.RWMutex
Clock clock.Clock
defaultDuration time.Duration
maxDuration time.Duration
@ -57,8 +57,8 @@ func NewBackOff(initial, max time.Duration) *Backoff {
// Get the current backoff Duration
func (p *Backoff) Get(id string) time.Duration {
p.Lock()
defer p.Unlock()
p.RLock()
defer p.RUnlock()
var delay time.Duration
entry, ok := p.perItemBackoff[id]
if ok {
@ -90,8 +90,8 @@ func (p *Backoff) Reset(id string) {
// Returns True if the elapsed time since eventTime is smaller than the current backoff window
func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
p.Lock()
defer p.Unlock()
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
@ -99,13 +99,13 @@ func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool {
if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) {
return false
}
return p.Clock.Now().Sub(eventTime) < entry.backoff
return p.Clock.Since(eventTime) < entry.backoff
}
// Returns True if time since lastupdate is less than the current backoff window.
func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool {
p.Lock()
defer p.Unlock()
p.RLock()
defer p.RUnlock()
entry, ok := p.perItemBackoff[id]
if !ok {
return false
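
This hunk swaps sync.Mutex for sync.RWMutex so the read-only paths (Get, IsInBackOffSince, IsInBackOffSinceUpdate) take a shared lock, and replaces Clock.Now().Sub(t) with the equivalent Clock.Since(t). A short usage sketch of the Backoff API, assuming the vendored import path:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// Exponential per-ID backoff: starts at one second, capped at one minute.
	b := flowcontrol.NewBackOff(time.Second, time.Minute)

	id := "my-endpoint"
	now := b.Clock.Now()
	b.Next(id, now) // record a failure; the delay doubles on each call
	b.Next(id, now)

	// Get and IsInBackOffSince only read shared state, which is why the
	// diff downgrades them from Lock to RLock.
	fmt.Println("current delay:", b.Get(id))
	fmt.Println("in backoff:", b.IsInBackOffSince(id, now))
}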


@ -17,6 +17,8 @@ limitations under the License.
package flowcontrol
import (
"context"
"errors"
"sync"
"time"
@ -33,6 +35,8 @@ type RateLimiter interface {
Stop()
// QPS returns QPS of this rate limiter
QPS() float32
// Wait returns nil if a token is taken before the Context is done.
Wait(ctx context.Context) error
}
type tokenBucketRateLimiter struct {
@ -98,6 +102,10 @@ func (t *tokenBucketRateLimiter) QPS() float32 {
return t.qps
}
func (t *tokenBucketRateLimiter) Wait(ctx context.Context) error {
return t.limiter.Wait(ctx)
}
type fakeAlwaysRateLimiter struct{}
func NewFakeAlwaysRateLimiter() RateLimiter {
@ -116,6 +124,10 @@ func (t *fakeAlwaysRateLimiter) QPS() float32 {
return 1
}
func (t *fakeAlwaysRateLimiter) Wait(ctx context.Context) error {
return nil
}
type fakeNeverRateLimiter struct {
wg sync.WaitGroup
}
@ -141,3 +153,7 @@ func (t *fakeNeverRateLimiter) Accept() {
func (t *fakeNeverRateLimiter) QPS() float32 {
return 1
}
func (t *fakeNeverRateLimiter) Wait(ctx context.Context) error {
return errors.New("can not be accept")
}
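
The new Wait method lets callers block for a token with cancellation, unlike the uninterruptible Accept. A minimal sketch, assuming the vendored import path:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/util/flowcontrol"
)

func main() {
	// 2 requests per second with a burst of 1.
	limiter := flowcontrol.NewTokenBucketRateLimiter(2, 1)

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available or the context is done.
		if err := limiter.Wait(ctx); err != nil {
			fmt.Println("rate limiter wait:", err)
			return
		}
		fmt.Println("request", i, "at", time.Now().Format(time.StampMilli))
	}
}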


@ -18,30 +18,75 @@ package homedir
import (
"os"
"path/filepath"
"runtime"
)
// HomeDir returns the home directory for the current user
// HomeDir returns the home directory for the current user.
// On Windows:
// 1. the first of %HOME%, %HOMEDRIVE%%HOMEPATH%, %USERPROFILE% containing a `.kube\config` file is returned.
// 2. if none of those locations contain a `.kube\config` file, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists and is writeable is returned.
// 3. if none of those locations are writeable, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that exists is returned.
// 4. if none of those locations exists, the first of %HOME%, %USERPROFILE%, %HOMEDRIVE%%HOMEPATH% that is set is returned.
func HomeDir() string {
if runtime.GOOS == "windows" {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); len(home) > 0 {
if _, err := os.Stat(home); err == nil {
return home
}
}
home := os.Getenv("HOME")
homeDriveHomePath := ""
if homeDrive, homePath := os.Getenv("HOMEDRIVE"), os.Getenv("HOMEPATH"); len(homeDrive) > 0 && len(homePath) > 0 {
homeDir := homeDrive + homePath
if _, err := os.Stat(homeDir); err == nil {
return homeDir
homeDriveHomePath = homeDrive + homePath
}
userProfile := os.Getenv("USERPROFILE")
// Return first of %HOME%, %HOMEDRIVE%/%HOMEPATH%, %USERPROFILE% that contains a `.kube\config` file.
// %HOMEDRIVE%/%HOMEPATH% is preferred over %USERPROFILE% for backwards-compatibility.
for _, p := range []string{home, homeDriveHomePath, userProfile} {
if len(p) == 0 {
continue
}
if _, err := os.Stat(filepath.Join(p, ".kube", "config")); err != nil {
continue
}
return p
}
firstSetPath := ""
firstExistingPath := ""
// Prefer %USERPROFILE% over %HOMEDRIVE%/%HOMEPATH% for compatibility with other auth-writing tools
for _, p := range []string{home, userProfile, homeDriveHomePath} {
if len(p) == 0 {
continue
}
if len(firstSetPath) == 0 {
// remember the first path that is set
firstSetPath = p
}
info, err := os.Stat(p)
if err != nil {
continue
}
if len(firstExistingPath) == 0 {
// remember the first path that exists
firstExistingPath = p
}
if info.IsDir() && info.Mode().Perm()&(1<<(uint(7))) != 0 {
// return first path that is writeable
return p
}
}
if userProfile := os.Getenv("USERPROFILE"); len(userProfile) > 0 {
if _, err := os.Stat(userProfile); err == nil {
return userProfile
}
// If none are writeable, return first location that exists
if len(firstExistingPath) > 0 {
return firstExistingPath
}
// If none exist, return first location that is set
if len(firstSetPath) > 0 {
return firstSetPath
}
// We've got nothing
return ""
}
return os.Getenv("HOME")
}
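
The writability test above relies on 1<<7 being 0o200, the owner-write permission bit. A tiny standalone version of the same check:

package main

import (
	"fmt"
	"os"
)

func main() {
	// 1<<7 == 0o200, the owner-write bit tested in HomeDir above.
	const ownerWrite = os.FileMode(1 << 7)

	info, err := os.Stat(".")
	if err != nil {
		panic(err)
	}
	writable := info.IsDir() && info.Mode().Perm()&ownerWrite != 0
	fmt.Printf("mode=%v owner-writable=%v\n", info.Mode().Perm(), writable)
}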


@ -18,6 +18,7 @@ package jsonpath
import (
"bytes"
"encoding/json"
"fmt"
"io"
"reflect"
@ -29,13 +30,14 @@ import (
type JSONPath struct {
name string
parser *Parser
stack [][]reflect.Value // push and pop values in different scopes
cur []reflect.Value // current scope values
beginRange int
inRange int
endRange int
lastEndNode *Node
allowMissingKeys bool
outputJSON bool
}
// New creates a new JSONPath with the given name.
@ -81,47 +83,103 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
}
j.cur = []reflect.Value{reflect.ValueOf(data)}
cur := []reflect.Value{reflect.ValueOf(data)}
nodes := j.parser.Root.Nodes
fullResult := [][]reflect.Value{}
for i := 0; i < len(nodes); i++ {
node := nodes[i]
results, err := j.walk(j.cur, node)
results, err := j.walk(cur, node)
if err != nil {
return nil, err
}
// encounter an end node, break the current block
if j.endRange > 0 && j.endRange <= j.inRange {
j.endRange -= 1
j.endRange--
j.lastEndNode = &nodes[i]
break
}
// encounter a range node, start a range loop
if j.beginRange > 0 {
j.beginRange -= 1
j.inRange += 1
for k, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
if k == len(results)-1 {
j.inRange -= 1
j.beginRange--
j.inRange++
if len(results) > 0 {
for _, value := range results {
j.parser.Root.Nodes = nodes[i+1:]
nextResults, err := j.FindResults(value.Interface())
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
nextResults, err := j.FindResults(value.Interface())
} else {
// If the range has no results, we still need to process the nodes within the range
// so the position will advance to the end node
j.parser.Root.Nodes = nodes[i+1:]
_, err := j.FindResults(nil)
if err != nil {
return nil, err
}
fullResult = append(fullResult, nextResults...)
}
break
j.inRange--
// Fast forward to resume processing after the most recent end node that was encountered
for k := i + 1; k < len(nodes); k++ {
if &nodes[k] == j.lastEndNode {
i = k
break
}
}
continue
}
fullResult = append(fullResult, results)
}
return fullResult, nil
}
// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
func (j *JSONPath) EnableJSONOutput(v bool) {
j.outputJSON = v
}
// PrintResults writes the results into writer
func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
if j.outputJSON {
// convert the []reflect.Value to something that json
// will be able to marshal
r := make([]interface{}, 0, len(results))
for i := range results {
r = append(r, results[i].Interface())
}
results = []reflect.Value{reflect.ValueOf(r)}
}
for i, r := range results {
text, err := j.evalToText(r)
var text []byte
var err error
outputJSON := true
kind := r.Kind()
if kind == reflect.Interface {
kind = r.Elem().Kind()
}
switch kind {
case reflect.Map:
case reflect.Array:
case reflect.Slice:
case reflect.Struct:
default:
outputJSON = false
}
switch {
case outputJSON || j.outputJSON:
if j.outputJSON {
text, err = json.MarshalIndent(r.Interface(), "", " ")
text = append(text, '\n')
} else {
text, err = json.Marshal(r.Interface())
}
default:
text, err = j.evalToText(r)
}
if err != nil {
return err
}
@ -132,7 +190,9 @@ func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
return err
}
}
return nil
}
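
A usage sketch of the new JSON output mode, assuming the vendored import path: with EnableJSONOutput(true), results print as a JSON array rather than space-separated text:

package main

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "a"},
			map[string]interface{}{"name": "b"},
		},
	}

	jp := jsonpath.New("demo")
	if err := jp.Parse("{.items[*].name}"); err != nil {
		panic(err)
	}
	// Without this call the output would be: a b
	jp.EnableJSONOutput(true)
	if err := jp.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}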
// walk visits tree rooted at the given node in DFS order
@ -212,17 +272,11 @@ func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) (
results := []reflect.Value{}
switch node.Name {
case "range":
j.stack = append(j.stack, j.cur)
j.beginRange += 1
j.beginRange++
results = input
case "end":
if j.endRange < j.inRange { // inside a loop, break the current block
j.endRange += 1
break
}
// the loop is about to end, pop value and continue the following execution
if len(j.stack) > 0 {
j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
if j.inRange > 0 {
j.endRange++
} else {
return results, fmt.Errorf("not in range, nothing to end")
}


@ -37,7 +37,6 @@ type Parser struct {
Name string
Root *ListNode
input string
cur *ListNode
pos int
start int
width int
@ -186,8 +185,7 @@ func (p *Parser) parseInsideAction(cur *ListNode) error {
func (p *Parser) parseRightDelim(cur *ListNode) error {
p.pos += len(rightDelim)
p.consumeText()
cur = p.Root
return p.parseText(cur)
return p.parseText(p.Root)
}
// parseIdentifier scans build-in keywords, like "range" "end"
@ -216,8 +214,11 @@ func (p *Parser) parseIdentifier(cur *ListNode) error {
return p.parseInsideAction(cur)
}
// parseRecursive scans the recursive desent operator ..
// parseRecursive scans the recursive descent operator ..
func (p *Parser) parseRecursive(cur *ListNode) error {
if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
return fmt.Errorf("invalid multiple recursive descent")
}
p.pos += len("..")
p.consumeText()
cur.append(newRecursive())
@ -231,7 +232,7 @@ func (p *Parser) parseRecursive(cur *ListNode) error {
func (p *Parser) parseNumber(cur *ListNode) error {
r := p.peek()
if r == '+' || r == '-' {
r = p.next()
p.next()
}
for {
r = p.next()


@ -0,0 +1,211 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"math"
"sync"
"time"
"golang.org/x/time/rate"
)
type RateLimiter interface {
// When gets an item and gets to decide how long that item should wait
When(item interface{}) time.Duration
// Forget indicates that an item is finished being retried. Doesn't matter whether its for perm failing
// or for success, we'll stop tracking it
Forget(item interface{})
// NumRequeues returns back how many failures the item has had
NumRequeues(item interface{}) int
}
// DefaultControllerRateLimiter is a no-arg constructor for a default rate limiter for a workqueue. It has
// both overall and per-item rate limiting. The overall is a token bucket and the per-item is exponential
func DefaultControllerRateLimiter() RateLimiter {
return NewMaxOfRateLimiter(
NewItemExponentialFailureRateLimiter(5*time.Millisecond, 1000*time.Second),
// 10 qps, 100 bucket size. This is only for retry speed and its only the overall factor (not per item)
&BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)},
)
}
// BucketRateLimiter adapts a standard bucket to the workqueue ratelimiter API
type BucketRateLimiter struct {
*rate.Limiter
}
var _ RateLimiter = &BucketRateLimiter{}
func (r *BucketRateLimiter) When(item interface{}) time.Duration {
return r.Limiter.Reserve().Delay()
}
func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
return 0
}
func (r *BucketRateLimiter) Forget(item interface{}) {
}
// ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
// dealing with max failures and expiration are up to the caller
type ItemExponentialFailureRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
baseDelay time.Duration
maxDelay time.Duration
}
var _ RateLimiter = &ItemExponentialFailureRateLimiter{}
func NewItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) RateLimiter {
return &ItemExponentialFailureRateLimiter{
failures: map[interface{}]int{},
baseDelay: baseDelay,
maxDelay: maxDelay,
}
}
func DefaultItemBasedRateLimiter() RateLimiter {
return NewItemExponentialFailureRateLimiter(time.Millisecond, 1000*time.Second)
}
func (r *ItemExponentialFailureRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
exp := r.failures[item]
r.failures[item] = r.failures[item] + 1
// The backoff is capped such that 'calculated' value never overflows.
backoff := float64(r.baseDelay.Nanoseconds()) * math.Pow(2, float64(exp))
if backoff > math.MaxInt64 {
return r.maxDelay
}
calculated := time.Duration(backoff)
if calculated > r.maxDelay {
return r.maxDelay
}
return calculated
}
func (r *ItemExponentialFailureRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemExponentialFailureRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
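
A worked example of the exponential limiter above: delays follow baseDelay * 2^failures until maxDelay caps them, and Forget resets the per-item count. A sketch, assuming the vendored import path:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// Delays: 5ms, 10ms, 20ms, 40ms, ... capped at 1s.
	r := workqueue.NewItemExponentialFailureRateLimiter(5*time.Millisecond, time.Second)

	for i := 0; i < 6; i++ {
		fmt.Println(r.When("item")) // 5ms 10ms 20ms 40ms 80ms 160ms
	}
	r.Forget("item")            // reset the failure count for "item"
	fmt.Println(r.When("item")) // back to 5ms
}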
// ItemFastSlowRateLimiter does a quick retry for a certain number of attempts, then a slow retry after that
type ItemFastSlowRateLimiter struct {
failuresLock sync.Mutex
failures map[interface{}]int
maxFastAttempts int
fastDelay time.Duration
slowDelay time.Duration
}
var _ RateLimiter = &ItemFastSlowRateLimiter{}
func NewItemFastSlowRateLimiter(fastDelay, slowDelay time.Duration, maxFastAttempts int) RateLimiter {
return &ItemFastSlowRateLimiter{
failures: map[interface{}]int{},
fastDelay: fastDelay,
slowDelay: slowDelay,
maxFastAttempts: maxFastAttempts,
}
}
func (r *ItemFastSlowRateLimiter) When(item interface{}) time.Duration {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
r.failures[item] = r.failures[item] + 1
if r.failures[item] <= r.maxFastAttempts {
return r.fastDelay
}
return r.slowDelay
}
func (r *ItemFastSlowRateLimiter) NumRequeues(item interface{}) int {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
return r.failures[item]
}
func (r *ItemFastSlowRateLimiter) Forget(item interface{}) {
r.failuresLock.Lock()
defer r.failuresLock.Unlock()
delete(r.failures, item)
}
// MaxOfRateLimiter calls every RateLimiter and returns the worst case response
// When used with a token bucket limiter, the burst could be apparently exceeded in cases where particular items
// were separately delayed a longer time.
type MaxOfRateLimiter struct {
limiters []RateLimiter
}
func (r *MaxOfRateLimiter) When(item interface{}) time.Duration {
ret := time.Duration(0)
for _, limiter := range r.limiters {
curr := limiter.When(item)
if curr > ret {
ret = curr
}
}
return ret
}
func NewMaxOfRateLimiter(limiters ...RateLimiter) RateLimiter {
return &MaxOfRateLimiter{limiters: limiters}
}
func (r *MaxOfRateLimiter) NumRequeues(item interface{}) int {
ret := 0
for _, limiter := range r.limiters {
curr := limiter.NumRequeues(item)
if curr > ret {
ret = curr
}
}
return ret
}
func (r *MaxOfRateLimiter) Forget(item interface{}) {
for _, limiter := range r.limiters {
limiter.Forget(item)
}
}


@ -0,0 +1,280 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"container/heap"
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
// DelayingInterface is an Interface that can Add an item at a later time. This makes it easier to
// requeue items after failures without ending up in a hot-loop.
type DelayingInterface interface {
Interface
// AddAfter adds an item to the workqueue after the indicated duration has passed
AddAfter(item interface{}, duration time.Duration)
}
// NewDelayingQueue constructs a new workqueue with delayed queuing ability
func NewDelayingQueue() DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
}
// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
// inject custom queue Interface instead of the default one
func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
return newDelayingQueue(clock.RealClock{}, q, name)
}
// NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
func NewNamedDelayingQueue(name string) DelayingInterface {
return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
}
// NewDelayingQueueWithCustomClock constructs a new named workqueue
// with ability to inject real or fake clock for testing purposes
func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
return newDelayingQueue(clock, NewNamed(name), name)
}
func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
ret := &delayingType{
Interface: q,
clock: clock,
heartbeat: clock.NewTicker(maxWait),
stopCh: make(chan struct{}),
waitingForAddCh: make(chan *waitFor, 1000),
metrics: newRetryMetrics(name),
}
go ret.waitingLoop()
return ret
}
// delayingType wraps an Interface and provides delayed re-enquing
type delayingType struct {
Interface
// clock tracks time for delayed firing
clock clock.Clock
// stopCh lets us signal a shutdown to the waiting loop
stopCh chan struct{}
// stopOnce guarantees we only signal shutdown a single time
stopOnce sync.Once
// heartbeat ensures we wait no more than maxWait before firing
heartbeat clock.Ticker
// waitingForAddCh is a buffered channel that feeds waitingForAdd
waitingForAddCh chan *waitFor
// metrics counts the number of retries
metrics retryMetrics
}
// waitFor holds the data to add and the time it should be added
type waitFor struct {
data t
readyAt time.Time
// index in the priority queue (heap)
index int
}
// waitForPriorityQueue implements a priority queue for waitFor items.
//
// waitForPriorityQueue implements heap.Interface. The item occurring next in
// time (i.e., the item with the smallest readyAt) is at the root (index 0).
// Peek returns this minimum item at index 0. Pop returns the minimum item after
// it has been removed from the queue and placed at index Len()-1 by
// container/heap. Push adds an item at index Len(), and container/heap
// percolates it into the correct location.
type waitForPriorityQueue []*waitFor
func (pq waitForPriorityQueue) Len() int {
return len(pq)
}
func (pq waitForPriorityQueue) Less(i, j int) bool {
return pq[i].readyAt.Before(pq[j].readyAt)
}
func (pq waitForPriorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to the queue. Push should not be called directly; instead,
// use `heap.Push`.
func (pq *waitForPriorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*waitFor)
item.index = n
*pq = append(*pq, item)
}
// Pop removes an item from the queue. Pop should not be called directly;
// instead, use `heap.Pop`.
func (pq *waitForPriorityQueue) Pop() interface{} {
n := len(*pq)
item := (*pq)[n-1]
item.index = -1
*pq = (*pq)[0:(n - 1)]
return item
}
// Peek returns the item at the beginning of the queue, without removing the
// item or otherwise mutating the queue. It is safe to call directly.
func (pq waitForPriorityQueue) Peek() interface{} {
return pq[0]
}
// ShutDown stops the queue. After the queue drains, the returned shutdown bool
// on Get() will be true. This method may be invoked more than once.
func (q *delayingType) ShutDown() {
q.stopOnce.Do(func() {
q.Interface.ShutDown()
close(q.stopCh)
q.heartbeat.Stop()
})
}
// AddAfter adds the given item to the work queue after the given delay
func (q *delayingType) AddAfter(item interface{}, duration time.Duration) {
// don't add if we're already shutting down
if q.ShuttingDown() {
return
}
q.metrics.retry()
// immediately add things with no delay
if duration <= 0 {
q.Add(item)
return
}
select {
case <-q.stopCh:
// unblock if ShutDown() is called
case q.waitingForAddCh <- &waitFor{data: item, readyAt: q.clock.Now().Add(duration)}:
}
}
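
A small sketch of the delaying queue in use, assuming the vendored import path: AddAfter parks the item in the waiting heap, and Get returns it only once readyAt has passed:

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.NewDelayingQueue()
	defer q.ShutDown()

	start := time.Now()
	q.AddAfter("retry-me", 500*time.Millisecond)

	// Get blocks until the item becomes ready roughly 500ms later.
	item, _ := q.Get()
	fmt.Printf("got %v after %v\n", item, time.Since(start).Round(time.Millisecond))
	q.Done(item)
}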
// maxWait keeps a max bound on the wait time. It's just insurance against weird things happening.
// Checking the queue every 10 seconds isn't expensive and we know that we'll never end up with an
// expired item sitting for more than 10 seconds.
const maxWait = 10 * time.Second
// waitingLoop runs until the workqueue is shutdown and keeps a check on the list of items to be added.
func (q *delayingType) waitingLoop() {
defer utilruntime.HandleCrash()
// Make a placeholder channel to use when there are no items in our list
never := make(<-chan time.Time)
// Make a timer that expires when the item at the head of the waiting queue is ready
var nextReadyAtTimer clock.Timer
waitingForQueue := &waitForPriorityQueue{}
heap.Init(waitingForQueue)
waitingEntryByData := map[t]*waitFor{}
for {
if q.Interface.ShuttingDown() {
return
}
now := q.clock.Now()
// Add ready entries
for waitingForQueue.Len() > 0 {
entry := waitingForQueue.Peek().(*waitFor)
if entry.readyAt.After(now) {
break
}
entry = heap.Pop(waitingForQueue).(*waitFor)
q.Add(entry.data)
delete(waitingEntryByData, entry.data)
}
// Set up a wait for the first item's readyAt (if one exists)
nextReadyAt := never
if waitingForQueue.Len() > 0 {
if nextReadyAtTimer != nil {
nextReadyAtTimer.Stop()
}
entry := waitingForQueue.Peek().(*waitFor)
nextReadyAtTimer = q.clock.NewTimer(entry.readyAt.Sub(now))
nextReadyAt = nextReadyAtTimer.C()
}
select {
case <-q.stopCh:
return
case <-q.heartbeat.C():
// continue the loop, which will add ready items
case <-nextReadyAt:
// continue the loop, which will add ready items
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
drained := false
for !drained {
select {
case waitEntry := <-q.waitingForAddCh:
if waitEntry.readyAt.After(q.clock.Now()) {
insert(waitingForQueue, waitingEntryByData, waitEntry)
} else {
q.Add(waitEntry.data)
}
default:
drained = true
}
}
}
}
}
// insert adds the entry to the priority queue, or updates the readyAt if it already exists in the queue
func insert(q *waitForPriorityQueue, knownEntries map[t]*waitFor, entry *waitFor) {
// if the entry already exists, update the time only if it would cause the item to be queued sooner
existing, exists := knownEntries[entry.data]
if exists {
if existing.readyAt.After(entry.readyAt) {
existing.readyAt = entry.readyAt
heap.Fix(q, existing.index)
}
return
}
heap.Push(q, entry)
knownEntries[entry.data] = entry
}

vendor/k8s.io/client-go/util/workqueue/doc.go (generated, vendored, new file, 26 lines)

@ -0,0 +1,26 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package workqueue provides a simple queue that supports the following
// features:
// * Fair: items processed in the order in which they are added.
// * Stingy: a single item will not be processed multiple times concurrently,
// and if an item is added multiple times before it can be processed, it
// will only be processed once.
// * Multiple consumers and producers. In particular, it is allowed for an
// item to be reenqueued while it is being processed.
// * Shutdown notifications.
package workqueue // import "k8s.io/client-go/util/workqueue"
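
A runnable sketch of those guarantees, assuming the vendored import path: duplicate Adds before processing collapse into a single entry:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	defer q.ShutDown()

	// "Stingy": adding the same item twice before it is processed
	// results in one queue entry.
	q.Add("a")
	q.Add("a")
	q.Add("b")
	fmt.Println("len:", q.Len()) // len: 2

	for q.Len() > 0 {
		item, _ := q.Get()
		fmt.Println("processing:", item)
		q.Done(item)
	}
}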

vendor/k8s.io/client-go/util/workqueue/metrics.go (generated, vendored, new file, 261 lines)

@ -0,0 +1,261 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
// This file provides abstractions for setting the provider (e.g., prometheus)
// of metrics.
type queueMetrics interface {
add(item t)
get(item t)
done(item t)
updateUnfinishedWork()
}
// GaugeMetric represents a single numerical value that can arbitrarily go up
// and down.
type GaugeMetric interface {
Inc()
Dec()
}
// SettableGaugeMetric represents a single numerical value that can arbitrarily go up
// and down. (Separate from GaugeMetric to preserve backwards compatibility.)
type SettableGaugeMetric interface {
Set(float64)
}
// CounterMetric represents a single numerical value that only ever
// goes up.
type CounterMetric interface {
Inc()
}
// SummaryMetric captures individual observations.
type SummaryMetric interface {
Observe(float64)
}
// HistogramMetric counts individual observations.
type HistogramMetric interface {
Observe(float64)
}
type noopMetric struct{}
func (noopMetric) Inc() {}
func (noopMetric) Dec() {}
func (noopMetric) Set(float64) {}
func (noopMetric) Observe(float64) {}
// defaultQueueMetrics expects the caller to lock before setting any metrics.
type defaultQueueMetrics struct {
clock clock.Clock
// current depth of a workqueue
depth GaugeMetric
// total number of adds handled by a workqueue
adds CounterMetric
// how long an item stays in a workqueue
latency HistogramMetric
// how long processing an item from a workqueue takes
workDuration HistogramMetric
addTimes map[t]time.Time
processingStartTimes map[t]time.Time
// how long have current threads been working?
unfinishedWorkSeconds SettableGaugeMetric
longestRunningProcessor SettableGaugeMetric
}
func (m *defaultQueueMetrics) add(item t) {
if m == nil {
return
}
m.adds.Inc()
m.depth.Inc()
if _, exists := m.addTimes[item]; !exists {
m.addTimes[item] = m.clock.Now()
}
}
func (m *defaultQueueMetrics) get(item t) {
if m == nil {
return
}
m.depth.Dec()
m.processingStartTimes[item] = m.clock.Now()
if startTime, exists := m.addTimes[item]; exists {
m.latency.Observe(m.sinceInSeconds(startTime))
delete(m.addTimes, item)
}
}
func (m *defaultQueueMetrics) done(item t) {
if m == nil {
return
}
if startTime, exists := m.processingStartTimes[item]; exists {
m.workDuration.Observe(m.sinceInSeconds(startTime))
delete(m.processingStartTimes, item)
}
}
func (m *defaultQueueMetrics) updateUnfinishedWork() {
// Note that a summary metric would be better for this, but prometheus
// doesn't seem to have non-hacky ways to reset the summary metrics.
var total float64
var oldest float64
for _, t := range m.processingStartTimes {
age := m.sinceInSeconds(t)
total += age
if age > oldest {
oldest = age
}
}
m.unfinishedWorkSeconds.Set(total)
m.longestRunningProcessor.Set(oldest)
}
type noMetrics struct{}
func (noMetrics) add(item t) {}
func (noMetrics) get(item t) {}
func (noMetrics) done(item t) {}
func (noMetrics) updateUnfinishedWork() {}
// Gets the time since the specified start in seconds.
func (m *defaultQueueMetrics) sinceInSeconds(start time.Time) float64 {
return m.clock.Since(start).Seconds()
}
type retryMetrics interface {
retry()
}
type defaultRetryMetrics struct {
retries CounterMetric
}
func (m *defaultRetryMetrics) retry() {
if m == nil {
return
}
m.retries.Inc()
}
// MetricsProvider generates various metrics used by the queue.
type MetricsProvider interface {
NewDepthMetric(name string) GaugeMetric
NewAddsMetric(name string) CounterMetric
NewLatencyMetric(name string) HistogramMetric
NewWorkDurationMetric(name string) HistogramMetric
NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric
NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric
NewRetriesMetric(name string) CounterMetric
}
type noopMetricsProvider struct{}
func (_ noopMetricsProvider) NewDepthMetric(name string) GaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewAddsMetric(name string) CounterMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLatencyMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewWorkDurationMetric(name string) HistogramMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) SettableGaugeMetric {
return noopMetric{}
}
func (_ noopMetricsProvider) NewRetriesMetric(name string) CounterMetric {
return noopMetric{}
}
var globalMetricsFactory = queueMetricsFactory{
metricsProvider: noopMetricsProvider{},
}
type queueMetricsFactory struct {
metricsProvider MetricsProvider
onlyOnce sync.Once
}
func (f *queueMetricsFactory) setProvider(mp MetricsProvider) {
f.onlyOnce.Do(func() {
f.metricsProvider = mp
})
}
func (f *queueMetricsFactory) newQueueMetrics(name string, clock clock.Clock) queueMetrics {
mp := f.metricsProvider
if len(name) == 0 || mp == (noopMetricsProvider{}) {
return noMetrics{}
}
return &defaultQueueMetrics{
clock: clock,
depth: mp.NewDepthMetric(name),
adds: mp.NewAddsMetric(name),
latency: mp.NewLatencyMetric(name),
workDuration: mp.NewWorkDurationMetric(name),
unfinishedWorkSeconds: mp.NewUnfinishedWorkSecondsMetric(name),
longestRunningProcessor: mp.NewLongestRunningProcessorSecondsMetric(name),
addTimes: map[t]time.Time{},
processingStartTimes: map[t]time.Time{},
}
}
func newRetryMetrics(name string) retryMetrics {
var ret *defaultRetryMetrics
if len(name) == 0 {
return ret
}
return &defaultRetryMetrics{
retries: globalMetricsFactory.metricsProvider.NewRetriesMetric(name),
}
}
// SetProvider sets the metrics provider for all subsequently created work
// queues. Only the first call has an effect.
func SetProvider(metricsProvider MetricsProvider) {
globalMetricsFactory.setProvider(metricsProvider)
}
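
A toy MetricsProvider, not a real exporter, showing the plumbing: implement the seven constructors and register the provider before creating any named queue, since only the first SetProvider call takes effect:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// printProvider tracks only queue depth; everything else is a no-op.
type printProvider struct{}

type depthGauge struct{ name string }

func (g depthGauge) Inc() { fmt.Println(g.name, "depth +1") }
func (g depthGauge) Dec() { fmt.Println(g.name, "depth -1") }

type noop struct{}

func (noop) Inc()            {}
func (noop) Set(float64)     {}
func (noop) Observe(float64) {}

func (printProvider) NewDepthMetric(name string) workqueue.GaugeMetric      { return depthGauge{name} }
func (printProvider) NewAddsMetric(name string) workqueue.CounterMetric     { return noop{} }
func (printProvider) NewLatencyMetric(name string) workqueue.HistogramMetric { return noop{} }
func (printProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
	return noop{}
}
func (printProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return noop{}
}
func (printProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
	return noop{}
}
func (printProvider) NewRetriesMetric(name string) workqueue.CounterMetric { return noop{} }

func main() {
	// Must run before any named queue is constructed; only the first call wins.
	workqueue.SetProvider(printProvider{})
	q := workqueue.NewNamed("demo")
	q.Add("x") // prints: demo depth +1
	q.ShutDown()
}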

vendor/k8s.io/client-go/util/workqueue/parallelizer.go (generated, vendored, new file, 101 lines)

@ -0,0 +1,101 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"context"
"sync"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
type DoWorkPieceFunc func(piece int)
type options struct {
chunkSize int
}
type Options func(*options)
// WithChunkSize allows to set chunks of work items to the workers, rather than
// processing one by one.
// It is recommended to use this option if the number of pieces significantly
// higher than the number of workers and the work done for each item is small.
func WithChunkSize(c int) func(*options) {
return func(o *options) {
o.chunkSize = c
}
}
// ParallelizeUntil is a framework that allows for parallelizing N
// independent pieces of work until done or the context is canceled.
func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
if pieces == 0 {
return
}
o := options{}
for _, opt := range opts {
opt(&o)
}
chunkSize := o.chunkSize
if chunkSize < 1 {
chunkSize = 1
}
chunks := ceilDiv(pieces, chunkSize)
toProcess := make(chan int, chunks)
for i := 0; i < chunks; i++ {
toProcess <- i
}
close(toProcess)
var stop <-chan struct{}
if ctx != nil {
stop = ctx.Done()
}
if chunks < workers {
workers = chunks
}
wg := sync.WaitGroup{}
wg.Add(workers)
for i := 0; i < workers; i++ {
go func() {
defer utilruntime.HandleCrash()
defer wg.Done()
for chunk := range toProcess {
start := chunk * chunkSize
end := start + chunkSize
if end > pieces {
end = pieces
}
for p := start; p < end; p++ {
select {
case <-stop:
return
default:
doWorkPiece(p)
}
}
}
}()
}
wg.Wait()
}
func ceilDiv(a, b int) int {
return (a + b - 1) / b
}
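
A usage sketch of ParallelizeUntil, assuming the vendored import path: sum 1000 numbers across 8 workers, with chunking to cut channel overhead when each piece is cheap:

package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	var sum int64
	nums := make([]int64, 1000)
	for i := range nums {
		nums[i] = int64(i)
	}

	// 1000 pieces, 8 workers, 50 pieces per chunk (20 chunks on the channel).
	workqueue.ParallelizeUntil(context.TODO(), 8, len(nums), func(i int) {
		atomic.AddInt64(&sum, nums[i])
	}, workqueue.WithChunkSize(50))

	fmt.Println("sum:", sum) // sum: 499500
}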

vendor/k8s.io/client-go/util/workqueue/queue.go (generated, vendored, new file, 212 lines)

@ -0,0 +1,212 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
import (
"sync"
"time"
"k8s.io/apimachinery/pkg/util/clock"
)
type Interface interface {
Add(item interface{})
Len() int
Get() (item interface{}, shutdown bool)
Done(item interface{})
ShutDown()
ShuttingDown() bool
}
// New constructs a new work queue (see the package comment).
func New() *Type {
return NewNamed("")
}
func NewNamed(name string) *Type {
rc := clock.RealClock{}
return newQueue(
rc,
globalMetricsFactory.newQueueMetrics(name, rc),
defaultUnfinishedWorkUpdatePeriod,
)
}
func newQueue(c clock.Clock, metrics queueMetrics, updatePeriod time.Duration) *Type {
t := &Type{
clock: c,
dirty: set{},
processing: set{},
cond: sync.NewCond(&sync.Mutex{}),
metrics: metrics,
unfinishedWorkUpdatePeriod: updatePeriod,
}
go t.updateUnfinishedWorkLoop()
return t
}
const defaultUnfinishedWorkUpdatePeriod = 500 * time.Millisecond
// Type is a work queue (see the package comment).
type Type struct {
// queue defines the order in which we will work on items. Every
// element of queue should be in the dirty set and not in the
// processing set.
queue []t
// dirty defines all of the items that need to be processed.
dirty set
// Things that are currently being processed are in the processing set.
// These things may be simultaneously in the dirty set. When we finish
// processing something and remove it from this set, we'll check if
// it's in the dirty set, and if so, add it to the queue.
processing set
cond *sync.Cond
shuttingDown bool
metrics queueMetrics
unfinishedWorkUpdatePeriod time.Duration
clock clock.Clock
}
type empty struct{}
type t interface{}
type set map[t]empty
func (s set) has(item t) bool {
_, exists := s[item]
return exists
}
func (s set) insert(item t) {
s[item] = empty{}
}
func (s set) delete(item t) {
delete(s, item)
}
// Add marks item as needing processing.
func (q *Type) Add(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if q.shuttingDown {
return
}
if q.dirty.has(item) {
return
}
q.metrics.add(item)
q.dirty.insert(item)
if q.processing.has(item) {
return
}
q.queue = append(q.queue, item)
q.cond.Signal()
}
// Len returns the current queue length, for informational purposes only. You
// shouldn't e.g. gate a call to Add() or Get() on Len() being a particular
// value, that can't be synchronized properly.
func (q *Type) Len() int {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return len(q.queue)
}
// Get blocks until it can return an item to be processed. If shutdown = true,
// the caller should end their goroutine. You must call Done with item when you
// have finished processing it.
func (q *Type) Get() (item interface{}, shutdown bool) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
for len(q.queue) == 0 && !q.shuttingDown {
q.cond.Wait()
}
if len(q.queue) == 0 {
// We must be shutting down.
return nil, true
}
item, q.queue = q.queue[0], q.queue[1:]
q.metrics.get(item)
q.processing.insert(item)
q.dirty.delete(item)
return item, false
}
// Done marks item as done processing, and if it has been marked as dirty again
// while it was being processed, it will be re-added to the queue for
// re-processing.
func (q *Type) Done(item interface{}) {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.metrics.done(item)
q.processing.delete(item)
if q.dirty.has(item) {
q.queue = append(q.queue, item)
q.cond.Signal()
}
}
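
The dirty/processing interplay in Done is easiest to see in a sketch (vendored import path assumed): re-adding an in-flight item does not create a second concurrent copy, and Done re-queues it afterwards:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	q := workqueue.New()
	defer q.ShutDown()

	q.Add("a")
	item, _ := q.Get() // "a" moves to the processing set

	// Re-adding while the item is in flight only marks it dirty.
	q.Add("a")
	fmt.Println("len while processing:", q.Len()) // 0

	// Done sees the dirty mark and puts "a" back on the queue.
	q.Done(item)
	fmt.Println("len after Done:", q.Len()) // 1
}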
// ShutDown will cause q to ignore all new items added to it. As soon as the
// worker goroutines have drained the existing items in the queue, they will be
// instructed to exit.
func (q *Type) ShutDown() {
q.cond.L.Lock()
defer q.cond.L.Unlock()
q.shuttingDown = true
q.cond.Broadcast()
}
func (q *Type) ShuttingDown() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
return q.shuttingDown
}
func (q *Type) updateUnfinishedWorkLoop() {
t := q.clock.NewTicker(q.unfinishedWorkUpdatePeriod)
defer t.Stop()
for range t.C() {
if !func() bool {
q.cond.L.Lock()
defer q.cond.L.Unlock()
if !q.shuttingDown {
q.metrics.updateUnfinishedWork()
return true
}
return false
}() {
return
}
}
}


@ -0,0 +1,69 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package workqueue
// RateLimitingInterface is an interface that rate limits items being added to the queue.
type RateLimitingInterface interface {
DelayingInterface
// AddRateLimited adds an item to the workqueue after the rate limiter says it's ok
AddRateLimited(item interface{})
// Forget indicates that an item is finished being retried. Doesn't matter whether it's for perm failing
// or for success, we'll stop the rate limiter from tracking it. This only clears the `rateLimiter`, you
// still have to call `Done` on the queue.
Forget(item interface{})
// NumRequeues returns back how many times the item was requeued
NumRequeues(item interface{}) int
}
// NewRateLimitingQueue constructs a new workqueue with rateLimited queuing ability
// Remember to call Forget! If you don't, you may end up tracking failures forever.
func NewRateLimitingQueue(rateLimiter RateLimiter) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewDelayingQueue(),
rateLimiter: rateLimiter,
}
}
func NewNamedRateLimitingQueue(rateLimiter RateLimiter, name string) RateLimitingInterface {
return &rateLimitingType{
DelayingInterface: NewNamedDelayingQueue(name),
rateLimiter: rateLimiter,
}
}
// rateLimitingType wraps an Interface and provides rateLimited re-enquing
type rateLimitingType struct {
DelayingInterface
rateLimiter RateLimiter
}
// AddRateLimited AddAfter's the item based on the time when the rate limiter says it's ok
func (q *rateLimitingType) AddRateLimited(item interface{}) {
q.DelayingInterface.AddAfter(item, q.rateLimiter.When(item))
}
func (q *rateLimitingType) NumRequeues(item interface{}) int {
return q.rateLimiter.NumRequeues(item)
}
func (q *rateLimitingType) Forget(item interface{}) {
q.rateLimiter.Forget(item)
}
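
Putting the pieces together, a canonical consumer loop for the rate-limiting queue (vendored import path assumed; process is a stand-in for real work): AddRateLimited on failure, Forget on success, Done always:

package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

var attempts int

// process fails twice, then succeeds, to exercise the retry path.
func process(item interface{}) error {
	attempts++
	fmt.Printf("attempt %d: %v\n", attempts, item)
	if attempts < 3 {
		return fmt.Errorf("transient failure")
	}
	return nil
}

func main() {
	q := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	q.Add("task")

	for {
		item, shutdown := q.Get()
		if shutdown {
			return
		}
		if err := process(item); err != nil {
			// Back off and retry; the delay grows with NumRequeues(item).
			q.AddRateLimited(item)
		} else {
			// Success: stop tracking the item's failure history.
			q.Forget(item)
			q.ShutDown()
		}
		q.Done(item)
	}
}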