Fix dependencies
vendor/k8s.io/client-go/util/cert/cert.go (generated, vendored): 6 changes
@@ -28,7 +28,7 @@ import (
 	"io/ioutil"
 	"math/big"
 	"net"
-	"path/filepath"
+	"path"
 	"strings"
 	"time"
 
@@ -96,8 +96,8 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a
 	maxAge := time.Hour * 24 * 365 // one year self-signed certs
 
 	baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-"))
-	certFixturePath := filepath.Join(fixtureDirectory, baseName+".crt")
-	keyFixturePath := filepath.Join(fixtureDirectory, baseName+".key")
+	certFixturePath := path.Join(fixtureDirectory, baseName+".crt")
+	keyFixturePath := path.Join(fixtureDirectory, baseName+".key")
 	if len(fixtureDirectory) > 0 {
 		cert, err := ioutil.ReadFile(certFixturePath)
 		if err == nil {
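Usage note (illustration, not part of this commit): this file reverts the fixture paths from filepath.Join back to path.Join. The practical difference is the separator: package path always joins with forward slashes, while path/filepath uses the OS-specific separator. A minimal sketch, with a hypothetical fixture file name:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always joins with forward slashes, on every OS.
	fmt.Println(path.Join("fixtures", "localhost.crt")) // fixtures/localhost.crt

	// filepath.Join uses the platform separator: the same output on
	// Unix-like systems, but fixtures\localhost.crt on Windows.
	fmt.Println(filepath.Join("fixtures", "localhost.crt"))
}
```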
vendor/k8s.io/client-go/util/jsonpath/jsonpath.go (generated, vendored): 92 changes
@@ -18,7 +18,6 @@ package jsonpath
 
 import (
 	"bytes"
-	"encoding/json"
 	"fmt"
 	"io"
 	"reflect"
@@ -30,14 +29,13 @@ import (
 type JSONPath struct {
 	name       string
 	parser     *Parser
+	stack      [][]reflect.Value // push and pop values in different scopes
+	cur        []reflect.Value   // current scope values
 	beginRange int
 	inRange    int
 	endRange   int
 
-	lastEndNode *Node
-
 	allowMissingKeys bool
-	outputJSON       bool
 }
 
 // New creates a new JSONPath with the given name.
@@ -83,12 +81,12 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
 		return nil, fmt.Errorf("%s is an incomplete jsonpath template", j.name)
 	}
 
-	cur := []reflect.Value{reflect.ValueOf(data)}
+	j.cur = []reflect.Value{reflect.ValueOf(data)}
 	nodes := j.parser.Root.Nodes
 	fullResult := [][]reflect.Value{}
 	for i := 0; i < len(nodes); i++ {
 		node := nodes[i]
-		results, err := j.walk(cur, node)
+		results, err := j.walk(j.cur, node)
 		if err != nil {
 			return nil, err
 		}
@@ -96,90 +94,34 @@ func (j *JSONPath) FindResults(data interface{}) ([][]reflect.Value, error) {
 		// encounter an end node, break the current block
 		if j.endRange > 0 && j.endRange <= j.inRange {
 			j.endRange--
-			j.lastEndNode = &nodes[i]
 			break
 		}
 		// encounter a range node, start a range loop
 		if j.beginRange > 0 {
 			j.beginRange--
 			j.inRange++
-			if len(results) > 0 {
-				for _, value := range results {
-					j.parser.Root.Nodes = nodes[i+1:]
-					nextResults, err := j.FindResults(value.Interface())
-					if err != nil {
-						return nil, err
-					}
-					fullResult = append(fullResult, nextResults...)
-				}
-			} else {
-				// If the range has no results, we still need to process the nodes within the range
-				// so the position will advance to the end node
-				j.parser.Root.Nodes = nodes[i+1:]
-				_, err := j.FindResults(nil)
-				if err != nil {
-					return nil, err
-				}
-			}
-			j.inRange--
-
-			// Fast forward to resume processing after the most recent end node that was encountered
-			for k := i + 1; k < len(nodes); k++ {
-				if &nodes[k] == j.lastEndNode {
-					i = k
-					break
-				}
-			}
-			continue
+			for k, value := range results {
+				j.parser.Root.Nodes = nodes[i+1:]
+				if k == len(results)-1 {
+					j.inRange--
+				}
+				nextResults, err := j.FindResults(value.Interface())
+				if err != nil {
+					return nil, err
+				}
+				fullResult = append(fullResult, nextResults...)
+			}
+			break
 		}
 		fullResult = append(fullResult, results)
 	}
 	return fullResult, nil
 }
 
-// EnableJSONOutput changes the PrintResults behavior to return a JSON array of results
-func (j *JSONPath) EnableJSONOutput(v bool) {
-	j.outputJSON = v
-}
-
 // PrintResults writes the results into writer
 func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
-	if j.outputJSON {
-		// convert the []reflect.Value to something that json
-		// will be able to marshal
-		r := make([]interface{}, 0, len(results))
-		for i := range results {
-			r = append(r, results[i].Interface())
-		}
-		results = []reflect.Value{reflect.ValueOf(r)}
-	}
 	for i, r := range results {
-		var text []byte
-		var err error
-		outputJSON := true
-		kind := r.Kind()
-		if kind == reflect.Interface {
-			kind = r.Elem().Kind()
-		}
-		switch kind {
-		case reflect.Map:
-		case reflect.Array:
-		case reflect.Slice:
-		case reflect.Struct:
-		default:
-			outputJSON = false
-		}
-		switch {
-		case outputJSON || j.outputJSON:
-			if j.outputJSON {
-				text, err = json.MarshalIndent(r.Interface(), "", "    ")
-				text = append(text, '\n')
-			} else {
-				text, err = json.Marshal(r.Interface())
-			}
-		default:
-			text, err = j.evalToText(r)
-		}
+		text, err := j.evalToText(r)
 		if err != nil {
 			return err
 		}
@@ -190,9 +132,7 @@ func (j *JSONPath) PrintResults(wr io.Writer, results []reflect.Value) error {
 			return err
 		}
 	}
-
 	return nil
-
 }
 
 // walk visits tree rooted at the given node in DFS order
@@ -272,11 +212,17 @@ func (j *JSONPath) evalIdentifier(input []reflect.Value, node *IdentifierNode) (
 	results := []reflect.Value{}
 	switch node.Name {
 	case "range":
+		j.stack = append(j.stack, j.cur)
 		j.beginRange++
 		results = input
 	case "end":
-		if j.inRange > 0 {
+		if j.endRange < j.inRange { // inside a loop, break the current block
 			j.endRange++
+			break
+		}
+		// the loop is about to end, pop value and continue the following execution
+		if len(j.stack) > 0 {
+			j.cur, j.stack = j.stack[len(j.stack)-1], j.stack[:len(j.stack)-1]
 		} else {
 			return results, fmt.Errorf("not in range, nothing to end")
 		}
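Usage note (illustration, not part of this commit): this file restores the stack-based {range}/{end} bookkeeping shown above (j.stack, j.cur) in place of the newer lastEndNode fast-forward logic. A minimal sketch of the template syntax that exercises this code path, assuming the vendored package; the data shape is hypothetical:

```go
package main

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	// Two items, iterated by a {range ...}{end} block.
	data := map[string]interface{}{
		"items": []interface{}{
			map[string]interface{}{"name": "pod-a"},
			map[string]interface{}{"name": "pod-b"},
		},
	}

	jp := jsonpath.New("names")
	// "range" bumps beginRange and pushes the current scope onto j.stack;
	// "end" pops it back, as in the evalIdentifier hunk above.
	if err := jp.Parse(`{range .items[*]}{.name}{"\n"}{end}`); err != nil {
		panic(err)
	}
	if err := jp.Execute(os.Stdout, data); err != nil { // prints pod-a, pod-b
		panic(err)
	}
}
```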
vendor/k8s.io/client-go/util/jsonpath/parser.go (generated, vendored): 5 changes
@@ -214,11 +214,8 @@ func (p *Parser) parseIdentifier(cur *ListNode) error {
 	return p.parseInsideAction(cur)
 }
 
-// parseRecursive scans the recursive descent operator ..
+// parseRecursive scans the recursive desent operator ..
 func (p *Parser) parseRecursive(cur *ListNode) error {
-	if lastIndex := len(cur.Nodes) - 1; lastIndex >= 0 && cur.Nodes[lastIndex].Type() == NodeRecursive {
-		return fmt.Errorf("invalid multiple recursive descent")
-	}
 	p.pos += len("..")
 	p.consumeText()
 	cur.append(newRecursive())
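Usage note (illustration, not part of this commit): the hunk above removes the parse-time guard against consecutive recursive-descent nodes, so the older parser accepts any `..` sequence and leaves the outcome to evaluation. A minimal sketch of the operator itself, assuming the vendored package; the data is hypothetical:

```go
package main

import (
	"os"

	"k8s.io/client-go/util/jsonpath"
)

func main() {
	data := map[string]interface{}{
		"spec": map[string]interface{}{
			"container": map[string]interface{}{"name": "app"},
		},
	}

	jp := jsonpath.New("recursive")
	// ".." descends the tree recursively and collects every field
	// called "name", at any depth.
	if err := jp.Parse(`{..name}`); err != nil {
		panic(err)
	}
	if err := jp.Execute(os.Stdout, data); err != nil { // prints: app
		panic(err)
	}
}
```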
vendor/k8s.io/client-go/util/workqueue/default_rate_limiters.go (generated, vendored): 48 changes
@@ -62,6 +62,54 @@ func (r *BucketRateLimiter) NumRequeues(item interface{}) int {
 func (r *BucketRateLimiter) Forget(item interface{}) {
 }
 
+// ItemBucketRateLimiter implements a workqueue ratelimiter API using standard rate.Limiter.
+// Each key is using a separate limiter.
+type ItemBucketRateLimiter struct {
+	r     rate.Limit
+	burst int
+
+	limitersLock sync.Mutex
+	limiters     map[interface{}]*rate.Limiter
+}
+
+var _ RateLimiter = &ItemBucketRateLimiter{}
+
+// NewItemBucketRateLimiter creates new ItemBucketRateLimiter instance.
+func NewItemBucketRateLimiter(r rate.Limit, burst int) *ItemBucketRateLimiter {
+	return &ItemBucketRateLimiter{
+		r:        r,
+		burst:    burst,
+		limiters: make(map[interface{}]*rate.Limiter),
+	}
+}
+
+// When returns a time.Duration which we need to wait before item is processed.
+func (r *ItemBucketRateLimiter) When(item interface{}) time.Duration {
+	r.limitersLock.Lock()
+	defer r.limitersLock.Unlock()
+
+	limiter, ok := r.limiters[item]
+	if !ok {
+		limiter = rate.NewLimiter(r.r, r.burst)
+		r.limiters[item] = limiter
+	}
+
+	return limiter.Reserve().Delay()
+}
+
+// NumRequeues returns always 0 (doesn't apply to ItemBucketRateLimiter).
+func (r *ItemBucketRateLimiter) NumRequeues(item interface{}) int {
+	return 0
+}
+
+// Forget removes item from the internal state.
+func (r *ItemBucketRateLimiter) Forget(item interface{}) {
+	r.limitersLock.Lock()
+	defer r.limitersLock.Unlock()
+
+	delete(r.limiters, item)
+}
+
 // ItemExponentialFailureRateLimiter does a simple baseDelay*2^<num-failures> limit
 // dealing with max failures and expiration are up to the caller
 type ItemExponentialFailureRateLimiter struct {
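Usage note (illustration, not part of this commit): ItemBucketRateLimiter keeps one token-bucket rate.Limiter per item key, so each key is throttled independently instead of sharing a single global bucket. A minimal sketch, assuming the vendored client-go revision that ships this type; the key name and rates are hypothetical:

```go
package main

import (
	"fmt"

	"golang.org/x/time/rate"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	// 1 token per second, burst of 5, tracked separately per key.
	rl := workqueue.NewItemBucketRateLimiter(rate.Limit(1), 5)

	for i := 0; i < 7; i++ {
		// The first 5 calls drain the burst (roughly 0s delay); later
		// calls are spaced out by the 1/s refill rate.
		fmt.Println(rl.When("node-1"))
	}

	rl.Forget("node-1") // drop the per-key limiter state entirely
}
```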
vendor/k8s.io/client-go/util/workqueue/delaying_queue.go (generated, vendored): 13 changes
@@ -38,12 +38,6 @@ func NewDelayingQueue() DelayingInterface {
 	return NewDelayingQueueWithCustomClock(clock.RealClock{}, "")
 }
 
-// NewDelayingQueueWithCustomQueue constructs a new workqueue with ability to
-// inject custom queue Interface instead of the default one
-func NewDelayingQueueWithCustomQueue(q Interface, name string) DelayingInterface {
-	return newDelayingQueue(clock.RealClock{}, q, name)
-}
-
 // NewNamedDelayingQueue constructs a new named workqueue with delayed queuing ability
 func NewNamedDelayingQueue(name string) DelayingInterface {
 	return NewDelayingQueueWithCustomClock(clock.RealClock{}, name)
@@ -52,12 +46,8 @@ func NewNamedDelayingQueue(name string) DelayingInterface {
 // NewDelayingQueueWithCustomClock constructs a new named workqueue
 // with ability to inject real or fake clock for testing purposes
 func NewDelayingQueueWithCustomClock(clock clock.Clock, name string) DelayingInterface {
-	return newDelayingQueue(clock, NewNamed(name), name)
-}
-
-func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType {
 	ret := &delayingType{
-		Interface: q,
+		Interface: NewNamed(name),
 		clock:     clock,
 		heartbeat: clock.NewTicker(maxWait),
 		stopCh:    make(chan struct{}),
@@ -66,6 +56,7 @@ func newDelayingQueue(clock clock.Clock, q Interface, name string) *delayingType
 	}
 
 	go ret.waitingLoop()
+
 	return ret
 }
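Usage note (illustration, not part of this commit): with NewDelayingQueueWithCustomQueue removed, the clock-injecting constructor is the remaining seam for testing delayed delivery. A minimal sketch using a fake clock, assuming the vendored revision and the k8s.io/apimachinery/pkg/util/clock package it imports at this vintage:

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/clock"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	fakeClock := clock.NewFakeClock(time.Now())
	q := workqueue.NewDelayingQueueWithCustomClock(fakeClock, "test")

	q.AddAfter("item", 50*time.Millisecond)
	fmt.Println(q.Len()) // 0: the item is still waiting on the clock

	// Advancing the fake clock past the delay lets the background
	// waitingLoop (asynchronously) move the item onto the queue.
	fakeClock.Step(100 * time.Millisecond)
}
```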
vendor/k8s.io/client-go/util/workqueue/parallelizer.go (generated, vendored): 72 changes
@@ -25,77 +25,39 @@ import (
 
 type DoWorkPieceFunc func(piece int)
 
-type options struct {
-	chunkSize int
-}
-
-type Options func(*options)
-
-// WithChunkSize allows to set chunks of work items to the workers, rather than
-// processing one by one.
-// It is recommended to use this option if the number of pieces significantly
-// higher than the number of workers and the work done for each item is small.
-func WithChunkSize(c int) func(*options) {
-	return func(o *options) {
-		o.chunkSize = c
-	}
-}
-
 // ParallelizeUntil is a framework that allows for parallelizing N
 // independent pieces of work until done or the context is canceled.
-func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc, opts ...Options) {
-	if pieces == 0 {
-		return
-	}
-	o := options{}
-	for _, opt := range opts {
-		opt(&o)
-	}
-	chunkSize := o.chunkSize
-	if chunkSize < 1 {
-		chunkSize = 1
-	}
-
-	chunks := ceilDiv(pieces, chunkSize)
-	toProcess := make(chan int, chunks)
-	for i := 0; i < chunks; i++ {
-		toProcess <- i
-	}
-	close(toProcess)
-
+func ParallelizeUntil(ctx context.Context, workers, pieces int, doWorkPiece DoWorkPieceFunc) {
 	var stop <-chan struct{}
 	if ctx != nil {
 		stop = ctx.Done()
 	}
-	if chunks < workers {
-		workers = chunks
+
+	toProcess := make(chan int, pieces)
+	for i := 0; i < pieces; i++ {
+		toProcess <- i
+	}
+	close(toProcess)
+
+	if pieces < workers {
+		workers = pieces
 	}
 
 	wg := sync.WaitGroup{}
 	wg.Add(workers)
 	for i := 0; i < workers; i++ {
 		go func() {
 			defer utilruntime.HandleCrash()
			defer wg.Done()
-			for chunk := range toProcess {
-				start := chunk * chunkSize
-				end := start + chunkSize
-				if end > pieces {
-					end = pieces
-				}
-				for p := start; p < end; p++ {
-					select {
-					case <-stop:
-						return
-					default:
-						doWorkPiece(p)
-					}
+			for piece := range toProcess {
+				select {
+				case <-stop:
+					return
+				default:
+					doWorkPiece(piece)
 				}
 			}
 		}()
 	}
 	wg.Wait()
 }
-
-func ceilDiv(a, b int) int {
-	return (a + b - 1) / b
-}
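Usage note (illustration, not part of this commit): after this revert, ParallelizeUntil feeds workers one piece index at a time (no chunking Options, no ceilDiv) and caps the worker count at the number of pieces. A minimal sketch, assuming the vendored revision; the data is hypothetical:

```go
package main

import (
	"context"
	"fmt"
	"sync/atomic"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	nums := []int64{3, 1, 4, 1, 5, 9, 2, 6}
	var sum int64

	// Four workers pull piece indices from the shared channel until it
	// drains or the context is canceled; each index is handled once.
	workqueue.ParallelizeUntil(context.TODO(), 4, len(nums), func(piece int) {
		atomic.AddInt64(&sum, nums[piece])
	})

	fmt.Println(sum) // 31
}
```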