Work on #61: Add support for ICMP

+ Update dependencies
TwinProduction committed 2020-12-25 00:07:18 -05:00
parent c86173d46f
commit 83a5813daf
1004 changed files with 182274 additions and 64323 deletions

View File

@ -19,13 +19,17 @@ package runtime
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/url"
"reflect"
"strconv"
"strings"
"k8s.io/apimachinery/pkg/conversion/queryparams"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
)
// codec binds an encoder and decoder.
@ -100,10 +104,19 @@ type NoopEncoder struct {
var _ Serializer = NoopEncoder{}
const noopEncoderIdentifier Identifier = "noop"
func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
// There is no need to handle runtime.CacheableObject, as we don't
// process the obj at all.
return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
}
// Identifier implements runtime.Encoder interface.
func (n NoopEncoder) Identifier() Identifier {
return noopEncoderIdentifier
}
// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
type NoopDecoder struct {
Encoder
@ -193,19 +206,51 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u
type base64Serializer struct {
Encoder
Decoder
identifier Identifier
}
func NewBase64Serializer(e Encoder, d Decoder) Serializer {
return &base64Serializer{e, d}
return &base64Serializer{
Encoder: e,
Decoder: d,
identifier: identifier(e),
}
}
func identifier(e Encoder) Identifier {
result := map[string]string{
"name": "base64",
}
if e != nil {
result["encoder"] = string(e.Identifier())
}
identifier, err := json.Marshal(result)
if err != nil {
klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err)
}
return Identifier(identifier)
}
func (s base64Serializer) Encode(obj Object, stream io.Writer) error {
if co, ok := obj.(CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, stream)
}
return s.doEncode(obj, stream)
}
func (s base64Serializer) doEncode(obj Object, stream io.Writer) error {
e := base64.NewEncoder(base64.StdEncoding, stream)
err := s.Encoder.Encode(obj, e)
e.Close()
return err
}
// Identifier implements runtime.Encoder interface.
func (s base64Serializer) Identifier() Identifier {
return s.identifier
}
func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
out := make([]byte, base64.StdEncoding.DecodedLen(len(data)))
n, err := base64.StdEncoding.Decode(out, data)
@ -238,6 +283,11 @@ var (
DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{}
)
const (
internalGroupVersionerIdentifier = "internal"
disabledGroupVersionerIdentifier = "disabled"
)
type internalGroupVersioner struct{}
// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version.
@ -253,6 +303,11 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi
return schema.GroupVersionKind{}, false
}
// Identifier implements GroupVersioner interface.
func (internalGroupVersioner) Identifier() string {
return internalGroupVersionerIdentifier
}
type disabledGroupVersioner struct{}
// KindForGroupVersionKinds returns false for any input.
@ -260,19 +315,9 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi
return schema.GroupVersionKind{}, false
}
// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind.
type GroupVersioners []GroupVersioner
// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred.
func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
for _, gv := range gvs {
target, ok := gv.KindForGroupVersionKinds(kinds)
if !ok {
continue
}
return target, true
}
return schema.GroupVersionKind{}, false
// Identifier implements GroupVersioner interface.
func (disabledGroupVersioner) Identifier() string {
return disabledGroupVersionerIdentifier
}
// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner
@ -330,3 +375,22 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio
}
return schema.GroupVersionKind{}, false
}
// Identifier implements GroupVersioner interface.
func (v multiGroupVersioner) Identifier() string {
groupKinds := make([]string, 0, len(v.acceptedGroupKinds))
for _, gk := range v.acceptedGroupKinds {
groupKinds = append(groupKinds, gk.String())
}
result := map[string]string{
"name": "multi",
"target": v.target.String(),
"accepted": strings.Join(groupKinds, ","),
"coerce": strconv.FormatBool(v.coerce),
}
identifier, err := json.Marshal(result)
if err != nil {
klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err)
}
return string(identifier)
}
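The codec.go hunks above follow one pattern: every encoder gains a stable Identifier, and Encode routes through CacheableObject.CacheEncode when the object supports caching (see base64Serializer.Encode). A minimal sketch of that pattern outside the diff; countingEncoder and its delegate are hypothetical, only the runtime interfaces come from the code above:

package example

import (
    "io"

    "k8s.io/apimachinery/pkg/runtime"
)

// countingEncoder is a hypothetical wrapper that mirrors the pattern above: expose a
// stable Identifier derived from the delegate, and let CacheableObject instances cache
// encoded bytes keyed by that identifier.
type countingEncoder struct {
    delegate runtime.Encoder
    calls    int
}

func (c *countingEncoder) Identifier() runtime.Identifier {
    // Derive the identifier from the wrapped encoder, mirroring identifier() above.
    return runtime.Identifier("counting:" + string(c.delegate.Identifier()))
}

func (c *countingEncoder) Encode(obj runtime.Object, w io.Writer) error {
    if co, ok := obj.(runtime.CacheableObject); ok {
        return co.CacheEncode(c.Identifier(), c.doEncode, w)
    }
    return c.doEncode(obj, w)
}

func (c *countingEncoder) doEncode(obj runtime.Object, w io.Writer) error {
    c.calls++ // counts only real encodes, not cache hits
    return c.delegate.Encode(obj, w)
}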

View File

@ -21,6 +21,7 @@ import (
"reflect"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/json"
)
// CheckCodec makes sure that the codec can encode objects like internalType,
@ -32,7 +33,14 @@ func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersi
return fmt.Errorf("Internal type not encodable: %v", err)
}
for _, et := range externalTypes {
exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String()))
typeMeta := TypeMeta{
Kind: et.Kind,
APIVersion: et.GroupVersion().String(),
}
exBytes, err := json.Marshal(&typeMeta)
if err != nil {
return err
}
obj, err := Decode(c, exBytes)
if err != nil {
return fmt.Errorf("external type %s not interpretable: %v", et, err)

View File

@ -53,27 +53,21 @@ func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, st
return key, key
}
// DefaultStringConversions are helpers for converting []string and string to real values.
var DefaultStringConversions = []interface{}{
Convert_Slice_string_To_string,
Convert_Slice_string_To_int,
Convert_Slice_string_To_bool,
Convert_Slice_string_To_int64,
}
func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error {
if len(*input) == 0 {
func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error {
if len(*in) == 0 {
*out = ""
return nil
}
*out = (*input)[0]
*out = (*in)[0]
return nil
}
func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error {
if len(*input) == 0 {
func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error {
if len(*in) == 0 {
*out = 0
return nil
}
str := (*input)[0]
str := (*in)[0]
i, err := strconv.Atoi(str)
if err != nil {
return err
@ -83,15 +77,16 @@ func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope)
}
// Convert_Slice_string_To_bool will convert a string parameter to boolean.
// Only the absence of a value, a value of "false", or a value of "0" resolve to false.
// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
// value of "0" resolve to false.
// Any other value (including empty string) resolves to true.
func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error {
if len(*input) == 0 {
func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error {
if len(*in) == 0 {
*out = false
return nil
}
switch strings.ToLower((*input)[0]) {
case "false", "0":
switch {
case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
*out = false
default:
*out = true
@ -99,15 +94,103 @@ func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope
return nil
}
func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error {
if len(*input) == 0 {
*out = 0
// Convert_Slice_string_To_Pointer_bool will convert a string parameter to a pointer to a boolean.
// Only the absence of a value (i.e. zero-length slice), a value of "false", or a
// value of "0" resolve to false.
// Any other value (including empty string) resolves to true.
func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error {
if len(*in) == 0 {
boolVar := false
*out = &boolVar
return nil
}
str := (*input)[0]
i, err := strconv.ParseInt(str, 10, 64)
switch {
case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"):
boolVar := false
*out = &boolVar
default:
boolVar := true
*out = &boolVar
}
return nil
}
func string_to_int64(in string) (int64, error) {
return strconv.ParseInt(in, 10, 64)
}
func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error {
if in == nil {
*out = 0
return nil
}
i, err := string_to_int64(*in)
if err != nil {
return err
}
*out = i
return nil
}
func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error {
if len(*in) == 0 {
*out = 0
return nil
}
i, err := string_to_int64((*in)[0])
if err != nil {
return err
}
*out = i
return nil
}
func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error {
if in == nil {
*out = nil
return nil
}
i, err := string_to_int64(*in)
if err != nil {
return err
}
*out = &i
return nil
}
func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error {
if len(*in) == 0 {
*out = nil
return nil
}
i, err := string_to_int64((*in)[0])
if err != nil {
return err
}
*out = &i
return nil
}
func RegisterStringConversions(s *Scheme) error {
if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope)
}); err != nil {
return err
}
return nil
}
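The converters above are wired into a Scheme through RegisterStringConversions, which replaces the old DefaultStringConversions slice. A short usage sketch; the helper names are illustrative, and passing nil for the conversion.Scope is fine because these converters ignore it:

package example

import "k8s.io/apimachinery/pkg/runtime"

// newQueryScheme registers the []string converters shown above on a fresh Scheme.
func newQueryScheme() (*runtime.Scheme, error) {
    s := runtime.NewScheme()
    if err := runtime.RegisterStringConversions(s); err != nil {
        return nil, err
    }
    return s, nil
}

// parseEnabledFlag applies the "0"/"false" rule directly.
func parseEnabledFlag(values []string) (bool, error) {
    var enabled bool
    if err := runtime.Convert_Slice_string_To_bool(&values, &enabled, nil); err != nil {
        return false, err
    }
    return enabled, nil // []string{"0"} and []string{"false"} yield false; anything else, true
}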

View File

@ -17,7 +17,6 @@ limitations under the License.
package runtime
import (
"bytes"
encodingjson "encoding/json"
"fmt"
"math"
@ -32,8 +31,9 @@ import (
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"sigs.k8s.io/structured-merge-diff/v4/value"
"k8s.io/klog"
"k8s.io/klog/v2"
)
// UnstructuredConverter is an interface for converting between interface{}
@ -68,13 +68,8 @@ func newFieldsCache() *fieldsCache {
}
var (
marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem()
unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem()
mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{})
stringType = reflect.TypeOf(string(""))
int64Type = reflect.TypeOf(int64(0))
float64Type = reflect.TypeOf(float64(0))
boolType = reflect.TypeOf(bool(false))
fieldCache = newFieldsCache()
// DefaultUnstructuredConverter performs unstructured to Go typed object conversions.
@ -94,7 +89,7 @@ func parseBool(key string) bool {
}
value, err := strconv.ParseBool(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key))
utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key))
}
return value
}
@ -208,13 +203,9 @@ func fromUnstructured(sv, dv reflect.Value) error {
}
// Check if the object has a custom JSON marshaller/unmarshaller.
if reflect.PtrTo(dt).Implements(unmarshalerType) {
data, err := json.Marshal(sv.Interface())
if err != nil {
return fmt.Errorf("error encoding %s to json: %v", st.String(), err)
}
unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler)
return unmarshaler.UnmarshalJSON(data)
entry := value.TypeReflectEntryOf(dv.Type())
if entry.CanConvertFromUnstructured() {
return entry.FromUnstructured(sv, dv)
}
switch dt.Kind() {
@ -256,6 +247,7 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo {
for i := range items {
if items[i] == "omitempty" {
info.omitempty = true
break
}
}
}
@ -483,112 +475,28 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error {
return json.Unmarshal(data, u)
}
var (
nullBytes = []byte("null")
trueBytes = []byte("true")
falseBytes = []byte("false")
)
func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) {
// Check value receivers if v is not a pointer and pointer receivers if v is a pointer
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
// Check pointer receivers if v is not a pointer
if v.Kind() != reflect.Ptr && v.CanAddr() {
v = v.Addr()
if v.Type().Implements(marshalerType) {
return v.Interface().(encodingjson.Marshaler), true
}
}
return nil, false
}
func toUnstructured(sv, dv reflect.Value) error {
// Check if the object has a custom JSON marshaller/unmarshaller.
if marshaler, ok := getMarshaler(sv); ok {
if sv.Kind() == reflect.Ptr && sv.IsNil() {
// We're done - we don't need to store anything.
return nil
}
data, err := marshaler.MarshalJSON()
// Check if the object has a custom string converter.
entry := value.TypeReflectEntryOf(sv.Type())
if entry.CanConvertToUnstructured() {
v, err := entry.ToUnstructured(sv)
if err != nil {
return err
}
switch {
case len(data) == 0:
return fmt.Errorf("error decoding from json: empty value")
case bytes.Equal(data, nullBytes):
// We're done - we don't need to store anything.
case bytes.Equal(data, trueBytes):
dv.Set(reflect.ValueOf(true))
case bytes.Equal(data, falseBytes):
dv.Set(reflect.ValueOf(false))
case data[0] == '"':
var result string
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding string from json: %v", err)
}
dv.Set(reflect.ValueOf(result))
case data[0] == '{':
result := make(map[string]interface{})
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding object from json: %v", err)
}
dv.Set(reflect.ValueOf(result))
case data[0] == '[':
result := make([]interface{}, 0)
err := json.Unmarshal(data, &result)
if err != nil {
return fmt.Errorf("error decoding array from json: %v", err)
}
dv.Set(reflect.ValueOf(result))
default:
var (
resultInt int64
resultFloat float64
err error
)
if err = json.Unmarshal(data, &resultInt); err == nil {
dv.Set(reflect.ValueOf(resultInt))
} else if err = json.Unmarshal(data, &resultFloat); err == nil {
dv.Set(reflect.ValueOf(resultFloat))
} else {
return fmt.Errorf("error decoding number from json: %v", err)
}
if v != nil {
dv.Set(reflect.ValueOf(v))
}
return nil
}
st, dt := sv.Type(), dv.Type()
st := sv.Type()
switch st.Kind() {
case reflect.String:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
dv.Set(reflect.New(stringType))
}
dv.Set(reflect.ValueOf(sv.String()))
return nil
case reflect.Bool:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
dv.Set(reflect.New(boolType))
}
dv.Set(reflect.ValueOf(sv.Bool()))
return nil
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
dv.Set(reflect.New(int64Type))
}
dv.Set(reflect.ValueOf(sv.Int()))
return nil
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
@ -596,15 +504,9 @@ func toUnstructured(sv, dv reflect.Value) error {
if uVal > math.MaxInt64 {
return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal)
}
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
dv.Set(reflect.New(int64Type))
}
dv.Set(reflect.ValueOf(int64(uVal)))
return nil
case reflect.Float32, reflect.Float64:
if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 {
dv.Set(reflect.New(float64Type))
}
dv.Set(reflect.ValueOf(sv.Float()))
return nil
case reflect.Map:
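The converter.go hunks swap the hand-rolled JSON marshaler handling for structured-merge-diff's TypeReflectEntryOf, but the public entry points are unchanged. A small round-trip sketch through DefaultUnstructuredConverter; the widget type is hypothetical:

package example

import "k8s.io/apimachinery/pkg/runtime"

// widget is an illustrative typed struct; the converter only relies on its json tags.
type widget struct {
    Name     string `json:"name"`
    Replicas int64  `json:"replicas,omitempty"`
}

// roundTrip exercises the entry points whose internals changed above.
func roundTrip(in *widget) (*widget, error) {
    u, err := runtime.DefaultUnstructuredConverter.ToUnstructured(in)
    if err != nil {
        return nil, err
    }
    out := &widget{}
    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(u, out); err != nil {
        return nil, err
    }
    return out, nil
}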

View File

@ -134,9 +134,16 @@ func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Objec
return nil
}
func DefaultEmbeddedConversions() []interface{} {
return []interface{}{
Convert_runtime_Object_To_runtime_RawExtension,
Convert_runtime_RawExtension_To_runtime_Object,
func RegisterEmbeddedConversions(s *Scheme) error {
if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope)
}); err != nil {
return err
}
if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error {
return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope)
}); err != nil {
return err
}
return nil
}

View File

@ -120,3 +120,32 @@ func IsMissingVersion(err error) bool {
_, ok := err.(*missingVersionErr)
return ok
}
// strictDecodingError is a base error type that is returned by a strict Decoder such
// as UniversalStrictDecoder.
type strictDecodingError struct {
message string
data string
}
// NewStrictDecodingError creates a new strictDecodingError object.
func NewStrictDecodingError(message string, data string) error {
return &strictDecodingError{
message: message,
data: data,
}
}
func (e *strictDecodingError) Error() string {
return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message)
}
// IsStrictDecodingError returns true if the error indicates that the provided object
// contains strictness violations.
func IsStrictDecodingError(err error) bool {
if err == nil {
return false
}
_, ok := err.(*strictDecodingError)
return ok
}
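strictDecodingError gives strict decoders a typed error instead of bare strings, so callers can branch on IsStrictDecodingError. A brief sketch under assumed helper names:

package example

import "k8s.io/apimachinery/pkg/runtime"

// rejectUnknownField is a hypothetical producer: a strict decoder would wrap the
// violation it found rather than returning a plain string error.
func rejectUnknownField(field string, data []byte) error {
    return runtime.NewStrictDecodingError("unknown field "+field, string(data))
}

// isTolerable shows the consumer side: strictness violations mean the data was
// otherwise decodable, so lenient callers can downgrade them to warnings.
func isTolerable(err error) bool {
    return err == nil || runtime.IsStrictDecodingError(err)
}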

View File

@ -17,31 +17,18 @@ limitations under the License.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
/*
Package runtime is a generated protocol buffer package.
It is generated from these files:
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto
It has these top-level messages:
RawExtension
TypeMeta
Unknown
*/
package runtime
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
io "io"
math "math"
math_bits "math/bits"
reflect "reflect"
strings "strings"
reflect "reflect"
io "io"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
@ -53,29 +40,134 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func (m *RawExtension) Reset() { *m = RawExtension{} }
func (*RawExtension) ProtoMessage() {}
func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
func (m *RawExtension) Reset() { *m = RawExtension{} }
func (*RawExtension) ProtoMessage() {}
func (*RawExtension) Descriptor() ([]byte, []int) {
return fileDescriptor_9d3c45d7f546725c, []int{0}
}
func (m *RawExtension) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *RawExtension) XXX_Merge(src proto.Message) {
xxx_messageInfo_RawExtension.Merge(m, src)
}
func (m *RawExtension) XXX_Size() int {
return m.Size()
}
func (m *RawExtension) XXX_DiscardUnknown() {
xxx_messageInfo_RawExtension.DiscardUnknown(m)
}
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} }
var xxx_messageInfo_RawExtension proto.InternalMessageInfo
func (m *Unknown) Reset() { *m = Unknown{} }
func (*Unknown) ProtoMessage() {}
func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} }
func (m *TypeMeta) Reset() { *m = TypeMeta{} }
func (*TypeMeta) ProtoMessage() {}
func (*TypeMeta) Descriptor() ([]byte, []int) {
return fileDescriptor_9d3c45d7f546725c, []int{1}
}
func (m *TypeMeta) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *TypeMeta) XXX_Merge(src proto.Message) {
xxx_messageInfo_TypeMeta.Merge(m, src)
}
func (m *TypeMeta) XXX_Size() int {
return m.Size()
}
func (m *TypeMeta) XXX_DiscardUnknown() {
xxx_messageInfo_TypeMeta.DiscardUnknown(m)
}
var xxx_messageInfo_TypeMeta proto.InternalMessageInfo
func (m *Unknown) Reset() { *m = Unknown{} }
func (*Unknown) ProtoMessage() {}
func (*Unknown) Descriptor() ([]byte, []int) {
return fileDescriptor_9d3c45d7f546725c, []int{2}
}
func (m *Unknown) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
b = b[:cap(b)]
n, err := m.MarshalToSizedBuffer(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
func (m *Unknown) XXX_Merge(src proto.Message) {
xxx_messageInfo_Unknown.Merge(m, src)
}
func (m *Unknown) XXX_Size() int {
return m.Size()
}
func (m *Unknown) XXX_DiscardUnknown() {
xxx_messageInfo_Unknown.DiscardUnknown(m)
}
var xxx_messageInfo_Unknown proto.InternalMessageInfo
func init() {
proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension")
proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta")
proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown")
}
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c)
}
var fileDescriptor_9d3c45d7f546725c = []byte{
// 378 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
}
func (m *RawExtension) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@ -83,23 +175,29 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) {
}
func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
if m.Raw != nil {
dAtA[i] = 0xa
i++
i -= len(m.Raw)
copy(dAtA[i:], m.Raw)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
i += copy(dAtA[i:], m.Raw)
i--
dAtA[i] = 0xa
}
return i, nil
return len(dAtA) - i, nil
}
func (m *TypeMeta) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@ -107,25 +205,32 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) {
}
func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i += copy(dAtA[i:], m.APIVersion)
dAtA[i] = 0x12
i++
i -= len(m.Kind)
copy(dAtA[i:], m.Kind)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind)))
i += copy(dAtA[i:], m.Kind)
return i, nil
i--
dAtA[i] = 0x12
i -= len(m.APIVersion)
copy(dAtA[i:], m.APIVersion)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion)))
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func (m *Unknown) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
n, err := m.MarshalToSizedBuffer(dAtA[:size])
if err != nil {
return nil, err
}
@ -133,45 +238,60 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) {
}
func (m *Unknown) MarshalTo(dAtA []byte) (int, error) {
var i int
size := m.Size()
return m.MarshalToSizedBuffer(dAtA[:size])
}
func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) {
i := len(dAtA)
_ = i
var l int
_ = l
dAtA[i] = 0xa
i++
i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size()))
n1, err := m.TypeMeta.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n1
if m.Raw != nil {
dAtA[i] = 0x12
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
i += copy(dAtA[i:], m.Raw)
}
dAtA[i] = 0x1a
i++
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding)))
i += copy(dAtA[i:], m.ContentEncoding)
dAtA[i] = 0x22
i++
i -= len(m.ContentType)
copy(dAtA[i:], m.ContentType)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType)))
i += copy(dAtA[i:], m.ContentType)
return i, nil
i--
dAtA[i] = 0x22
i -= len(m.ContentEncoding)
copy(dAtA[i:], m.ContentEncoding)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding)))
i--
dAtA[i] = 0x1a
if m.Raw != nil {
i -= len(m.Raw)
copy(dAtA[i:], m.Raw)
i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw)))
i--
dAtA[i] = 0x12
}
{
size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i])
if err != nil {
return 0, err
}
i -= size
i = encodeVarintGenerated(dAtA, i, uint64(size))
}
i--
dAtA[i] = 0xa
return len(dAtA) - i, nil
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
offset -= sovGenerated(v)
base := offset
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
return base
}
func (m *RawExtension) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
if m.Raw != nil {
@ -182,6 +302,9 @@ func (m *RawExtension) Size() (n int) {
}
func (m *TypeMeta) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = len(m.APIVersion)
@ -192,6 +315,9 @@ func (m *TypeMeta) Size() (n int) {
}
func (m *Unknown) Size() (n int) {
if m == nil {
return 0
}
var l int
_ = l
l = m.TypeMeta.Size()
@ -208,14 +334,7 @@ func (m *Unknown) Size() (n int) {
}
func sovGenerated(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
return (math_bits.Len64(x|1) + 6) / 7
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
@ -277,7 +396,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -305,7 +424,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@ -314,6 +433,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -331,6 +453,9 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@ -358,7 +483,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -386,7 +511,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -396,6 +521,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -415,7 +543,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -425,6 +553,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -439,6 +570,9 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@ -466,7 +600,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
wire |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -494,7 +628,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
msglen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@ -503,6 +637,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + msglen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -524,7 +661,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
@ -533,6 +670,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -555,7 +695,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -565,6 +705,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -584,7 +727,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
@ -594,6 +737,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthGenerated
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
@ -608,6 +754,9 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
@ -623,6 +772,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error {
func skipGenerated(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
depth := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
@ -654,10 +804,8 @@ func skipGenerated(dAtA []byte) (n int, err error) {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
@ -674,85 +822,34 @@ func skipGenerated(dAtA []byte) (n int, err error) {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
return iNdEx, nil
iNdEx += length
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipGenerated(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
depth++
case 4:
return iNdEx, nil
if depth == 0 {
return 0, ErrUnexpectedEndOfGroupGenerated
}
depth--
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
if iNdEx < 0 {
return 0, ErrInvalidLengthGenerated
}
if depth == 0 {
return iNdEx, nil
}
}
panic("unreachable")
return 0, io.ErrUnexpectedEOF
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
)
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated)
}
var fileDescriptorGenerated = []byte{
// 378 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31,
0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5,
0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74,
0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65,
0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b,
0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05,
0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc,
0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b,
0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd,
0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff,
0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20,
0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64,
0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1,
0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1,
0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd,
0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98,
0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e,
0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19,
0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f,
0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3,
0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68,
0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6,
0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00,
0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00,
}

View File

@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
func SetField(src interface{}, v reflect.Value, fieldName string) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
srcValue := reflect.ValueOf(src)
if srcValue.Type().AssignableTo(field.Type()) {
@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error {
func Field(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
destValue, err := conversion.EnforcePtr(dest)
if err != nil {
@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error {
func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
v, err := conversion.EnforcePtr(dest)
if err != nil {
@ -210,3 +210,50 @@ type defaultFramer struct{}
func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r }
func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w }
// WithVersionEncoder serializes an object and ensures the GVK is set.
type WithVersionEncoder struct {
Version GroupVersioner
Encoder
ObjectTyper
}
// Encode does not do conversion. It sets the gvk during serialization.
func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error {
gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
if err != nil {
if IsNotRegisteredError(err) {
return e.Encoder.Encode(obj, stream)
}
return err
}
kind := obj.GetObjectKind()
oldGVK := kind.GroupVersionKind()
gvk := gvks[0]
if e.Version != nil {
preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
if ok {
gvk = preferredGVK
}
}
kind.SetGroupVersionKind(gvk)
err = e.Encoder.Encode(obj, stream)
kind.SetGroupVersionKind(oldGVK)
return err
}
// WithoutVersionDecoder clears the group version kind of a deserialized object.
type WithoutVersionDecoder struct {
Decoder
}
// Decode does not do conversion. It removes the gvk during deserialization.
func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
obj, gvk, err := d.Decoder.Decode(data, defaults, into)
if obj != nil {
kind := obj.GetObjectKind()
// clearing the gvk is just a convention of a codec
kind.SetGroupVersionKind(schema.GroupVersionKind{})
}
return obj, gvk, err
}
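WithVersionEncoder stamps the resolved group/version/kind onto the object for the duration of the write, then restores the previous kind. A hedged sketch of wrapping an arbitrary encoder; every parameter is assumed to be supplied by the caller:

package example

import (
    "io"

    "k8s.io/apimachinery/pkg/runtime"
)

// encodeWithGVK is an illustrative helper: it wraps any encoder with WithVersionEncoder
// so the serialized bytes carry the group/version/kind resolved by the typer
// (see WithVersionEncoder.Encode above).
func encodeWithGVK(obj runtime.Object, enc runtime.Encoder, typer runtime.ObjectTyper, gv runtime.GroupVersioner, w io.Writer) error {
    return runtime.WithVersionEncoder{
        Version:     gv,
        Encoder:     enc,
        ObjectTyper: typer,
    }.Encode(obj, w)
}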

View File

@ -37,13 +37,36 @@ type GroupVersioner interface {
// Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type.
// Sophisticated implementations may use additional information about the input kinds to pick a destination kind.
KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool)
// Identifier returns a string representation of the group versioner.
// Identifiers of two different group versioners should be equal only if, for every
// input set of kinds, they return the same result.
Identifier() string
}
// Identifier represents an identifier.
// Identifiers of two different objects should be equal if and only if, for every
// input, the output they produce is exactly the same.
type Identifier string
// Encoder writes objects to a serialized form
type Encoder interface {
// Encode writes an object to a stream. Implementations may return errors if the versions are
// incompatible, or if no conversion is defined.
Encode(obj Object, w io.Writer) error
// Identifier returns an identifier of the encoder.
// Identifiers of two different encoders should be equal if and only if, for every input
// object, both encoders produce the same representation.
//
// Identifier is intended for use with the CacheableObject#CacheEncode method. In order to
// correctly handle CacheableObject, the Encode() method should look similar to the example
// below, where doEncode() is the encoding logic of the implementing encoder:
// func (e *MyEncoder) Encode(obj Object, w io.Writer) error {
// if co, ok := obj.(CacheableObject); ok {
// return co.CacheEncode(e.Identifier(), e.doEncode, w)
// }
// return e.doEncode(obj, w)
// }
Identifier() Identifier
}
// Decoder attempts to load an object from data.
@ -91,6 +114,10 @@ type Framer interface {
type SerializerInfo struct {
// MediaType is the value that represents this serializer over the wire.
MediaType string
// MediaTypeType is the first part of the MediaType ("application" in "application/json").
MediaTypeType string
// MediaTypeSubType is the second part of the MediaType ("json" in "application/json").
MediaTypeSubType string
// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
EncodesAsText bool
// Serializer is the individual object serializer for this media type.
@ -128,6 +155,28 @@ type NegotiatedSerializer interface {
DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder
}
// ClientNegotiator handles turning an HTTP content type into the appropriate encoder.
// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from
// a NegotiatedSerializer.
type ClientNegotiator interface {
// Encoder returns the appropriate encoder for the provided contentType (e.g. application/json)
// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
// a NegotiateError will be returned. The current client implementations consider params to be
// optional modifiers to the contentType and will ignore unrecognized parameters.
Encoder(contentType string, params map[string]string) (Encoder, error)
// Decoder returns the appropriate decoder for the provided contentType (e.g. application/json)
// and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found
// a NegotiateError will be returned. The current client implementations consider params to be
// optional modifiers to the contentType and will ignore unrecognized parameters.
Decoder(contentType string, params map[string]string) (Decoder, error)
// StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g.
// application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no
// serializer is found a NegotiateError will be returned. The Serializer and Framer will always
// be returned if a Decoder is returned. The current client implementations consider params to be
// optional modifiers to the contentType and will ignore unrecognized parameters.
StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error)
}
// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers
// that can read and write data at rest. This would commonly be used by client tools that must
// read files, or server side storage interfaces that persist restful objects.
@ -206,6 +255,25 @@ type ObjectCreater interface {
New(kind schema.GroupVersionKind) (out Object, err error)
}
// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource
type EquivalentResourceMapper interface {
// EquivalentResourcesFor returns a list of resources that address the same underlying data as resource.
// If subresource is specified, only equivalent resources which also have the same subresource are included.
// The specified resource can be included in the returned list.
EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource
// KindFor returns the kind expected by the specified resource[/subresource].
// A zero value is returned if the kind is unknown.
KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind
}
// EquivalentResourceRegistry provides an EquivalentResourceMapper interface,
// and allows registering known resource[/subresource] -> kind
type EquivalentResourceRegistry interface {
EquivalentResourceMapper
// RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind.
RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind)
}
// ResourceVersioner provides methods for setting and retrieving
// the resource version from an API object.
type ResourceVersioner interface {
@ -233,10 +301,34 @@ type Object interface {
DeepCopyObject() Object
}
// CacheableObject allows an object to cache its different serializations
// to avoid performing the same serialization multiple times.
type CacheableObject interface {
// CacheEncode writes an object to a stream. The <encode> function will
// be used in case of cache miss. The <encode> function takes ownership
// of the object.
// If CacheableObject is a wrapper, then deep-copy of the wrapped object
// should be passed to <encode> function.
// CacheEncode assumes that for two different calls with the same <id>,
// <encode> function will also be the same.
CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error
// GetObject returns a deep-copy of an object to be encoded - the caller of
// GetObject() is the owner of returned object. The reason for making a copy
// is to avoid bugs, where caller modifies the object and forgets to copy it,
// thus modifying the object for everyone.
// The object returned by GetObject should be the same as the one that is supposed
// to be passed to <encode> function in CacheEncode method.
// If CacheableObject is a wrapper, the copy of wrapped object should be returned.
GetObject() Object
}
// Unstructured objects store values as map[string]interface{}, with only values that can be serialized
// to JSON allowed.
type Unstructured interface {
Object
// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
NewEmptyInstance() Unstructured
// UnstructuredContent returns a non-nil map with this object's contents. Values may be
// []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to
// and from JSON. SetUnstructuredContent should be used to mutate the contents.

98  vendor/k8s.io/apimachinery/pkg/runtime/mapper.go generated vendored Normal file
View File

@ -0,0 +1,98 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"sync"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type equivalentResourceRegistry struct {
// keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups).
// if nil, or if "" is returned, resource.String() is used as the key
keyFunc func(resource schema.GroupResource) string
// resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources).
// main resources are stored with subresource="".
resources map[string]map[string][]schema.GroupVersionResource
// kinds maps resource -> subresource -> kind
kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind
// keys caches the computed key for each GroupResource
keys map[schema.GroupResource]string
mutex sync.RWMutex
}
var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil)
var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil)
// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent.
func NewEquivalentResourceRegistry() EquivalentResourceRegistry {
return &equivalentResourceRegistry{}
}
// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function.
// If "" is returned by the function, GroupResource#String is used as the identity.
// GroupResources with the same identity string are considered equivalent.
func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry {
return &equivalentResourceRegistry{keyFunc: keyFunc}
}
func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource {
r.mutex.RLock()
defer r.mutex.RUnlock()
return r.resources[r.keys[resource.GroupResource()]][subresource]
}
func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind {
r.mutex.RLock()
defer r.mutex.RUnlock()
return r.kinds[resource][subresource]
}
func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) {
r.mutex.Lock()
defer r.mutex.Unlock()
if r.kinds == nil {
r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{}
}
if r.kinds[resource] == nil {
r.kinds[resource] = map[string]schema.GroupVersionKind{}
}
r.kinds[resource][subresource] = kind
// get the shared key of the parent resource
key := ""
gr := resource.GroupResource()
if r.keyFunc != nil {
key = r.keyFunc(gr)
}
if key == "" {
key = gr.String()
}
if r.keys == nil {
r.keys = map[schema.GroupResource]string{}
}
r.keys[gr] = key
if r.resources == nil {
r.resources = map[string]map[string][]schema.GroupVersionResource{}
}
if r.resources[key] == nil {
r.resources[key] = map[string][]schema.GroupVersionResource{}
}
r.resources[key][subresource] = append(r.resources[key][subresource], resource)
}
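The registry treats all versions registered under the same GroupResource key as equivalent and remembers the expected kind per subresource. An illustrative sketch; the deployments/scale pairing is only an example:

package example

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// registerScale registers one resource/subresource pair and shows the two lookups.
func registerScale() runtime.EquivalentResourceRegistry {
    reg := runtime.NewEquivalentResourceRegistry()
    deployments := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
    reg.RegisterKindFor(deployments, "scale", schema.GroupVersionKind{Group: "autoscaling", Version: "v1", Kind: "Scale"})
    // KindFor returns the registered kind; EquivalentResourcesFor lists resources sharing the key.
    _ = reg.KindFor(deployments, "scale")
    _ = reg.EquivalentResourcesFor(deployments, "scale")
    return reg
}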

146  vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go generated vendored Normal file
View File

@ -0,0 +1,146 @@
/*
Copyright 2019 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package runtime
import (
"fmt"
"k8s.io/apimachinery/pkg/runtime/schema"
)
// NegotiateError is returned when a ClientNegotiator is unable to locate
// a serializer for the requested operation.
type NegotiateError struct {
ContentType string
Stream bool
}
func (e NegotiateError) Error() string {
if e.Stream {
return fmt.Sprintf("no stream serializers registered for %s", e.ContentType)
}
return fmt.Sprintf("no serializers registered for %s", e.ContentType)
}
type clientNegotiator struct {
serializer NegotiatedSerializer
encode, decode GroupVersioner
}
func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) {
// TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method
// if client negotiators truly need to use it
mediaTypes := n.serializer.SupportedMediaTypes()
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
if !ok {
if len(contentType) != 0 || len(mediaTypes) == 0 {
return nil, NegotiateError{ContentType: contentType}
}
info = mediaTypes[0]
}
return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil
}
func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) {
mediaTypes := n.serializer.SupportedMediaTypes()
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
if !ok {
if len(contentType) != 0 || len(mediaTypes) == 0 {
return nil, NegotiateError{ContentType: contentType}
}
info = mediaTypes[0]
}
return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil
}
func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) {
mediaTypes := n.serializer.SupportedMediaTypes()
info, ok := SerializerInfoForMediaType(mediaTypes, contentType)
if !ok {
if len(contentType) != 0 || len(mediaTypes) == 0 {
return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true}
}
info = mediaTypes[0]
}
if info.StreamSerializer == nil {
return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true}
}
return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil
}
// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or
// stream decoder for a given content type. Does not perform any conversion, but will
// encode the object to the desired group, version, and kind. Use when creating a client.
func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
return &clientNegotiator{
serializer: serializer,
encode: gv,
}
}
// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver
// where objects are converted to gv prior to sending and decoded to their internal representation prior
// to retrieval.
//
// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release.
func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator {
decode := schema.GroupVersions{
{
Group: gv.Group,
Version: APIVersionInternal,
},
// always include the legacy group as a decoding target to handle non-error `Status` return types
{
Group: "",
Version: APIVersionInternal,
},
}
return &clientNegotiator{
encode: gv,
decode: decode,
serializer: serializer,
}
}
// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used
// for testing or when the caller is taking responsibility for setting the GVK on encoded objects.
func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator {
return &clientNegotiator{
serializer: &simpleNegotiatedSerializer{info: info},
encode: gv,
}
}
type simpleNegotiatedSerializer struct {
info SerializerInfo
}
func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer {
return &simpleNegotiatedSerializer{info: info}
}
func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo {
return []SerializerInfo{n.info}
}
func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder {
return e
}
func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder {
return d
}
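negotiate.go gives clients one place to turn a content type into an encoder, decoder, or stream decoder. A short sketch of the decoder path; the NegotiatedSerializer and group version are assumed to come from the caller:

package example

import (
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// jsonDecoder asks the negotiator for a decoder by content type; a NegotiateError is
// returned when no serializer matches.
func jsonDecoder(serializer runtime.NegotiatedSerializer, gv schema.GroupVersion) (runtime.Decoder, error) {
    negotiator := runtime.NewClientNegotiator(serializer, gv)
    return negotiator.Decoder("application/json", nil)
}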

View File

@ -29,33 +29,3 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind {
}
func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj }
// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind
// interface if no objects are provided, or the ObjectKind interface of the object in the
// highest array position.
func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind {
last := obj.Last()
if last == nil {
return schema.EmptyObjectKind
}
return last.GetObjectKind()
}
// First returns the leftmost object in the VersionedObjects array, which is usually the
// object as serialized on the wire.
func (obj *VersionedObjects) First() Object {
if len(obj.Objects) == 0 {
return nil
}
return obj.Objects[0]
}
// Last is the rightmost object in the VersionedObjects array, which is the object after
// all transformations have been applied. This is the same object that would be returned
// by Decode in a normal invocation (without VersionedObjects in the into argument).
func (obj *VersionedObjects) Last() Object {
if len(obj.Objects) == 0 {
return nil
}
return obj.Objects[len(obj.Objects)-1]
}

View File

@ -17,22 +17,14 @@ limitations under the License.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
/*
Package schema is a generated protocol buffer package.
It is generated from these files:
k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto
It has these top-level messages:
*/
package schema
import (
fmt "fmt"
proto "github.com/gogo/protobuf/proto"
math "math"
proto "github.com/gogo/protobuf/proto"
)
// Reference imports to suppress errors if they are not otherwise used.
@ -44,13 +36,13 @@ var _ = math.Inf
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
func init() {
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated)
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d)
}
var fileDescriptorGenerated = []byte{
var fileDescriptor_0462724132518e0d = []byte{
// 185 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30,
0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50,

View File

@ -176,21 +176,17 @@ func (gv GroupVersion) Empty() bool {
// String puts "group" and "version" into a single "group/version" string. For the legacy v1
// it returns "v1".
func (gv GroupVersion) String() string {
// special case the internal apiVersion for the legacy kube types
if gv.Empty() {
return ""
}
// special case of "v1" for backward compatibility
if len(gv.Group) == 0 && gv.Version == "v1" {
return gv.Version
}
if len(gv.Group) > 0 {
return gv.Group + "/" + gv.Version
}
return gv.Version
}
// Identifier implements runtime.GroupVersioner interface.
func (gv GroupVersion) Identifier() string {
return gv.String()
}
// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
// if none of the options match the group. It prefers a match to group and version over just group.
// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme.
@ -246,6 +242,15 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource {
// in fewer places.
type GroupVersions []GroupVersion
// Identifier implements runtime.GroupVersioner interface.
func (gvs GroupVersions) Identifier() string {
groupVersions := make([]string, 0, len(gvs))
for i := range gvs {
groupVersions = append(groupVersions, gvs[i].String())
}
return fmt.Sprintf("[%s]", strings.Join(groupVersions, ","))
}
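A small assumed illustration of the identifier format produced above: the group/version strings are joined by commas inside brackets, with the legacy core group rendering as just "v1".
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	gvs := schema.GroupVersions{
		{Group: "apps", Version: "v1"},
		{Group: "", Version: "v1"},
	}
	fmt.Println(gvs.Identifier()) // [apps/v1,v1]
}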
// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false
// if none of the options match the group.
func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) {

View File

@ -102,10 +102,10 @@ func NewScheme() *Scheme {
}
s.converter = conversion.NewConverter(s.nameFunc)
utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...))
// Enable a couple of default conversions by default.
utilruntime.Must(RegisterEmbeddedConversions(s))
utilruntime.Must(RegisterStringConversions(s))
// Enable map[string][]string conversions by default
utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...))
utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields))
return s
@ -211,6 +211,19 @@ func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) {
}
}
s.typeToGVK[t] = append(s.typeToGVK[t], gvk)
// if the type implements DeepCopyInto(<obj>), register a self-conversion
if m := reflect.ValueOf(obj).MethodByName("DeepCopyInto"); m.IsValid() && m.Type().NumIn() == 1 && m.Type().NumOut() == 0 && m.Type().In(0) == reflect.TypeOf(obj) {
if err := s.AddGeneratedConversionFunc(obj, obj, func(a, b interface{}, scope conversion.Scope) error {
// copy a to b
reflect.ValueOf(a).MethodByName("DeepCopyInto").Call([]reflect.Value{reflect.ValueOf(b)})
// clear TypeMeta to match legacy reflective conversion
b.(Object).GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{})
return nil
}); err != nil {
panic(err)
}
}
}
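To make the reflection condition above concrete, here is a hedged sketch with a hypothetical Widget type: only a method shaped exactly like DeepCopyInto(*Widget), taking one argument of the object's own type and returning nothing, triggers the generated self-conversion.
package main

import (
	"fmt"
	"reflect"
)

// Widget is a hypothetical type used only to illustrate the check.
type Widget struct{ Name string }

func (in *Widget) DeepCopyInto(out *Widget) { *out = *in }

// hasSelfDeepCopy mirrors the condition in AddKnownTypeWithName: the bound method
// takes exactly one argument of the object's own type and returns nothing.
func hasSelfDeepCopy(obj interface{}) bool {
	m := reflect.ValueOf(obj).MethodByName("DeepCopyInto")
	return m.IsValid() &&
		m.Type().NumIn() == 1 &&
		m.Type().NumOut() == 0 &&
		m.Type().In(0) == reflect.TypeOf(obj)
}

func main() {
	fmt.Println(hasSelfDeepCopy(&Widget{})) // true
}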
// KnownTypes returns the types known for the given version.
@ -308,45 +321,6 @@ func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error {
return s.converter.RegisterIgnoredConversion(from, to)
}
// AddConversionFuncs adds functions to the list of conversion functions. The given
// functions should know how to convert between two of your API objects, or their
// sub-objects. We deduce how to call these functions from the types of their two
// parameters; see the comment for Converter.Register.
//
// Note that, if you need to copy sub-objects that didn't change, you can use the
// conversion.Scope object that will be passed to your conversion function.
// Additionally, all conversions started by Scheme will set the SrcVersion and
// DestVersion fields on the Meta object. Example:
//
// s.AddConversionFuncs(
// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error {
// // You can depend on Meta() being non-nil, and this being set to
// // the source version, e.g., ""
// s.Meta().SrcVersion
// // You can depend on this being set to the destination version,
// // e.g., "v1".
// s.Meta().DestVersion
// // Call scope.Convert to copy sub-fields.
// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0)
// return nil
// },
// )
//
// (For more detail about conversion functions, see Converter.Register's comment.)
//
// Also note that the default behavior, if you don't add a conversion function, is to
// sanely copy fields that have the same names and same type names. It's OK if the
// destination type has extra fields, but it must not remove any. So you only need to
// add conversion functions for things with changed/removed fields.
func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error {
for _, f := range conversionFuncs {
if err := s.converter.RegisterConversionFunc(f); err != nil {
return err
}
}
return nil
}
// AddConversionFunc registers a function that converts between a and b by passing objects of those
// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce
// any other guarantee.

View File

@ -17,9 +17,13 @@ limitations under the License.
package serializer
import (
"mime"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/runtime/serializer/versioning"
)
@ -44,30 +48,53 @@ type serializerType struct {
StreamSerializer runtime.Serializer
}
func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType {
jsonSerializer := json.NewSerializer(mf, scheme, scheme, false)
jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true)
yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme)
func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType {
jsonSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict},
)
jsonSerializerType := serializerType{
AcceptContentTypes: []string{runtime.ContentTypeJSON},
ContentType: runtime.ContentTypeJSON,
FileExtensions: []string{"json"},
EncodesAsText: true,
Serializer: jsonSerializer,
Framer: json.Framer,
StreamSerializer: jsonSerializer,
}
if options.Pretty {
jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict},
)
}
yamlSerializer := json.NewSerializerWithOptions(
mf, scheme, scheme,
json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict},
)
protoSerializer := protobuf.NewSerializer(scheme, scheme)
protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme)
serializers := []serializerType{
jsonSerializerType,
{
AcceptContentTypes: []string{"application/json"},
ContentType: "application/json",
FileExtensions: []string{"json"},
EncodesAsText: true,
Serializer: jsonSerializer,
PrettySerializer: jsonPrettySerializer,
Framer: json.Framer,
StreamSerializer: jsonSerializer,
},
{
AcceptContentTypes: []string{"application/yaml"},
ContentType: "application/yaml",
AcceptContentTypes: []string{runtime.ContentTypeYAML},
ContentType: runtime.ContentTypeYAML,
FileExtensions: []string{"yaml"},
EncodesAsText: true,
Serializer: yamlSerializer,
},
{
AcceptContentTypes: []string{runtime.ContentTypeProtobuf},
ContentType: runtime.ContentTypeProtobuf,
FileExtensions: []string{"pb"},
Serializer: protoSerializer,
Framer: protobuf.LengthDelimitedFramer,
StreamSerializer: protoRawSerializer,
},
}
for _, fn := range serializerExtensions {
@ -89,14 +116,56 @@ type CodecFactory struct {
legacySerializer runtime.Serializer
}
// CodecFactoryOptions holds the options for configuring CodecFactory behavior
type CodecFactoryOptions struct {
// Strict configures all serializers in strict mode
Strict bool
// Pretty includes a pretty serializer along with the non-pretty one
Pretty bool
}
// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it.
// Functions implementing this type can be passed to the NewCodecFactory() constructor.
type CodecFactoryOptionsMutator func(*CodecFactoryOptions)
// EnablePretty enables including a pretty serializer along with the non-pretty one
func EnablePretty(options *CodecFactoryOptions) {
options.Pretty = true
}
// DisablePretty disables including a pretty serializer along with the non-pretty one
func DisablePretty(options *CodecFactoryOptions) {
options.Pretty = false
}
// EnableStrict enables configuring all serializers in strict mode
func EnableStrict(options *CodecFactoryOptions) {
options.Strict = true
}
// DisableStrict disables configuring all serializers in strict mode
func DisableStrict(options *CodecFactoryOptions) {
options.Strict = false
}
// NewCodecFactory provides methods for retrieving serializers for the supported wire formats
// and conversion wrappers to define preferred internal and external versions. In the future,
// as the internal version is used less, callers may instead use a defaulting serializer and
// only convert objects which are shared internally (Status, common API machinery).
//
// Mutators can be passed to change the CodecFactoryOptions before construction of the factory.
// It is recommended to explicitly pass mutators instead of relying on defaults.
// By default, Pretty is enabled -- this is conformant with previously supported behavior.
//
// TODO: allow other codecs to be compiled in?
// TODO: accept a scheme interface
func NewCodecFactory(scheme *runtime.Scheme) CodecFactory {
serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory)
func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory {
options := CodecFactoryOptions{Pretty: true}
for _, fn := range mutators {
fn(&options)
}
serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options)
return newCodecFactory(scheme, serializers)
}
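A short, assumed usage sketch of the mutator-based construction: the defaults are Pretty enabled and Strict disabled, and the mutators are applied in order over those defaults.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// newStrictCodecs builds a factory with strict decoding and no pretty serializer.
func newStrictCodecs(scheme *runtime.Scheme) serializer.CodecFactory {
	return serializer.NewCodecFactory(scheme, serializer.EnableStrict, serializer.DisablePretty)
}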
@ -120,6 +189,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
Serializer: d.Serializer,
PrettySerializer: d.PrettySerializer,
}
mediaType, _, err := mime.ParseMediaType(info.MediaType)
if err != nil {
panic(err)
}
parts := strings.SplitN(mediaType, "/", 2)
info.MediaTypeType = parts[0]
info.MediaTypeSubType = parts[1]
if d.StreamSerializer != nil {
info.StreamSerializer = &runtime.StreamSerializerInfo{
Serializer: d.StreamSerializer,
@ -148,6 +226,12 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
}
}
// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the
// caller requests it.
func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer {
return WithoutConversionCodecFactory{f}
}
// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for.
func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo {
return f.accepts
@ -215,23 +299,26 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou
return f.CodecForVersions(encoder, nil, gv, nil)
}
// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion.
type DirectCodecFactory struct {
// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion.
// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future
// will be unnecessary when we change the signature of NegotiatedSerializer.
type WithoutConversionCodecFactory struct {
CodecFactory
}
// EncoderForVersion returns an encoder that does not do conversion.
func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
return versioning.DirectEncoder{
// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object
// when serialized.
func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder {
return runtime.WithVersionEncoder{
Version: version,
Encoder: serializer,
ObjectTyper: f.CodecFactory.scheme,
}
}
// DecoderToVersion returns a decoder that does not do conversion. gv is ignored.
func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
return versioning.DirectDecoder{
// DecoderToVersion returns a decoder that does not do conversion.
func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder {
return runtime.WithoutVersionDecoder{
Decoder: serializer,
}
}
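For reference, a hedged sketch of how the conversion-free factory is typically wired into an encoder: the returned WithVersionEncoder stamps the GVK from the scheme but never converts. The helper name and target version are assumptions.
package example

import (
	"io"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

// encodeAsV1JSON encodes obj as application/json with its v1 GVK set, without conversion.
func encodeAsV1JSON(scheme *runtime.Scheme, obj runtime.Object, w io.Writer) error {
	codecs := serializer.NewCodecFactory(scheme)
	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		return runtime.NegotiateError{ContentType: runtime.ContentTypeJSON}
	}
	enc := codecs.WithoutConversion().EncoderForVersion(info.Serializer, schema.GroupVersion{Version: "v1"})
	return enc.Encode(obj, w)
}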

View File

@ -31,38 +31,78 @@ import (
"k8s.io/apimachinery/pkg/runtime/serializer/recognizer"
"k8s.io/apimachinery/pkg/util/framer"
utilyaml "k8s.io/apimachinery/pkg/util/yaml"
"k8s.io/klog/v2"
)
// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer
// is not nil, the object has the group, version, and kind fields set.
// Deprecated: use NewSerializerWithOptions instead.
func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer {
return &Serializer{
meta: meta,
creater: creater,
typer: typer,
yaml: false,
pretty: pretty,
}
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false})
}
// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer
// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that
// matches JSON, and will error if constructs are used that do not serialize to JSON.
// Deprecated: use NewSerializerWithOptions instead.
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false})
}
// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML
// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer
// and are immutable.
func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer {
return &Serializer{
meta: meta,
creater: creater,
typer: typer,
yaml: true,
meta: meta,
creater: creater,
typer: typer,
options: options,
identifier: identifier(options),
}
}
// identifier computes Identifier of Encoder based on the given options.
func identifier(options SerializerOptions) runtime.Identifier {
result := map[string]string{
"name": "json",
"yaml": strconv.FormatBool(options.Yaml),
"pretty": strconv.FormatBool(options.Pretty),
}
identifier, err := json.Marshal(result)
if err != nil {
klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err)
}
return runtime.Identifier(identifier)
}
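For the default JSON configuration the computed identifier is a small JSON object with alphabetically sorted keys; a quick illustration of the assumed output follows.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// encoding/json sorts map keys, so the identifier is deterministic.
	id, _ := json.Marshal(map[string]string{
		"name":   "json",
		"yaml":   "false",
		"pretty": "false",
	})
	fmt.Println(string(id)) // {"name":"json","pretty":"false","yaml":"false"}
}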
// SerializerOptions holds the options which are used to configure a JSON/YAML serializer.
// For example:
// (1) To configure a JSON serializer, set `Yaml` to `false`.
// (2) To configure a YAML serializer, set `Yaml` to `true`.
// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`.
type SerializerOptions struct {
// Yaml: configures the Serializer to work with JSON(false) or YAML(true).
// When `Yaml` is enabled, this serializer only supports the subset of YAML that
// matches JSON, and will error if constructs are used that do not serialize to JSON.
Yaml bool
// Pretty: configures a JSON-enabled Serializer (`Yaml: false`) to produce human-readable output.
// This option is silently ignored when `Yaml` is `true`.
Pretty bool
// Strict: configures the Serializer to return strictDecodingError when duplicate fields are present while decoding JSON or YAML.
// Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths.
Strict bool
}
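A hedged construction sketch: a strict YAML serializer built through the new options struct. The scheme is assumed to double as ObjectCreater and ObjectTyper, as elsewhere in this commit; the helper name is illustrative.
package example

import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

// newStrictYAMLSerializer decodes YAML and surfaces duplicate or unknown fields
// as strict decoding errors.
func newStrictYAMLSerializer(scheme *runtime.Scheme) *json.Serializer {
	return json.NewSerializerWithOptions(
		json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: true, Pretty: false, Strict: true},
	)
}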
type Serializer struct {
meta MetaFactory
options SerializerOptions
creater runtime.ObjectCreater
typer runtime.ObjectTyper
yaml bool
pretty bool
identifier runtime.Identifier
}
// Serializer implements Serializer
@ -119,11 +159,28 @@ func CaseSensitiveJsonIterator() jsoniter.API {
return config
}
// Private copy of jsoniter to try to shield against possible mutations
// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be
// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with
// the encoding/json standard library.
func StrictCaseSensitiveJsonIterator() jsoniter.API {
config := jsoniter.Config{
EscapeHTML: true,
SortMapKeys: true,
ValidateJsonRawMessage: true,
CaseSensitive: true,
DisallowUnknownFields: true,
}.Froze()
// Force jsoniter to decode number to interface{} via int64/float64, if possible.
config.RegisterExtension(&customNumberExtension{})
return config
}
// Private copies of jsoniter to try to shield against possible mutations
// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them
// in some other library will mess with every usage of the jsoniter library in the whole program.
// See https://github.com/json-iterator/go/issues/265
var caseSensitiveJsonIterator = CaseSensitiveJsonIterator()
var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator()
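The difference between the two iterators can be seen with jsoniter directly; a minimal sketch follows, where the spec type is hypothetical.
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type spec struct {
	Replicas int `json:"replicas"`
}

func main() {
	strict := jsoniter.Config{CaseSensitive: true, DisallowUnknownFields: true}.Froze()
	var s spec
	// "replicaz" is not a known field, so the strict config reports an error
	// where the permissive configuration would silently drop it.
	err := strict.Unmarshal([]byte(`{"replicaz": 3}`), &s)
	fmt.Println(err != nil) // true
}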
// gvkWithDefaults returns group kind and version defaulting from provided default
func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind {
@ -149,18 +206,8 @@ func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVer
// On success or most errors, the method will return the calculated schema kind.
// The gvk calculation priority is: originalData > default gvk > into
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
if versioned, ok := into.(*runtime.VersionedObjects); ok {
into = versioned.Last()
obj, actual, err := s.Decode(originalData, gvk, into)
if err != nil {
return nil, actual, err
}
versioned.Objects = []runtime.Object{obj}
return versioned, actual, nil
}
data := originalData
if s.yaml {
if s.options.Yaml {
altered, err := yaml.YAMLToJSON(data)
if err != nil {
return nil, nil, err
@ -216,12 +263,45 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil {
return nil, actual, err
}
// If the deserializer is non-strict, return successfully here.
if !s.options.Strict {
return obj, actual, nil
}
// In strict mode, pass the data through the YAMLToJSONStrict converter.
// This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data,
// the output would equal the input, unless there is a parsing error such as duplicate fields.
// As we know this was successful in the non-strict case, the only error that may be returned here
// is due to the newly added strictness. Hence we know we can return the typed strictDecodingError:
// the actual error is that the object contains duplicate fields.
altered, err := yaml.YAMLToJSONStrict(originalData)
if err != nil {
return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
}
// As performance is not an issue for now for the strict deserializer (the unmarshal has to be
// done twice regardless), we take the sanitized, altered data that is guaranteed to have no duplicated
// fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here
// means a matching field doesn't exist in the object. Hence we can return a typed strictDecodingError:
// the actual error is that the object contains an unknown field.
strictObj := obj.DeepCopyObject()
if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil {
return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData))
}
// Always return the same object as the non-strict serializer to avoid any deviations.
return obj, actual, nil
}
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
if s.yaml {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, w)
}
return s.doEncode(obj, w)
}
func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
if s.options.Yaml {
json, err := caseSensitiveJsonIterator.Marshal(obj)
if err != nil {
return err
@ -234,7 +314,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
return err
}
if s.pretty {
if s.options.Pretty {
data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ")
if err != nil {
return err
@ -246,9 +326,14 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
return encoder.Encode(obj)
}
// Identifier implements runtime.Encoder interface.
func (s *Serializer) Identifier() runtime.Identifier {
return s.identifier
}
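For context on the CacheableObject fast path used in Encode above, a hedged sketch of a wrapper that memoizes encodings per encoder Identifier; cachingObject is a hypothetical helper, assuming the CacheableObject contract is CacheEncode(id, encode, w) plus GetObject(), as exercised above.
package example

import (
	"bytes"
	"io"
	"sync"

	"k8s.io/apimachinery/pkg/runtime"
)

// cachingObject wraps a runtime.Object and reuses previously encoded bytes
// whenever the same encoder Identifier asks for them again.
type cachingObject struct {
	runtime.Object

	mu    sync.Mutex
	cache map[runtime.Identifier][]byte
}

func (c *cachingObject) GetObject() runtime.Object { return c.Object }

func (c *cachingObject) CacheEncode(id runtime.Identifier, encode func(runtime.Object, io.Writer) error, w io.Writer) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if data, ok := c.cache[id]; ok {
		_, err := w.Write(data)
		return err
	}
	var buf bytes.Buffer
	if err := encode(c.Object, &buf); err != nil {
		return err
	}
	if c.cache == nil {
		c.cache = map[runtime.Identifier][]byte{}
	}
	c.cache[id] = buf.Bytes()
	_, err := w.Write(buf.Bytes())
	return err
}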
// RecognizesData implements the RecognizingDecoder interface.
func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) {
if s.yaml {
if s.options.Yaml {
// we could potentially look for '---'
return false, true, nil
}

View File

@ -69,27 +69,25 @@ func IsNotMarshalable(err error) bool {
// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer
// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written
// as-is (any type info passed with the object will be used).
//
// This encoding scheme is experimental, and is subject to change at any time.
func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer {
func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer {
return &Serializer{
prefix: protoEncodingPrefix,
creater: creater,
typer: typer,
contentType: defaultContentType,
prefix: protoEncodingPrefix,
creater: creater,
typer: typer,
}
}
type Serializer struct {
prefix []byte
creater runtime.ObjectCreater
typer runtime.ObjectTyper
contentType string
prefix []byte
creater runtime.ObjectCreater
typer runtime.ObjectTyper
}
var _ runtime.Serializer = &Serializer{}
var _ recognizer.RecognizingDecoder = &Serializer{}
const serializerIdentifier runtime.Identifier = "protobuf"
// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
@ -97,23 +95,6 @@ var _ recognizer.RecognizingDecoder = &Serializer{}
// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most
// errors, the method will return the calculated schema kind.
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
if versioned, ok := into.(*runtime.VersionedObjects); ok {
into = versioned.Last()
obj, actual, err := s.Decode(originalData, gvk, into)
if err != nil {
return nil, actual, err
}
// the last item in versioned becomes into, so if versioned was not originally empty we reset the object
// array so the first position is the decoded object and the second position is the outermost object.
// if there were no objects in the versioned list passed to us, only add ourselves.
if into != nil && into != obj {
versioned.Objects = []runtime.Object{obj, into}
} else {
versioned.Objects = []runtime.Object{obj}
}
return versioned, actual, err
}
prefixLen := len(s.prefix)
switch {
case len(originalData) == 0:
@ -138,7 +119,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
*intoUnknown = unk
if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok {
intoUnknown.ContentType = s.contentType
intoUnknown.ContentType = runtime.ContentTypeProtobuf
}
return intoUnknown, &actual, nil
}
@ -180,6 +161,13 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i
// Encode serializes the provided object to the given writer.
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, w)
}
return s.doEncode(obj, w)
}
func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error {
prefixSize := uint64(len(s.prefix))
var unk runtime.Unknown
@ -207,7 +195,7 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
switch t := obj.(type) {
case bufferedMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalTo methods
// the more efficient Size and MarshalToSizedBuffer methods
encodedSize := uint64(t.Size())
estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize)
data := make([]byte, estimatedSize)
@ -249,6 +237,11 @@ func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error {
}
}
// Identifier implements runtime.Encoder interface.
func (s *Serializer) Identifier() runtime.Identifier {
return serializerIdentifier
}
// RecognizesData implements the RecognizingDecoder interface.
func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) {
prefix := make([]byte, 4)
@ -287,6 +280,12 @@ type bufferedMarshaller interface {
runtime.ProtobufMarshaller
}
// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently.
type bufferedReverseMarshaller interface {
proto.Sizer
runtime.ProtobufReverseMarshaller
}
// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown
// object with a nil RawJSON struct and the expected size of the provided buffer. The
// returned size will not be correct if RawJSON is set on unk.
@ -303,24 +302,24 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 {
// encoded object, and thus is not self describing (callers must know what type is being described in order to decode).
//
// This encoding scheme is experimental, and is subject to change at any time.
func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer {
func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer {
return &RawSerializer{
creater: creater,
typer: typer,
contentType: defaultContentType,
creater: creater,
typer: typer,
}
}
// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying
// type).
type RawSerializer struct {
creater runtime.ObjectCreater
typer runtime.ObjectTyper
contentType string
creater runtime.ObjectCreater
typer runtime.ObjectTyper
}
var _ runtime.Serializer = &RawSerializer{}
const rawSerializerIdentifier runtime.Identifier = "raw-protobuf"
// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default
// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown,
// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will
@ -332,20 +331,6 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s)
}
if versioned, ok := into.(*runtime.VersionedObjects); ok {
into = versioned.Last()
obj, actual, err := s.Decode(originalData, gvk, into)
if err != nil {
return nil, actual, err
}
if into != nil && into != obj {
versioned.Objects = []runtime.Object{obj, into}
} else {
versioned.Objects = []runtime.Object{obj}
}
return versioned, actual, err
}
if len(originalData) == 0 {
// TODO: treat like decoding {} from JSON with defaulting
return nil, nil, fmt.Errorf("empty data")
@ -358,7 +343,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind
if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil {
intoUnknown.Raw = data
intoUnknown.ContentEncoding = ""
intoUnknown.ContentType = s.contentType
intoUnknown.ContentType = runtime.ContentTypeProtobuf
intoUnknown.SetGroupVersionKind(*actual)
return intoUnknown, actual, nil
}
@ -411,12 +396,35 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater,
if err := proto.Unmarshal(data, pb); err != nil {
return nil, actual, err
}
if actual != nil {
obj.GetObjectKind().SetGroupVersionKind(*actual)
}
return obj, actual, nil
}
// Encode serializes the provided object to the given writer. Overrides is ignored.
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(s.Identifier(), s.doEncode, w)
}
return s.doEncode(obj, w)
}
func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error {
switch t := obj.(type) {
case bufferedReverseMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalToSizedBuffer methods
encodedSize := uint64(t.Size())
data := make([]byte, encodedSize)
n, err := t.MarshalToSizedBuffer(data)
if err != nil {
return err
}
_, err = w.Write(data[:n])
return err
case bufferedMarshaller:
// this path performs a single allocation during write but requires the caller to implement
// the more efficient Size and MarshalTo methods
@ -444,6 +452,11 @@ func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error {
}
}
// Identifier implements runtime.Encoder interface.
func (s *RawSerializer) Identifier() runtime.Identifier {
return rawSerializerIdentifier
}
var LengthDelimitedFramer = lengthDelimitedFramer{}
type lengthDelimitedFramer struct{}

View File

@ -1,48 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package serializer
import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)
const (
// contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from
// depending on it unintentionally.
// TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the
// CodecFactory on initialization.
contentTypeProtobuf = "application/vnd.kubernetes.protobuf"
)
func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) {
serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf)
raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf)
return serializerType{
AcceptContentTypes: []string{contentTypeProtobuf},
ContentType: contentTypeProtobuf,
FileExtensions: []string{"pb"},
Serializer: serializer,
Framer: protobuf.LengthDelimitedFramer,
StreamSerializer: raw,
}, true
}
func init() {
serializerExtensions = append(serializerExtensions, protobufSerializer)
}

View File

@ -17,12 +17,15 @@ limitations under the License.
package versioning
import (
"encoding/json"
"io"
"reflect"
"sync"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/klog/v2"
)
// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme.
@ -62,6 +65,8 @@ func NewCodec(
encodeVersion: encodeVersion,
decodeVersion: decodeVersion,
identifier: identifier(encodeVersion, encoder),
originalSchemeName: originalSchemeName,
}
return internal
@ -78,19 +83,47 @@ type codec struct {
encodeVersion runtime.GroupVersioner
decodeVersion runtime.GroupVersioner
identifier runtime.Identifier
// originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates
originalSchemeName string
}
var identifiersMap sync.Map
type codecIdentifier struct {
EncodeGV string `json:"encodeGV,omitempty"`
Encoder string `json:"encoder,omitempty"`
Name string `json:"name,omitempty"`
}
// identifier computes Identifier of Encoder based on codec parameters.
func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier {
result := codecIdentifier{
Name: "versioning",
}
if encodeGV != nil {
result.EncodeGV = encodeGV.Identifier()
}
if encoder != nil {
result.Encoder = string(encoder.Identifier())
}
if id, ok := identifiersMap.Load(result); ok {
return id.(runtime.Identifier)
}
identifier, err := json.Marshal(result)
if err != nil {
klog.Fatalf("Failed marshaling identifier for codec: %v", err)
}
identifiersMap.Store(result, runtime.Identifier(identifier))
return runtime.Identifier(identifier)
}
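For reference, a small assumed illustration of the resulting identifier shape: the versioning codec nests the underlying encoder's identifier alongside the target group/version.
package main

import (
	"encoding/json"
	"fmt"
)

// codecID mirrors the codecIdentifier layout above.
type codecID struct {
	EncodeGV string `json:"encodeGV,omitempty"`
	Encoder  string `json:"encoder,omitempty"`
	Name     string `json:"name,omitempty"`
}

func main() {
	inner := `{"name":"json","pretty":"false","yaml":"false"}`
	id, _ := json.Marshal(codecID{EncodeGV: "v1", Encoder: inner, Name: "versioning"})
	fmt.Println(string(id))
	// {"encodeGV":"v1","encoder":"{\"name\":\"json\",\"pretty\":\"false\",\"yaml\":\"false\"}","name":"versioning"}
}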
// Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is
// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an
// into that matches the serialized version.
func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
versioned, isVersioned := into.(*runtime.VersionedObjects)
if isVersioned {
into = versioned.Last()
}
// If the into object is unstructured and expresses an opinion about its group/version,
// create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`)
decodeInto := into
@ -106,50 +139,30 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
}
if d, ok := obj.(runtime.NestedObjectDecoder); ok {
if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil {
if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil {
return nil, gvk, err
}
}
// if we specify a target, use generic conversion.
if into != nil {
if into == obj {
if isVersioned {
return versioned, gvk, nil
}
return into, gvk, nil
}
// perform defaulting if requested
if c.defaulter != nil {
// create a copy to ensure defaulting is not applied to the original versioned objects
if isVersioned {
versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
}
c.defaulter.Default(obj)
} else {
if isVersioned {
versioned.Objects = []runtime.Object{obj}
}
}
// Short-circuit conversion if the into object is same object
if into == obj {
return into, gvk, nil
}
if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil {
return nil, gvk, err
}
if isVersioned {
versioned.Objects = append(versioned.Objects, into)
return versioned, gvk, nil
}
return into, gvk, nil
}
// Convert if needed.
if isVersioned {
// create a copy, because ConvertToVersion does not guarantee non-mutation of objects
versioned.Objects = []runtime.Object{obj.DeepCopyObject()}
}
// perform defaulting if requested
if c.defaulter != nil {
c.defaulter.Default(obj)
@ -159,18 +172,19 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru
if err != nil {
return nil, gvk, err
}
if isVersioned {
if versioned.Last() != out {
versioned.Objects = append(versioned.Objects, out)
}
return versioned, gvk, nil
}
return out, gvk, nil
}
// Encode ensures the provided object is output in the appropriate group and version, invoking
// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is.
func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
if co, ok := obj.(runtime.CacheableObject); ok {
return co.CacheEncode(c.Identifier(), c.doEncode, w)
}
return c.doEncode(obj, w)
}
func (c *codec) doEncode(obj runtime.Object, w io.Writer) error {
switch obj := obj.(type) {
case *runtime.Unknown:
return c.encoder.Encode(obj, w)
@ -199,84 +213,38 @@ func (c *codec) Encode(obj runtime.Object, w io.Writer) error {
return err
}
objectKind := obj.GetObjectKind()
old := objectKind.GroupVersionKind()
// restore the old GVK after encoding
defer objectKind.SetGroupVersionKind(old)
if c.encodeVersion == nil || isUnversioned {
if e, ok := obj.(runtime.NestedObjectEncoder); ok {
if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
return err
}
}
objectKind := obj.GetObjectKind()
old := objectKind.GroupVersionKind()
objectKind.SetGroupVersionKind(gvks[0])
err = c.encoder.Encode(obj, w)
objectKind.SetGroupVersionKind(old)
return err
return c.encoder.Encode(obj, w)
}
// Perform a conversion if necessary
objectKind := obj.GetObjectKind()
old := objectKind.GroupVersionKind()
out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion)
if err != nil {
return err
}
if e, ok := out.(runtime.NestedObjectEncoder); ok {
if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil {
return err
}
}
// Conversion is responsible for setting the proper group, version, and kind onto the outgoing object
err = c.encoder.Encode(out, w)
// restore the old GVK, in case conversion returned the same object
objectKind.SetGroupVersionKind(old)
return err
return c.encoder.Encode(out, w)
}
// DirectEncoder serializes an object and ensures the GVK is set.
type DirectEncoder struct {
Version runtime.GroupVersioner
runtime.Encoder
runtime.ObjectTyper
}
// Encode does not do conversion. It sets the gvk during serialization.
func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error {
gvks, _, err := e.ObjectTyper.ObjectKinds(obj)
if err != nil {
if runtime.IsNotRegisteredError(err) {
return e.Encoder.Encode(obj, stream)
}
return err
}
kind := obj.GetObjectKind()
oldGVK := kind.GroupVersionKind()
gvk := gvks[0]
if e.Version != nil {
preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks)
if ok {
gvk = preferredGVK
}
}
kind.SetGroupVersionKind(gvk)
err = e.Encoder.Encode(obj, stream)
kind.SetGroupVersionKind(oldGVK)
return err
}
// DirectDecoder clears the group version kind of a deserialized object.
type DirectDecoder struct {
runtime.Decoder
}
// Decode does not do conversion. It removes the gvk during deserialization.
func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) {
obj, gvk, err := d.Decoder.Decode(data, defaults, into)
if obj != nil {
kind := obj.GetObjectKind()
// clearing the gvk is just a convention of a codec
kind.SetGroupVersionKind(schema.GroupVersionKind{})
}
return obj, gvk, err
// Identifier implements runtime.Encoder interface.
func (c *codec) Identifier() runtime.Identifier {
return c.identifier
}

View File

@ -41,8 +41,9 @@ type TypeMeta struct {
}
const (
ContentTypeJSON string = "application/json"
ContentTypeYAML string = "application/yaml"
ContentTypeJSON string = "application/json"
ContentTypeYAML string = "application/yaml"
ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
)
// RawExtension is used to hold extensions in external versions.
@ -94,7 +95,7 @@ type RawExtension struct {
// Raw is the underlying serialization of this object.
//
// TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data.
Raw []byte `protobuf:"bytes,1,opt,name=raw"`
Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"`
// Object can hold a representation of this extension - useful for working with versioned
// structs.
Object Object `json:"-"`
@ -123,16 +124,3 @@ type Unknown struct {
// Unspecified means ContentTypeJSON.
ContentType string `protobuf:"bytes,4,opt,name=contentType"`
}
// VersionedObjects is used by Decoders to give callers a way to access all versions
// of an object during the decoding process.
//
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// +k8s:deepcopy-gen=true
type VersionedObjects struct {
// Objects is the set of objects retrieved during decoding, in order of conversion.
// The 0 index is the object as serialized on the wire. If conversion has occurred,
// other objects may be present. The right most object is the same as would be returned
// by a normal Decode call.
Objects []Object
}

View File

@ -24,46 +24,66 @@ type ProtobufMarshaller interface {
MarshalTo(data []byte) (int, error)
}
type ProtobufReverseMarshaller interface {
MarshalToSizedBuffer(data []byte) (int, error)
}
// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown
// that will contain an object that implements ProtobufMarshaller.
// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller.
func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0xa
i++
i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size()))
n1, err := m.TypeMeta.MarshalTo(data[i:])
// Calculate the full size of the message.
msgSize := m.Size()
if b != nil {
msgSize += int(size) + sovGenerated(size) + 1
}
// Reverse marshal the fields of m.
i := msgSize
i -= len(m.ContentType)
copy(data[i:], m.ContentType)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i--
data[i] = 0x22
i -= len(m.ContentEncoding)
copy(data[i:], m.ContentEncoding)
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i--
data[i] = 0x1a
if b != nil {
if r, ok := b.(ProtobufReverseMarshaller); ok {
n1, err := r.MarshalToSizedBuffer(data[:i])
if err != nil {
return 0, err
}
i -= int(size)
if uint64(n1) != size {
// programmer error: the Size() method for protobuf does not match the results of MarshalToSizedBuffer, which means the proto
// struct returned would be wrong.
return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1)
}
} else {
i -= int(size)
n1, err := b.MarshalTo(data[i:])
if err != nil {
return 0, err
}
if uint64(n1) != size {
// programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto
// struct returned would be wrong.
return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1)
}
}
i = encodeVarintGenerated(data, i, size)
i--
data[i] = 0x12
}
n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i])
if err != nil {
return 0, err
}
i += n1
if b != nil {
data[i] = 0x12
i++
i = encodeVarintGenerated(data, i, size)
n2, err := b.MarshalTo(data[i:])
if err != nil {
return 0, err
}
if uint64(n2) != size {
// programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto
// struct returned would be wrong.
return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2)
}
i += n2
}
data[i] = 0x1a
i++
i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding)))
i += copy(data[i:], m.ContentEncoding)
data[i] = 0x22
i++
i = encodeVarintGenerated(data, i, uint64(len(m.ContentType)))
i += copy(data[i:], m.ContentType)
return i, nil
i -= n2
i = encodeVarintGenerated(data, i, uint64(n2))
i--
data[i] = 0xa
return msgSize - i, nil
}

View File

@ -73,36 +73,3 @@ func (in *Unknown) DeepCopyObject() Object {
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) {
*out = *in
if in.Objects != nil {
in, out := &in.Objects, &out.Objects
*out = make([]Object, len(*in))
for i := range *in {
if (*in)[i] != nil {
(*out)[i] = (*in)[i].DeepCopyObject()
}
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects.
func (in *VersionedObjects) DeepCopy() *VersionedObjects {
if in == nil {
return nil
}
out := new(VersionedObjects)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object.
func (in *VersionedObjects) DeepCopyObject() Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}