Merge pull request #75657 from guilhermeoki/docs/sample-controller

sample-controller: update README

Kubernetes-commit: e7eb742c1907eb4f1c9e5412f6cd1d4e06f3c277
Kubernetes Publisher committed on 2019-03-25 17:45:00 -07:00
commit b8f621986e (parent 59c098b8b3)
11 changed files with 362 additions and 270 deletions

Godeps/Godeps.json (generated): 458 changed lines

File diff suppressed because it is too large.

View File

@ -17,30 +17,76 @@ limitations under the License.
package meta
import (
"errors"
"fmt"
"reflect"
"sync"
"k8s.io/apimachinery/pkg/conversion"
"k8s.io/apimachinery/pkg/runtime"
)
// IsListType returns true if the provided Object has a slice called Items
var (
// isListCache maintains a cache of types that are checked for lists
// which is used by IsListType.
// TODO: remove and replace with an interface check
isListCache = struct {
lock sync.RWMutex
byType map[reflect.Type]bool
}{
byType: make(map[reflect.Type]bool, 1024),
}
)
// IsListType returns true if the provided Object has a slice called Items.
// TODO: Replace the code in this check with an interface comparison by
// creating and enforcing that lists implement a list accessor.
func IsListType(obj runtime.Object) bool {
// if we're a runtime.Unstructured, check whether this is a list.
// TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object
if unstructured, ok := obj.(runtime.Unstructured); ok {
return unstructured.IsList()
switch t := obj.(type) {
case runtime.Unstructured:
return t.IsList()
}
t := reflect.TypeOf(obj)
isListCache.lock.RLock()
ok, exists := isListCache.byType[t]
isListCache.lock.RUnlock()
if !exists {
_, err := getItemsPtr(obj)
ok = err == nil
// cache only the first 1024 types
isListCache.lock.Lock()
if len(isListCache.byType) < 1024 {
isListCache.byType[t] = ok
}
isListCache.lock.Unlock()
}
_, err := GetItemsPtr(obj)
return err == nil
return ok
}
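
As an illustrative sketch of the behavior above (assuming k8s.io/api/core/v1 and the apimachinery packages are available; the example program is not part of this change): unstructured objects are answered by IsList() directly, while typed objects go through the reflective Items check whose result is memoized per type in isListCache.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// Unstructured content is answered via IsList(), i.e. the presence of an
	// "items" field; it never reaches the reflection path or the type cache.
	u := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "PodList",
		"items":      []interface{}{},
	}}
	fmt.Println(meta.IsListType(u)) // true

	// Typed objects are checked reflectively for an Items slice, and the
	// answer is cached per reflect.Type (first 1024 types only).
	fmt.Println(meta.IsListType(&corev1.PodList{})) // true
	fmt.Println(meta.IsListType(&corev1.Pod{}))     // false
}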
var (
errExpectFieldItems = errors.New("no Items field in this object")
errExpectSliceItems = errors.New("Items field must be a slice of objects")
)
// GetItemsPtr returns a pointer to the list object's Items member.
// If 'list' doesn't have an Items member, it's not really a list type
// and an error will be returned.
// This function will either return a pointer to a slice, or an error, but not both.
// TODO: this will be replaced with an interface in the future
func GetItemsPtr(list runtime.Object) (interface{}, error) {
obj, err := getItemsPtr(list)
if err != nil {
return nil, fmt.Errorf("%T is not a list: %v", err)
}
return obj, nil
}
// getItemsPtr returns a pointer to the list object's Items member or an error.
func getItemsPtr(list runtime.Object) (interface{}, error) {
v, err := conversion.EnforcePtr(list)
if err != nil {
return nil, err
@ -48,19 +94,19 @@ func GetItemsPtr(list runtime.Object) (interface{}, error) {
items := v.FieldByName("Items")
if !items.IsValid() {
return nil, fmt.Errorf("no Items field in %#v", list)
return nil, errExpectFieldItems
}
switch items.Kind() {
case reflect.Interface, reflect.Ptr:
target := reflect.TypeOf(items.Interface()).Elem()
if target.Kind() != reflect.Slice {
return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind())
return nil, errExpectSliceItems
}
return items.Interface(), nil
case reflect.Slice:
return items.Addr().Interface(), nil
default:
return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind())
return nil, errExpectSliceItems
}
}
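
A usage sketch for GetItemsPtr under the same assumptions; the returned value is a pointer to the concrete Items slice of the list type.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/meta"
)

func main() {
	list := &corev1.PodList{Items: []corev1.Pod{{}, {}}}

	// For a valid list the result is a pointer to the Items slice itself.
	itemsPtr, err := meta.GetItemsPtr(list)
	if err != nil {
		panic(err)
	}
	items := itemsPtr.(*[]corev1.Pod)
	fmt.Println(len(*items)) // 2

	// A non-list object is rejected with the wrapped sentinel error,
	// e.g. "*v1.Pod is not a list: no Items field in this object".
	if _, err := meta.GetItemsPtr(&corev1.Pod{}); err != nil {
		fmt.Println(err)
	}
}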

View File

@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor {
func SetField(src interface{}, v reflect.Value, fieldName string) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
srcValue := reflect.ValueOf(src)
if srcValue.Type().AssignableTo(field.Type()) {
@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error {
func Field(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
destValue, err := conversion.EnforcePtr(dest)
if err != nil {
@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error {
func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error {
field := v.FieldByName(fieldName)
if !field.IsValid() {
return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface())
return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface())
}
v, err := conversion.EnforcePtr(dest)
if err != nil {
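
The switch from %#v to %T in these messages keeps the errors short: %#v prints the full Go-syntax representation of the value, %T only its dynamic type. A small standalone sketch (the widget type is made up for illustration):

package main

import "fmt"

type widget struct {
	Name string
	Spec map[string]string
}

func main() {
	w := widget{Name: "w", Spec: map[string]string{"size": "large"}}

	// %#v dumps the full Go-syntax representation of the value; for real API
	// objects this can run to many kilobytes.
	fmt.Printf("couldn't find Foo field in %#v\n", w)

	// %T prints only the dynamic type, keeping the error readable.
	fmt.Printf("couldn't find Foo field in %T\n", w)
}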

View File

@ -91,6 +91,10 @@ type Framer interface {
type SerializerInfo struct {
// MediaType is the value that represents this serializer over the wire.
MediaType string
// MediaTypeType is the first part of the MediaType ("application" in "application/json").
MediaTypeType string
// MediaTypeSubType is the second part of the MediaType ("json" in "application/json").
MediaTypeSubType string
// EncodesAsText indicates this serializer can be encoded to UTF-8 safely.
EncodesAsText bool
// Serializer is the individual object serializer for this media type.
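
A sketch of reading the new MediaTypeType/MediaTypeSubType fields from a CodecFactory (standard scheme/codec setup; SerializerInfoForMediaType is the existing lookup helper in the runtime package):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	codecs := serializer.NewCodecFactory(scheme)

	// For "application/json" the factory now also records the split media type.
	info, ok := runtime.SerializerInfoForMediaType(codecs.SupportedMediaTypes(), runtime.ContentTypeJSON)
	if !ok {
		panic("no serializer registered for application/json")
	}
	fmt.Println(info.MediaTypeType)    // "application"
	fmt.Println(info.MediaTypeSubType) // "json"
}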

View File

@ -17,6 +17,9 @@ limitations under the License.
package serializer
import (
"mime"
"strings"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/runtime/serializer/json"
@ -120,6 +123,15 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec
Serializer: d.Serializer,
PrettySerializer: d.PrettySerializer,
}
mediaType, _, err := mime.ParseMediaType(info.MediaType)
if err != nil {
panic(err)
}
parts := strings.SplitN(mediaType, "/", 2)
info.MediaTypeType = parts[0]
info.MediaTypeSubType = parts[1]
if d.StreamSerializer != nil {
info.StreamSerializer = &runtime.StreamSerializerInfo{
Serializer: d.StreamSerializer,

View File

@ -41,9 +41,8 @@ type TypeMeta struct {
}
const (
ContentTypeJSON string = "application/json"
ContentTypeYAML string = "application/yaml"
ContentTypeJSON string = "application/json"
ContentTypeYAML string = "application/yaml"
ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf"
)

View File

@ -250,6 +250,25 @@ func (b *Backoff) Step() time.Duration {
return duration
}
// contextForChannel derives a child context from a parent channel.
//
// The derived context's Done channel is closed when the returned cancel function
// is called or when the parent channel is closed, whichever happens first.
//
// Note the caller must *always* call the CancelFunc, otherwise resources may be leaked.
func contextForChannel(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
ctx, cancel := context.WithCancel(context.Background())
go func() {
select {
case <-parentCh:
cancel()
case <-ctx.Done():
}
}()
return ctx, cancel
}
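
A minimal sketch of the same pattern outside this package (contextForChannel is unexported, so the helper below merely mirrors it for illustration):

package main

import (
	"context"
	"fmt"
	"time"
)

// channelContext mirrors the unexported helper: it cancels the returned context
// when either the parent channel closes or cancel() is called.
func channelContext(parentCh <-chan struct{}) (context.Context, context.CancelFunc) {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		select {
		case <-parentCh:
			cancel()
		case <-ctx.Done():
		}
	}()
	return ctx, cancel
}

func main() {
	stopCh := make(chan struct{})
	ctx, cancel := channelContext(stopCh)
	defer cancel() // always call cancel, or the bridging goroutine leaks

	go func() {
		time.Sleep(10 * time.Millisecond)
		close(stopCh) // closing the parent channel cancels the derived context
	}()

	<-ctx.Done()
	fmt.Println("context cancelled:", ctx.Err())
}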
// ExponentialBackoff repeats a condition check with exponential backoff.
//
// It checks the condition up to Steps times, increasing the wait by multiplying
@ -353,7 +372,9 @@ func PollImmediateInfinite(interval time.Duration, condition ConditionFunc) erro
// PollUntil always waits interval before the first run of 'condition'.
// 'condition' will always be invoked at least once.
func PollUntil(interval time.Duration, condition ConditionFunc, stopCh <-chan struct{}) error {
return WaitFor(poller(interval, 0), condition, stopCh)
ctx, cancel := contextForChannel(stopCh)
defer cancel()
return WaitFor(poller(interval, 0), condition, ctx.Done())
}
// PollImmediateUntil tries a condition func until it returns true, an error or stopCh is closed.
@ -422,7 +443,9 @@ func WaitFor(wait WaitFunc, fn ConditionFunc, done <-chan struct{}) error {
// timeout has elapsed and then closes the channel.
//
// Over very short intervals you may receive no ticks before the channel is
// closed. A timeout of 0 is interpreted as an infinity.
// closed. A timeout of 0 is interpreted as an infinity, and in such a case
// it would be the caller's responsibility to close the done channel.
// Failure to do so would result in a leaked goroutine.
//
// Output ticks are not buffered. If the channel is not ready to receive an
// item, the tick is skipped.
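
A usage sketch for the updated PollUntil above (interval, durations and condition are illustrative): the stop channel is bridged through contextForChannel, so closing it ends polling and also releases the internal poller goroutine even though the poller runs with a zero timeout.

package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Stop polling after roughly half a second, whether or not the condition was met.
	go func() {
		time.Sleep(500 * time.Millisecond)
		close(stopCh)
	}()

	start := time.Now()
	err := wait.PollUntil(100*time.Millisecond, func() (bool, error) {
		// Condition: pretend the work finishes after ~300ms.
		return time.Since(start) > 300*time.Millisecond, nil
	}, stopCh)
	fmt.Println(err) // nil if the condition was met before stopCh closed
}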

View File

@ -592,7 +592,7 @@ func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser)
if result := r.transformResponse(resp, req); result.err != nil {
return nil, result.err
}
return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode)
return nil, fmt.Errorf("for request %s, got status: %v", url, resp.StatusCode)
}
wrapperDecoder := wrapperDecoderFn(resp.Body)
return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil
@ -845,13 +845,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
streamErr := fmt.Errorf("Stream error when reading response body, may be caused by closed connection. Please retry. Original error: %v", err)
return Result{
err: streamErr,
}
default:
klog.Errorf("Unexpected error when reading response body: %#v", err)
unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
klog.Errorf("Unexpected error when reading response body: %v", err)
unexpectedErr := fmt.Errorf("Unexpected error when reading response body. Please retry. Original error: %v", err)
return Result{
err: unexpectedErr,
}

View File

@ -48,7 +48,7 @@ type ExpirationCache struct {
// ExpirationPolicy dictates when an object expires. Currently only abstracted out
// so unittests don't rely on the system clock.
type ExpirationPolicy interface {
IsExpired(obj *timestampedEntry) bool
IsExpired(obj *TimestampedEntry) bool
}
// TTLPolicy implements a ttl based ExpirationPolicy.
@ -63,26 +63,29 @@ type TTLPolicy struct {
// IsExpired returns true if the given object is older than the ttl, or it can't
// determine its age.
func (p *TTLPolicy) IsExpired(obj *timestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.timestamp) > p.Ttl
func (p *TTLPolicy) IsExpired(obj *TimestampedEntry) bool {
return p.Ttl > 0 && p.Clock.Since(obj.Timestamp) > p.Ttl
}
// timestampedEntry is the only type allowed in a ExpirationCache.
type timestampedEntry struct {
obj interface{}
timestamp time.Time
// TimestampedEntry is the only type allowed in an ExpirationCache.
// Keep in mind that it is not safe to share timestamps between computers.
// Behavior may be inconsistent if you get a timestamp from the API Server and
// use it on the client machine as part of your ExpirationCache.
type TimestampedEntry struct {
Obj interface{}
Timestamp time.Time
}
// getTimestampedEntry returns the timestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*timestampedEntry, bool) {
// getTimestampedEntry returns the TimestampedEntry stored under the given key.
func (c *ExpirationCache) getTimestampedEntry(key string) (*TimestampedEntry, bool) {
item, _ := c.cacheStorage.Get(key)
if tsEntry, ok := item.(*timestampedEntry); ok {
if tsEntry, ok := item.(*TimestampedEntry); ok {
return tsEntry, true
}
return nil, false
}
// getOrExpire retrieves the object from the timestampedEntry if and only if it hasn't
// getOrExpire retrieves the object from the TimestampedEntry if and only if it hasn't
// already expired. It holds a write lock across deletion.
func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
// Prevent all inserts from the time we deem an item as "expired" to when we
@ -95,11 +98,11 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.Obj)
c.cacheStorage.Delete(key)
return nil, false
}
return timestampedItem.obj, true
return timestampedItem.Obj, true
}
// GetByKey returns the item stored under the key, or sets exists=false.
@ -126,7 +129,7 @@ func (c *ExpirationCache) List() []interface{} {
list := make([]interface{}, 0, len(items))
for _, item := range items {
obj := item.(*timestampedEntry).obj
obj := item.(*TimestampedEntry).Obj
if key, err := c.keyFunc(obj); err != nil {
list = append(list, obj)
} else if obj, exists := c.getOrExpire(key); exists {
@ -151,7 +154,7 @@ func (c *ExpirationCache) Add(obj interface{}) error {
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
c.cacheStorage.Add(key, &timestampedEntry{obj, c.clock.Now()})
c.cacheStorage.Add(key, &TimestampedEntry{obj, c.clock.Now()})
return nil
}
@ -184,7 +187,7 @@ func (c *ExpirationCache) Replace(list []interface{}, resourceVersion string) er
if err != nil {
return KeyError{item, err}
}
items[key] = &timestampedEntry{item, ts}
items[key] = &TimestampedEntry{item, ts}
}
c.expirationLock.Lock()
defer c.expirationLock.Unlock()
@ -199,10 +202,15 @@ func (c *ExpirationCache) Resync() error {
// NewTTLStore creates and returns an ExpirationCache with a TTLPolicy
func NewTTLStore(keyFunc KeyFunc, ttl time.Duration) Store {
return NewExpirationStore(keyFunc, &TTLPolicy{ttl, clock.RealClock{}})
}
// NewExpirationStore creates and returns an ExpirationCache for a given policy
func NewExpirationStore(keyFunc KeyFunc, expirationPolicy ExpirationPolicy) Store {
return &ExpirationCache{
cacheStorage: NewThreadSafeStore(Indexers{}, Indices{}),
keyFunc: keyFunc,
clock: clock.RealClock{},
expirationPolicy: &TTLPolicy{ttl, clock.RealClock{}},
expirationPolicy: expirationPolicy,
}
}
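
With TimestampedEntry exported and NewExpirationStore public, callers can supply their own ExpirationPolicy; a sketch (the policy and key function below are made up for illustration):

package main

import (
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
)

// neverAfterFivePM is an illustrative policy: entries added at or after 17:00
// local time are treated as expired immediately.
type neverAfterFivePM struct{}

func (p neverAfterFivePM) IsExpired(entry *cache.TimestampedEntry) bool {
	return entry.Timestamp.Hour() >= 17
}

func main() {
	keyFunc := func(obj interface{}) (string, error) {
		return obj.(string), nil // keys are the string values themselves
	}

	// The common case: a TTL-based store.
	ttlStore := cache.NewTTLStore(keyFunc, 30*time.Second)
	_ = ttlStore.Add("with-ttl")

	// The new constructor accepts any ExpirationPolicy.
	store := cache.NewExpirationStore(keyFunc, neverAfterFivePM{})
	_ = store.Add("custom-policy")
	item, exists, _ := store.GetByKey("custom-policy")
	fmt.Println(item, exists) // e.g. "custom-policy true" before 17:00 local time
}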

View File

@ -38,7 +38,7 @@ type FakeExpirationPolicy struct {
RetrieveKeyFunc KeyFunc
}
func (p *FakeExpirationPolicy) IsExpired(obj *timestampedEntry) bool {
func (p *FakeExpirationPolicy) IsExpired(obj *TimestampedEntry) bool {
key, _ := p.RetrieveKeyFunc(obj)
return !p.NeverExpire.Has(key)
}

View File

@ -555,7 +555,7 @@ func (p *processorListener) run() {
case deleteNotification:
p.handler.OnDelete(notification.oldObj)
default:
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %#v", next))
utilruntime.HandleError(fmt.Errorf("unrecognized notification: %T", next))
}
}
// the only way to get here is if the p.nextCh is empty and closed