Merge pull request #74102 from caesarxuchao/automated-cherry-pick-of-#73443-#73713-#73805-#74000-upstream-release-1.13

Automated cherry pick of #73443: update json-patch to pick up bug fixes

Kubernetes-commit: de4225fa13bfb50581f80e6af63b326a3c1028b1
Kubernetes Publisher 2019-02-21 12:46:51 -08:00
parent a5274af388
commit 9593044ffe
13 changed files with 480 additions and 306 deletions

Godeps/Godeps.json generated

File diff suppressed because it is too large.


@@ -25,6 +25,19 @@ go get -u github.com/evanphx/json-patch
 * [Comparing JSON documents](#comparing-json-documents)
 * [Combine merge patches](#combine-merge-patches)
+# Configuration
+* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
+This defaults to `true` and enables the non-standard practice of allowing
+negative indices to mean indices starting at the end of an array. This
+functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
+false`.
+* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
+which limits the total size increase in bytes caused by "copy" operations in a
+patch. It defaults to 0, which means there is no limit.
 ## Create and apply a merge patch
 Given both an original JSON document and a modified JSON document, you can create
 a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.

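For illustration, a minimal sketch (editorial, not part of this commit) of a consumer exercising the new copy-size limit; the document and patch here are invented:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Cap the total bytes that "copy" operations may add while applying a
	// single patch. 0 (the default) means unlimited.
	jsonpatch.AccumulatedCopySizeLimit = 16

	doc := []byte(`{"a": "0123456789012345678901234567890123456789", "b": null}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "copy", "from": "/a", "path": "/b"}]`))
	if err != nil {
		panic(err)
	}

	// The copied value marshals to ~42 bytes, exceeding the 16-byte budget,
	// so Apply returns an *AccumulatedCopySizeError.
	if _, err := patch.Apply(doc); err != nil {
		fmt.Println("patch rejected:", err)
	}
}
```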
vendor/github.com/evanphx/json-patch/errors.go generated vendored Normal file

@@ -0,0 +1,38 @@
+package jsonpatch
+
+import "fmt"
+
+// AccumulatedCopySizeError is an error type returned when the accumulated size
+// increase caused by copy operations in a patch operation has exceeded the
+// limit.
+type AccumulatedCopySizeError struct {
+	limit       int64
+	accumulated int64
+}
+
+// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
+func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
+	return &AccumulatedCopySizeError{limit: l, accumulated: a}
+}
+
+// Error implements the error interface.
+func (a *AccumulatedCopySizeError) Error() string {
+	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
+}
+
+// ArraySizeError is an error type returned when the array size has exceeded
+// the limit.
+type ArraySizeError struct {
+	limit int
+	size  int
+}
+
+// NewArraySizeError returns an ArraySizeError.
+func NewArraySizeError(l, s int) *ArraySizeError {
+	return &ArraySizeError{limit: l, size: s}
+}
+
+// Error implements the error interface.
+func (a *ArraySizeError) Error() string {
+	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
+}

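Callers can distinguish the copy-budget failure from ordinary patch errors with a type assertion. A hedged sketch (the helper name is illustrative, not from the diff):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// applyWithBudget applies p to doc, translating a copy-size failure into a
// caller-friendly error.
func applyWithBudget(p jsonpatch.Patch, doc []byte) ([]byte, error) {
	out, err := p.Apply(doc)
	if _, ok := err.(*jsonpatch.AccumulatedCopySizeError); ok {
		return nil, fmt.Errorf("copy budget exhausted: %v", err)
	}
	return out, err
}
```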
vendor/github.com/evanphx/json-patch/patch.go generated vendored

@@ -14,6 +14,16 @@ const (
 	eAry
 )
 
+var (
+	// SupportNegativeIndices decides whether to support non-standard practice of
+	// allowing negative indices to mean indices starting at the end of an array.
+	// Default to true.
+	SupportNegativeIndices bool = true
+	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
+	// "copy" operations in a patch.
+	AccumulatedCopySizeLimit int64 = 0
+)
+
 type lazyNode struct {
 	raw *json.RawMessage
 	doc partialDoc
@@ -61,6 +71,20 @@ func (n *lazyNode) UnmarshalJSON(data []byte) error {
 	return nil
 }
 
+func deepCopy(src *lazyNode) (*lazyNode, int, error) {
+	if src == nil {
+		return nil, 0, nil
+	}
+	a, err := src.MarshalJSON()
+	if err != nil {
+		return nil, 0, err
+	}
+	sz := len(a)
+	ra := make(json.RawMessage, sz)
+	copy(ra, a)
+	return newLazyNode(&ra), sz, nil
+}
+
 func (n *lazyNode) intoDoc() (*partialDoc, error) {
 	if n.which == eDoc {
 		return &n.doc, nil
@@ -342,35 +366,14 @@ func (d *partialDoc) remove(key string) error {
 	return nil
 }
 
+// set should only be used to implement the "replace" operation, so "key" must
+// be an already existing index in "d".
 func (d *partialArray) set(key string, val *lazyNode) error {
-	if key == "-" {
-		*d = append(*d, val)
-		return nil
-	}
-
 	idx, err := strconv.Atoi(key)
 	if err != nil {
 		return err
 	}
-
-	sz := len(*d)
-	if idx+1 > sz {
-		sz = idx + 1
-	}
-
-	ary := make([]*lazyNode, sz)
-
-	cur := *d
-
-	copy(ary, cur)
-
-	if idx >= len(ary) {
-		return fmt.Errorf("Unable to access invalid index: %d", idx)
-	}
-
-	ary[idx] = val
-
-	*d = ary
+	(*d)[idx] = val
 	return nil
 }
@@ -385,17 +388,26 @@ func (d *partialArray) add(key string, val *lazyNode) error {
 		return err
 	}
 
-	ary := make([]*lazyNode, len(*d)+1)
+	sz := len(*d) + 1
+
+	ary := make([]*lazyNode, sz)
 
 	cur := *d
 
-	if idx < -len(ary) || idx >= len(ary) {
+	if idx >= len(ary) {
 		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
 
-	if idx < 0 {
-		idx += len(ary)
+	if SupportNegativeIndices {
+		if idx < -len(ary) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(ary)
+		}
 	}
 
 	copy(ary[0:idx], cur[0:idx])
 	ary[idx] = val
 	copy(ary[idx+1:], cur[idx:])
@@ -426,11 +438,18 @@ func (d *partialArray) remove(key string) error {
 	cur := *d
 
-	if idx < -len(cur) || idx >= len(cur) {
-		return fmt.Errorf("Unable to remove invalid index: %d", idx)
+	if idx >= len(cur) {
+		return fmt.Errorf("Unable to access invalid index: %d", idx)
 	}
 
-	if idx < 0 {
-		idx += len(cur)
+	if SupportNegativeIndices {
+		if idx < -len(cur) {
+			return fmt.Errorf("Unable to access invalid index: %d", idx)
+		}
+
+		if idx < 0 {
+			idx += len(cur)
+		}
 	}
 
 	ary := make([]*lazyNode, len(cur)-1)
@@ -511,7 +530,7 @@ func (p Patch) move(doc *container, op operation) error {
 		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
 	}
 
-	return con.set(key, val)
+	return con.add(key, val)
 }
 
 func (p Patch) test(doc *container, op operation) error {
@@ -545,7 +564,7 @@ func (p Patch) test(doc *container, op operation) error {
 	return fmt.Errorf("Testing value %s failed", path)
 }
 
-func (p Patch) copy(doc *container, op operation) error {
+func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
 	from := op.from()
 
 	con, key := findObject(doc, from)
@@ -567,7 +586,16 @@ func (p Patch) copy(doc *container, op operation) error {
 		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
 	}
 
-	return con.set(key, val)
+	valCopy, sz, err := deepCopy(val)
+	if err != nil {
+		return err
+	}
+
+	(*accumulatedCopySize) += int64(sz)
+	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
+		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
+	}
+
+	return con.add(key, valCopy)
 }
 
 // Equal indicates if 2 JSON documents have the same structural equality.
@@ -620,6 +648,8 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
 	err = nil
 
+	var accumulatedCopySize int64
+
 	for _, op := range p {
 		switch op.kind() {
 		case "add":
@@ -633,7 +663,7 @@ func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
 		case "test":
 			err = p.test(&pd, op)
 		case "copy":
-			err = p.copy(&pd, op)
+			err = p.copy(&pd, op, &accumulatedCopySize)
 		default:
 			err = fmt.Errorf("Unexpected kind: %s", op.kind())
 		}

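The switch from `con.set` to `con.add` plus `deepCopy` means a copied value no longer aliases its source. A sketch (editorial; document and paths invented) of the behavior this buys:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"src": {"x": 1}, "dst": null}`)
	patch, err := jsonpatch.DecodePatch([]byte(`[
		{"op": "copy", "from": "/src", "path": "/dst"},
		{"op": "replace", "path": "/src/x", "value": 2}
	]`))
	if err != nil {
		panic(err)
	}

	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	// Because "copy" now deep-copies, the later replace of /src/x does not
	// leak into /dst; expect {"dst":{"x":1},"src":{"x":2}} (key order may vary).
	fmt.Println(string(out))
}
```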
k8s.io/apimachinery/pkg/api/errors/errors.go

@@ -341,6 +341,17 @@ func NewTooManyRequestsError(message string) *StatusError {
 	}}
 }
 
+// NewRequestEntityTooLargeError returns an error indicating that the request
+// entity was too large.
+func NewRequestEntityTooLargeError(message string) *StatusError {
+	return &StatusError{metav1.Status{
+		Status:  metav1.StatusFailure,
+		Code:    http.StatusRequestEntityTooLarge,
+		Reason:  metav1.StatusReasonRequestEntityTooLarge,
+		Message: fmt.Sprintf("Request entity too large: %s", message),
+	}}
+}
+
 // NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
 func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
 	reason := metav1.StatusReasonUnknown
@@ -527,6 +538,19 @@ func IsTooManyRequests(err error) bool {
 	return false
 }
 
+// IsRequestEntityTooLargeError determines if err is an error which indicates
+// the request entity is too large.
+func IsRequestEntityTooLargeError(err error) bool {
+	if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge {
+		return true
+	}
+	switch t := err.(type) {
+	case APIStatus:
+		return t.Status().Code == http.StatusRequestEntityTooLarge
+	}
+	return false
+}
+
 // IsUnexpectedServerError returns true if the server response was not in the expected API format,
 // and may be the result of another HTTP actor.
 func IsUnexpectedServerError(err error) bool {

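A short sketch (editorial; message text invented) of how these helpers pair up on the server and client side:

```go
package main

import (
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func main() {
	// Server side: reject an oversized request body with a 413 status.
	err := apierrors.NewRequestEntityTooLargeError("patch is larger than 1MB")

	// Client side: branch on the status reason rather than matching strings.
	if apierrors.IsRequestEntityTooLargeError(err) {
		fmt.Println("request entity too large:", err)
	}
}
```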
k8s.io/apimachinery/pkg/apis/meta/v1/types.go

@@ -713,6 +713,10 @@ const (
 	// Status code 406
 	StatusReasonNotAcceptable StatusReason = "NotAcceptable"
 
+	// StatusReasonRequestEntityTooLarge means that the request entity is too large.
+	// Status code 413
+	StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge"
+
 	// StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable
 	// to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml.
 	// API calls that return UnsupportedMediaType can never succeed.

k8s.io/apimachinery/pkg/runtime/codec.go

@@ -283,6 +283,7 @@ var _ GroupVersioner = multiGroupVersioner{}
 type multiGroupVersioner struct {
 	target             schema.GroupVersion
 	acceptedGroupKinds []schema.GroupKind
+	coerce             bool
 }
 
 // NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds.
@@ -294,6 +295,22 @@ func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKi
 	return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds}
 }
 
+// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind.
+// Incoming kinds that match the provided groupKinds are preferred.
+// Kind may be empty in the provided group kind, in which case any kind will match.
+// Examples:
+//   gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar
+//   KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind)
+//
+//   gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+//   KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group)
+//
+//   gv=mygroup/__internal, groupKinds=mygroup, anothergroup
+//   KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list)
+func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner {
+	return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true}
+}
+
 // KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will
 // use the originating kind where possible.
 func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) {
@@ -308,5 +325,8 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio
 			return v.target.WithKind(src.Kind), true
 		}
 	}
+	if v.coerce && len(kinds) > 0 {
+		return v.target.WithKind(kinds[0].Kind), true
+	}
 	return schema.GroupVersionKind{}, false
 }

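Mirroring the doc-comment examples above, a minimal runnable sketch (editorial) of the coercing fallback:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	internal := schema.GroupVersion{Group: "mygroup", Version: "__internal"}
	v := runtime.NewCoercingMultiGroupVersioner(internal,
		schema.GroupKind{Group: "mygroup"},
		schema.GroupKind{Group: "anothergroup"},
	)

	// Nothing matches the preferred groups, so a plain multi-group versioner
	// would report no match; the coercing variant falls back to the first kind.
	gvk, ok := v.KindForGroupVersionKinds([]schema.GroupVersionKind{
		{Group: "yetanother", Version: "v1", Kind: "Baz"},
	})
	fmt.Println(gvk, ok) // mygroup/__internal, Kind=Baz true
}
```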
k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go

@@ -64,7 +64,7 @@ func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder {
 		reader:  r,
 		decoder: d,
 		buf:     make([]byte, 1024),
-		maxBytes: 1024 * 1024,
+		maxBytes: 16 * 1024 * 1024,
 	}
 }

k8s.io/client-go/rest/config.go

@@ -70,6 +70,11 @@ type Config struct {
 	// TODO: demonstrate an OAuth2 compatible client.
 	BearerToken string
 
+	// Path to a file containing a BearerToken.
+	// If set, the contents are periodically read.
+	// The last successfully read value takes precedence over BearerToken.
+	BearerTokenFile string
+
 	// Impersonate is the configuration that RESTClient will use for impersonation.
 	Impersonate ImpersonationConfig
@@ -322,9 +327,8 @@ func InClusterConfig() (*Config, error) {
 		return nil, ErrNotInCluster
 	}
 
-	ts := NewCachedFileTokenSource(tokenFile)
-	if _, err := ts.Token(); err != nil {
+	token, err := ioutil.ReadFile(tokenFile)
+	if err != nil {
 		return nil, err
 	}
@@ -340,7 +344,8 @@
 		// TODO: switch to using cluster DNS.
 		Host:            "https://" + net.JoinHostPort(host, port),
 		TLSClientConfig: tlsClientConfig,
-		WrapTransport:   TokenSourceWrapTransport(ts),
+		BearerToken:     string(token),
+		BearerTokenFile: tokenFile,
 	}, nil
 }
@@ -430,12 +435,13 @@
 // CopyConfig returns a copy of the given config
 func CopyConfig(config *Config) *Config {
 	return &Config{
-		Host:          config.Host,
-		APIPath:       config.APIPath,
-		ContentConfig: config.ContentConfig,
-		Username:      config.Username,
-		Password:      config.Password,
-		BearerToken:   config.BearerToken,
+		Host:            config.Host,
+		APIPath:         config.APIPath,
+		ContentConfig:   config.ContentConfig,
+		Username:        config.Username,
+		Password:        config.Password,
+		BearerToken:     config.BearerToken,
+		BearerTokenFile: config.BearerTokenFile,
 		Impersonate: ImpersonationConfig{
 			Groups: config.Impersonate.Groups,
 			Extra:  config.Impersonate.Extra,

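In effect, InClusterConfig now yields a config along these lines; a sketch (editorial) using the conventional in-cluster host and token path, with TLS details omitted:

```go
package main

import (
	"io/ioutil"

	"k8s.io/client-go/rest"
)

func main() {
	const tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"

	// The initial token is read once; BearerTokenFile lets the transport pick
	// up rotated tokens later without rebuilding the client.
	token, err := ioutil.ReadFile(tokenFile)
	if err != nil {
		panic(err)
	}

	cfg := &rest.Config{
		Host:            "https://kubernetes.default.svc",
		BearerToken:     string(token),
		BearerTokenFile: tokenFile,
		// TLSClientConfig with the service account CA omitted for brevity.
	}
	_ = cfg
}
```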
k8s.io/client-go/tools/clientcmd/client_config.go

@@ -229,11 +229,12 @@ func (config *DirectClientConfig) getUserIdentificationPartialConfig(configAuthI
 	if len(configAuthInfo.Token) > 0 {
 		mergedConfig.BearerToken = configAuthInfo.Token
 	} else if len(configAuthInfo.TokenFile) > 0 {
-		ts := restclient.NewCachedFileTokenSource(configAuthInfo.TokenFile)
-		if _, err := ts.Token(); err != nil {
+		tokenBytes, err := ioutil.ReadFile(configAuthInfo.TokenFile)
+		if err != nil {
 			return nil, err
 		}
-		mergedConfig.WrapTransport = restclient.TokenSourceWrapTransport(ts)
+		mergedConfig.BearerToken = string(tokenBytes)
+		mergedConfig.BearerTokenFile = configAuthInfo.TokenFile
 	}
 	if len(configAuthInfo.Impersonate) > 0 {
 		mergedConfig.Impersonate = restclient.ImpersonationConfig{

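The same treatment applies to kubeconfig users: a `token-file` entry now populates both fields on the resulting rest.Config. A minimal sketch (editorial; the path is illustrative):

```go
package main

import (
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// A kubeconfig user with a token file; getUserIdentificationPartialConfig
	// copies both the current file contents (BearerToken) and the path
	// (BearerTokenFile) into the merged client config.
	user := clientcmdapi.NewAuthInfo()
	user.TokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
	_ = user
}
```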
k8s.io/client-go/transport/config.go

@@ -39,6 +39,11 @@ type Config struct {
 	// Bearer token for authentication
 	BearerToken string
 
+	// Path to a file containing a BearerToken.
+	// If set, the contents are periodically read.
+	// The last successfully read value takes precedence over BearerToken.
+	BearerTokenFile string
+
 	// Impersonate is the config that this Config will impersonate using
 	Impersonate ImpersonationConfig
@@ -80,7 +85,7 @@ func (c *Config) HasBasicAuth() bool {
 
 // HasTokenAuth returns whether the configuration has token authentication or not.
 func (c *Config) HasTokenAuth() bool {
-	return len(c.BearerToken) != 0
+	return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0
 }
 
 // HasCertAuth returns whether the configuration has certificate authentication or not.

k8s.io/client-go/transport/round_trippers.go

@@ -22,6 +22,7 @@ import (
 	"strings"
 	"time"
 
+	"golang.org/x/oauth2"
 	"k8s.io/klog"
 
 	utilnet "k8s.io/apimachinery/pkg/util/net"
@@ -44,7 +45,11 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
 	case config.HasBasicAuth() && config.HasTokenAuth():
 		return nil, fmt.Errorf("username/password or bearer token may be set, but not both")
 	case config.HasTokenAuth():
-		rt = NewBearerAuthRoundTripper(config.BearerToken, rt)
+		var err error
+		rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt)
+		if err != nil {
+			return nil, err
+		}
 	case config.HasBasicAuth():
 		rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt)
 	}
@@ -265,13 +270,35 @@ func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { r
 type bearerAuthRoundTripper struct {
 	bearer string
+	source oauth2.TokenSource
 	rt     http.RoundTripper
 }
 
 // NewBearerAuthRoundTripper adds the provided bearer token to a request
 // unless the authorization header has already been set.
 func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper {
-	return &bearerAuthRoundTripper{bearer, rt}
+	return &bearerAuthRoundTripper{bearer, nil, rt}
 }
 
+// NewBearerAuthWithRefreshRoundTripper adds the provided bearer token to a request
+// unless the authorization header has already been set.
+// If tokenFile is non-empty, it is periodically read,
+// and the last successfully read content is used as the bearer token.
+// If tokenFile is non-empty and bearer is empty, the tokenFile is read
+// immediately to populate the initial bearer token.
+func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) {
+	if len(tokenFile) == 0 {
+		return &bearerAuthRoundTripper{bearer, nil, rt}, nil
+	}
+	source := NewCachedFileTokenSource(tokenFile)
+	if len(bearer) == 0 {
+		token, err := source.Token()
+		if err != nil {
+			return nil, err
+		}
+		bearer = token.AccessToken
+	}
+	return &bearerAuthRoundTripper{bearer, source, rt}, nil
+}
+
 func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -280,7 +307,13 @@ func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response,
 	}
 
 	req = utilnet.CloneRequest(req)
-	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", rt.bearer))
+	token := rt.bearer
+	if rt.source != nil {
+		if refreshedToken, err := rt.source.Token(); err == nil {
+			token = refreshedToken.AccessToken
+		}
+	}
+	req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token))
 
 	return rt.rt.RoundTrip(req)
 }

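The new constructor can also be used directly to wrap any http.RoundTripper; a sketch (editorial; the token path is the conventional service account location):

```go
package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// With an empty initial bearer, the token file is read immediately; each
	// request then uses the most recently cached token from the file.
	rt, err := transport.NewBearerAuthWithRefreshRoundTripper(
		"", "/var/run/secrets/kubernetes.io/serviceaccount/token", http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	client := &http.Client{Transport: rt}
	_ = client
}
```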
k8s.io/client-go/transport/token_source.go

@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-package rest
+package transport
 
 import (
 	"fmt"
@@ -47,14 +47,14 @@ func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) htt
 func NewCachedFileTokenSource(path string) oauth2.TokenSource {
 	return &cachingTokenSource{
 		now:    time.Now,
-		leeway: 1 * time.Minute,
+		leeway: 10 * time.Second,
 		base: &fileTokenSource{
 			path: path,
-			// This period was picked because it is half of the minimum validity
-			// duration for a token provisioned by they TokenRequest API. This is
-			// unsophisticated and should induce rotation at a frequency that should
-			// work with the token volume source.
-			period: 5 * time.Minute,
+			// This period was picked because it is half of the duration between when the kubelet
+			// refreshes a projected service account token and when the original token expires.
+			// Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime.
+			// This should induce re-reading at a frequency that works with the token volume source.
+			period: time.Minute,
 		},
 	}
 }
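The cached source can also be wired up by hand; a sketch (editorial) of the moved transport-package API:

```go
package main

import (
	"net/http"

	"k8s.io/client-go/transport"
)

func main() {
	// The cached source re-reads the file roughly once per minute (the new
	// period) and treats tokens as expired 10s early (the new leeway), so a
	// token rotated by the kubelet is picked up before the old one lapses.
	ts := transport.NewCachedFileTokenSource("/var/run/secrets/kubernetes.io/serviceaccount/token")

	wrap := transport.TokenSourceWrapTransport(ts)
	client := &http.Client{Transport: wrap(http.DefaultTransport)}
	_ = client
}
```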