Mirror of https://github.com/kubernetes/sample-controller.git (synced 2026-02-04 00:06:14 +08:00)

Compare commits: 20 commits, release-1. ... release-1.
Commits:

23f7df7151
1dd3ce7202
a7622f471e
927b78e23d
d6de82cef9
e351631c79
d51645b4a2
301c6f6aca
daf39bd5c7
ce0003b2f2
b4adc2284d
ba64cba0a7
584bf25d7c
3024d6de0b
099461269d
10721e2508
811147bdcf
d4ef09507c
eab4816b56
7d47044a89
Godeps/Godeps.json (generated, 546 lines changed)
File diff suppressed because it is too large.
@@ -20,6 +20,7 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/golang/glog"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
@@ -36,7 +37,6 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog"
 
 	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
 	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
@@ -96,9 +96,9 @@ func NewController(
 	// Add sample-controller types to the default Kubernetes Scheme so Events can be
 	// logged for sample-controller types.
 	utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
-	klog.V(4).Info("Creating event broadcaster")
+	glog.V(4).Info("Creating event broadcaster")
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(klog.Infof)
+	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
 
@@ -113,7 +113,7 @@ func NewController(
 		recorder:          recorder,
 	}
 
-	klog.Info("Setting up event handlers")
+	glog.Info("Setting up event handlers")
 	// Set up an event handler for when Foo resources change
 	fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: controller.enqueueFoo,
@@ -154,23 +154,23 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
 	defer c.workqueue.ShutDown()
 
 	// Start the informer factories to begin populating the informer caches
-	klog.Info("Starting Foo controller")
+	glog.Info("Starting Foo controller")
 
 	// Wait for the caches to be synced before starting workers
-	klog.Info("Waiting for informer caches to sync")
+	glog.Info("Waiting for informer caches to sync")
 	if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}
 
-	klog.Info("Starting workers")
+	glog.Info("Starting workers")
 	// Launch two workers to process Foo resources
 	for i := 0; i < threadiness; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
 
-	klog.Info("Started workers")
+	glog.Info("Started workers")
 	<-stopCh
-	klog.Info("Shutting down workers")
+	glog.Info("Shutting down workers")
 
 	return nil
 }
@@ -219,14 +219,12 @@ func (c *Controller) processNextWorkItem() bool {
 		// Run the syncHandler, passing it the namespace/name string of the
 		// Foo resource to be synced.
 		if err := c.syncHandler(key); err != nil {
-			// Put the item back on the workqueue to handle any transient errors.
-			c.workqueue.AddRateLimited(key)
-			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
+			return fmt.Errorf("error syncing '%s': %s", key, err.Error())
 		}
 		// Finally, if no error occurs we Forget this item so it does not
 		// get queued again until another change happens.
 		c.workqueue.Forget(obj)
-		klog.Infof("Successfully synced '%s'", key)
+		glog.Infof("Successfully synced '%s'", key)
 		return nil
 	}(obj)
 
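The three lines removed in the hunk above are the retry half of the standard client-go workqueue pattern: on a transient sync error the key is re-added with rate-limited backoff instead of being dropped. A minimal sketch of that pattern, with a hypothetical helper and a caller-supplied `sync` function, not the controller's actual code:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

// processNextItem drains one key from the queue, retrying transient
// failures with rate-limited backoff. Hypothetical helper for illustration.
func processNextItem(queue workqueue.RateLimitingInterface, sync func(string) error) bool {
	obj, shutdown := queue.Get()
	if shutdown {
		return false
	}
	defer queue.Done(obj)

	key := obj.(string)
	if err := sync(key); err != nil {
		// Transient failure: re-add with backoff so the key is retried.
		queue.AddRateLimited(key)
		fmt.Printf("error syncing %q: %v, requeuing\n", key, err)
		return true
	}
	// Success: wipe the key's rate-limit history so future failures
	// start from a fresh backoff.
	queue.Forget(key)
	return true
}

func main() {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())
	queue.Add("default/example-foo")
	processNextItem(queue, func(key string) error { return nil })
	queue.ShutDown()
}
```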
@@ -297,7 +295,7 @@ func (c *Controller) syncHandler(key string) error {
 	// number does not equal the current desired replicas on the Deployment, we
 	// should update the Deployment resource.
 	if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
-		klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
+		glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
 		deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo))
 	}
 
@@ -365,9 +363,9 @@ func (c *Controller) handleObject(obj interface{}) {
 			runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
 			return
 		}
-		klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
+		glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
 	}
-	klog.V(4).Infof("Processing object: %s", object.GetName())
+	glog.V(4).Infof("Processing object: %s", object.GetName())
 	if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
 		// If this object is not owned by a Foo, we should not do anything more
 		// with it.
@@ -377,7 +375,7 @@ func (c *Controller) handleObject(obj interface{}) {
 
 		foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
 		if err != nil {
-			klog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
+			glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
 			return
 		}
 
@@ -198,7 +198,7 @@ func checkAction(expected, actual core.Action, t *testing.T) {
 		expPatch := e.GetPatch()
 		patch := a.GetPatch()
 
-		if !reflect.DeepEqual(expPatch, patch) {
+		if !reflect.DeepEqual(expPatch, expPatch) {
 			t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
 				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expPatch, patch))
 		}
 
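Note what this hunk changes in the test helper: the `+` side guards the error report with `reflect.DeepEqual(expPatch, expPatch)`, a value compared against itself, so the branch can never fire and a wrong patch is silently accepted; the `-` side performs the real comparison against `patch`. A small illustration with hypothetical values:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	expPatch := []byte(`{"op":"replace"}`)
	patch := []byte(`{"op":"remove"}`)

	// A byte slice is always deeply equal to itself, so a test written
	// this way can never report a wrong patch.
	fmt.Println(!reflect.DeepEqual(expPatch, expPatch)) // false: branch never taken
	fmt.Println(!reflect.DeepEqual(expPatch, patch))    // true: the intended check
}
```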
main.go (16 lines changed)
@@ -20,10 +20,10 @@ import (
 	"flag"
 	"time"
 
+	"github.com/golang/glog"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/klog"
 	// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
 	// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 
@@ -45,17 +45,17 @@ func main() {
 
 	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
 	if err != nil {
-		klog.Fatalf("Error building kubeconfig: %s", err.Error())
+		glog.Fatalf("Error building kubeconfig: %s", err.Error())
 	}
 
 	kubeClient, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
-		klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
+		glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
 	}
 
 	exampleClient, err := clientset.NewForConfig(cfg)
 	if err != nil {
-		klog.Fatalf("Error building example clientset: %s", err.Error())
+		glog.Fatalf("Error building example clientset: %s", err.Error())
 	}
 
 	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
@@ -65,13 +65,11 @@ func main() {
 		kubeInformerFactory.Apps().V1().Deployments(),
 		exampleInformerFactory.Samplecontroller().V1alpha1().Foos())
 
-	// notice that there is no need to run Start methods in a separate goroutine. (i.e. go kubeInformerFactory.Start(stopCh)
-	// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
-	kubeInformerFactory.Start(stopCh)
-	exampleInformerFactory.Start(stopCh)
+	go kubeInformerFactory.Start(stopCh)
+	go exampleInformerFactory.Start(stopCh)
 
 	if err = controller.Run(2, stopCh); err != nil {
-		klog.Fatalf("Error running controller: %s", err.Error())
+		glog.Fatalf("Error running controller: %s", err.Error())
 	}
 }
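The comment deleted in the last hunk documents the behavior that makes the `go` wrappers on the `+` side unnecessary: `SharedInformerFactory.Start` is non-blocking and runs each registered informer in its own goroutine. A minimal sketch of the pattern, using a fake clientset so it runs without a cluster:

```go
package main

import (
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset() // fake client: no cluster needed
	stopCh := make(chan struct{})
	defer close(stopCh)

	factory := kubeinformers.NewSharedInformerFactory(client, 30*time.Second)

	// Register an informer before Start so the factory knows about it.
	_ = factory.Apps().V1().Deployments().Informer()

	// Start is non-blocking: it launches one goroutine per registered
	// informer, so wrapping the call in `go` is redundant.
	factory.Start(stopCh)

	// Block until the started informers have synced their caches.
	factory.WaitForCacheSync(stopCh)
}
```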
@@ -15,7 +15,7 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +groupName=samplecontroller.k8s.io
 
 // Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=samplecontroller.k8s.io
 package v1alpha1
@@ -131,7 +131,7 @@ func (c *FakeFoos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
 // Patch applies the patch and returns the patched foo.
 func (c *FakeFoos) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, pt, data, subresources...), &v1alpha1.Foo{})
+		Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, data, subresources...), &v1alpha1.Foo{})
 
 	if obj == nil {
 		return nil, err
@@ -19,8 +19,6 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"time"
-
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
@@ -78,16 +76,11 @@ func (c *foos) Get(name string, options v1.GetOptions) (result *v1alpha1.Foo, er
 
 // List takes label and field selectors, and returns the list of Foos that match those selectors.
 func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1alpha1.FooList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -95,16 +88,11 @@ func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
 
 // Watch returns a watch.Interface that watches the requested foos.
 func (c *foos) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Watch()
 }
 
@@ -162,15 +150,10 @@ func (c *foos) Delete(name string, options *v1.DeleteOptions) error {
 
 // DeleteCollection deletes a collection of objects.
 func (c *foos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
-	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Timeout(timeout).
 		Body(options).
 		Do().
 		Error()
@@ -27,7 +27,6 @@ import (
 	versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
 )
 
-// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
 type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
 
 // SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -36,5 +35,4 @@ type SharedInformerFactory interface {
 	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
 }
 
-// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
 type TweakListOptionsFunc func(*v1.ListOptions)
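For context on the doc comments this diff touches: `TweakListOptionsFunc` is the hook a filtered informer factory uses to let callers mutate the `ListOptions` behind every list and watch request. A minimal sketch against client-go's core factory, using a fake client and a made-up label selector:

```go
package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	client := fake.NewSimpleClientset()

	// TweakListOptionsFunc in action: narrow every list/watch issued by
	// this factory's informers to objects matching a label selector.
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "app=sample" // made-up example selector
	}
	factory := kubeinformers.NewFilteredSharedInformerFactory(
		client, 30*time.Second, metav1.NamespaceAll, tweak)

	fmt.Printf("filtered factory ready: %T\n", factory)
}
```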
vendor/github.com/evanphx/json-patch/.travis.yml (generated, vendored, 16 lines deleted)
@@ -1,16 +0,0 @@
-language: go
-
-go:
-  - 1.8
-  - 1.7
-
-install:
-  - if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
-  - go get github.com/jessevdk/go-flags
-
-script:
-  - go get
-  - go test -cover ./...
-
-notifications:
-  email: false
vendor/github.com/evanphx/json-patch/LICENSE (generated, vendored, 25 lines deleted)
@@ -1,25 +0,0 @@
-Copyright (c) 2014, Evan Phoenix
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
-  list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-* Neither the name of the Evan Phoenix nor the names of its contributors
-  may be used to endorse or promote products derived from this software
-  without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
vendor/github.com/evanphx/json-patch/README.md (generated, vendored, 297 lines deleted)
@@ -1,297 +0,0 @@
-# JSON-Patch
-`jsonpatch` is a library which provides functionallity for both applying
-[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
-well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
-
-[](http://godoc.org/github.com/evanphx/json-patch)
-[](https://travis-ci.org/evanphx/json-patch)
-[](https://goreportcard.com/report/github.com/evanphx/json-patch)
-
-# Get It!
-
-**Latest and greatest**:
-```bash
-go get -u github.com/evanphx/json-patch
-```
-
-**Stable Versions**:
-* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
-
-(previous versions below `v3` are unavailable)
-
-# Use It!
-* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
-* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
-* [Comparing JSON documents](#comparing-json-documents)
-* [Combine merge patches](#combine-merge-patches)
-
-
-# Configuration
-
-* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
-This defaults to `true` and enables the non-standard practice of allowing
-negative indices to mean indices starting at the end of an array. This
-functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
-false`.
-
-* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
-which limits the total size increase in bytes caused by "copy" operations in a
-patch. It defaults to 0, which means there is no limit.
-
-## Create and apply a merge patch
-Given both an original JSON document and a modified JSON document, you can create
-a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.
-
-It can describe the changes needed to convert from the original to the
-modified JSON document.
-
-Once you have a merge patch, you can apply it to other JSON documents using the
-`jsonpatch.MergePatch(document, patch)` function.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	jsonpatch "github.com/evanphx/json-patch"
-)
-
-func main() {
-	// Let's create a merge patch from these two documents...
-	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
-	target := []byte(`{"name": "Jane", "age": 24}`)
-
-	patch, err := jsonpatch.CreateMergePatch(original, target)
-	if err != nil {
-		panic(err)
-	}
-
-	// Now lets apply the patch against a different JSON document...
-
-	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
-	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
-
-	fmt.Printf("patch document: %s\n", patch)
-	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
-}
-```
-
-When ran, you get the following output:
-
-```bash
-$ go run main.go
-patch document: {"height":null,"name":"Jane"}
-updated tina doc: {"age":28,"name":"Jane"}
-```
-
-## Create and apply a JSON Patch
-You can create patch objects using `DecodePatch([]byte)`, which can then
-be applied against JSON documents.
-
-The following is an example of creating a patch from two operations, and
-applying it against a JSON document.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	jsonpatch "github.com/evanphx/json-patch"
-)
-
-func main() {
-	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
-	patchJSON := []byte(`[
-		{"op": "replace", "path": "/name", "value": "Jane"},
-		{"op": "remove", "path": "/height"}
-	]`)
-
-	patch, err := jsonpatch.DecodePatch(patchJSON)
-	if err != nil {
-		panic(err)
-	}
-
-	modified, err := patch.Apply(original)
-	if err != nil {
-		panic(err)
-	}
-
-	fmt.Printf("Original document: %s\n", original)
-	fmt.Printf("Modified document: %s\n", modified)
-}
-```
-
-When ran, you get the following output:

-```bash
-$ go run main.go
-Original document: {"name": "John", "age": 24, "height": 3.21}
-Modified document: {"age":24,"name":"Jane"}
-```
-
-## Comparing JSON documents
-Due to potential whitespace and ordering differences, one cannot simply compare
-JSON strings or byte-arrays directly.
-
-As such, you can instead use `jsonpatch.Equal(document1, document2)` to
-determine if two JSON documents are _structurally_ equal. This ignores
-whitespace differences, and key-value ordering.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	jsonpatch "github.com/evanphx/json-patch"
-)
-
-func main() {
-	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
-	similar := []byte(`
-		{
-			"age": 24,
-			"height": 3.21,
-			"name": "John"
-		}
-	`)
-	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
-
-	if jsonpatch.Equal(original, similar) {
-		fmt.Println(`"original" is structurally equal to "similar"`)
-	}
-
-	if !jsonpatch.Equal(original, different) {
-		fmt.Println(`"original" is _not_ structurally equal to "similar"`)
-	}
-}
-```
-
-When ran, you get the following output:
-```bash
-$ go run main.go
-"original" is structurally equal to "similar"
-"original" is _not_ structurally equal to "similar"
-```
-
-## Combine merge patches
-Given two JSON merge patch documents, it is possible to combine them into a
-single merge patch which can describe both set of changes.
-
-The resulting merge patch can be used such that applying it results in a
-document structurally similar as merging each merge patch to the document
-in succession.
-
-```go
-package main
-
-import (
-	"fmt"
-
-	jsonpatch "github.com/evanphx/json-patch"
-)
-
-func main() {
-	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
-
-	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
-	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
-
-	// Let's combine these merge patch documents...
-	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
-	if err != nil {
-		panic(err)
-	}
-
-	// Apply each patch individual against the original document
-	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
-	if err != nil {
-		panic(err)
-	}
-
-	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
-	if err != nil {
-		panic(err)
-	}
-
-	// Apply the combined patch against the original document
-
-	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
-	if err != nil {
-		panic(err)
-	}
-
-	// Do both result in the same thing? They should!
-	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
-		fmt.Println("Both JSON documents are structurally the same!")
-	}
-
-	fmt.Printf("combined merge patch: %s", combinedPatch)
-}
-```
-
-When ran, you get the following output:
-```bash
-$ go run main.go
-Both JSON documents are structurally the same!
-combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
-```
-
-# CLI for comparing JSON documents
-You can install the commandline program `json-patch`.
-
-This program can take multiple JSON patch documents as arguments,
-and fed a JSON document from `stdin`. It will apply the patch(es) against
-the document and output the modified doc.
-
-**patch.1.json**
-```json
-[
-	{"op": "replace", "path": "/name", "value": "Jane"},
-	{"op": "remove", "path": "/height"}
-]
-```
-
-**patch.2.json**
-```json
-[
-	{"op": "add", "path": "/address", "value": "123 Main St"},
-	{"op": "replace", "path": "/age", "value": "21"}
-]
-```
-
-**document.json**
-```json
-{
-	"name": "John",
-	"age": 24,
-	"height": 3.21
-}
-```
-
-You can then run:
-
-```bash
-$ go install github.com/evanphx/json-patch/cmd/json-patch
-$ cat document.json | json-patch -p patch.1.json -p patch.2.json
-{"address":"123 Main St","age":"21","name":"Jane"}
-```
-
-# Help It!
-Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
-or [create a PR](https://github.com/evanphx/json-patch/compare).
-
-
-Before creating a pull request, we'd ask that you make sure tests are passing
-and that you have added new tests when applicable.
-
-Contributors can run tests using:
-
-```bash
-go test -cover ./...
-```
-
-Builds for pull requests are tested automatically
-using [TravisCI](https://travis-ci.org/evanphx/json-patch).
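The Configuration section of the removed README describes two package-level variables but never shows them being set; a minimal sketch, assuming the `jsonpatch` API exactly as documented above:

```go
package main

import (
	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Disable the non-standard negative-index extension, so patches must
	// address array elements with standard indices only.
	jsonpatch.SupportNegativeIndices = false

	// Cap the total byte growth that "copy" operations may cause;
	// 0 (the default) means unlimited. 1 MiB here is an arbitrary example.
	jsonpatch.AccumulatedCopySizeLimit = 1 << 20
}
```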
vendor/github.com/evanphx/json-patch/errors.go (generated, vendored, 38 lines deleted)
@@ -1,38 +0,0 @@
-package jsonpatch
-
-import "fmt"
-
-// AccumulatedCopySizeError is an error type returned when the accumulated size
-// increase caused by copy operations in a patch operation has exceeded the
-// limit.
-type AccumulatedCopySizeError struct {
-	limit       int64
-	accumulated int64
-}
-
-// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
-func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
-	return &AccumulatedCopySizeError{limit: l, accumulated: a}
-}
-
-// Error implements the error interface.
-func (a *AccumulatedCopySizeError) Error() string {
-	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
-}
-
-// ArraySizeError is an error type returned when the array size has exceeded
-// the limit.
-type ArraySizeError struct {
-	limit int
-	size  int
-}
-
-// NewArraySizeError returns an ArraySizeError.
-func NewArraySizeError(l, s int) *ArraySizeError {
-	return &ArraySizeError{limit: l, size: s}
-}
-
-// Error implements the error interface.
-func (a *ArraySizeError) Error() string {
-	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
-}
vendor/github.com/evanphx/json-patch/merge.go (generated, vendored, 383 lines deleted)
@@ -1,383 +0,0 @@
-package jsonpatch
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"reflect"
-)
-
-func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
-	curDoc, err := cur.intoDoc()
-
-	if err != nil {
-		pruneNulls(patch)
-		return patch
-	}
-
-	patchDoc, err := patch.intoDoc()
-
-	if err != nil {
-		return patch
-	}
-
-	mergeDocs(curDoc, patchDoc, mergeMerge)
-
-	return cur
-}
-
-func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
-	for k, v := range *patch {
-		if v == nil {
-			if mergeMerge {
-				(*doc)[k] = nil
-			} else {
-				delete(*doc, k)
-			}
-		} else {
-			cur, ok := (*doc)[k]
-
-			if !ok || cur == nil {
-				pruneNulls(v)
-				(*doc)[k] = v
-			} else {
-				(*doc)[k] = merge(cur, v, mergeMerge)
-			}
-		}
-	}
-}
-
-func pruneNulls(n *lazyNode) {
-	sub, err := n.intoDoc()
-
-	if err == nil {
-		pruneDocNulls(sub)
-	} else {
-		ary, err := n.intoAry()
-
-		if err == nil {
-			pruneAryNulls(ary)
-		}
-	}
-}
-
-func pruneDocNulls(doc *partialDoc) *partialDoc {
-	for k, v := range *doc {
-		if v == nil {
-			delete(*doc, k)
-		} else {
-			pruneNulls(v)
-		}
-	}
-
-	return doc
-}
-
-func pruneAryNulls(ary *partialArray) *partialArray {
-	newAry := []*lazyNode{}
-
-	for _, v := range *ary {
-		if v != nil {
-			pruneNulls(v)
-			newAry = append(newAry, v)
-		}
-	}
-
-	*ary = newAry
-
-	return ary
-}
-
-var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
-var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
-var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
-
-// MergeMergePatches merges two merge patches together, such that
-// applying this resulting merged merge patch to a document yields the same
-// as merging each merge patch to the document in succession.
-func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
-	return doMergePatch(patch1Data, patch2Data, true)
-}
-
-// MergePatch merges the patchData into the docData.
-func MergePatch(docData, patchData []byte) ([]byte, error) {
-	return doMergePatch(docData, patchData, false)
-}
-
-func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
-	doc := &partialDoc{}
-
-	docErr := json.Unmarshal(docData, doc)
-
-	patch := &partialDoc{}
-
-	patchErr := json.Unmarshal(patchData, patch)
-
-	if _, ok := docErr.(*json.SyntaxError); ok {
-		return nil, errBadJSONDoc
-	}
-
-	if _, ok := patchErr.(*json.SyntaxError); ok {
-		return nil, errBadJSONPatch
-	}
-
-	if docErr == nil && *doc == nil {
-		return nil, errBadJSONDoc
-	}
-
-	if patchErr == nil && *patch == nil {
-		return nil, errBadJSONPatch
-	}
-
-	if docErr != nil || patchErr != nil {
-		// Not an error, just not a doc, so we turn straight into the patch
-		if patchErr == nil {
-			if mergeMerge {
-				doc = patch
-			} else {
-				doc = pruneDocNulls(patch)
-			}
-		} else {
-			patchAry := &partialArray{}
-			patchErr = json.Unmarshal(patchData, patchAry)
-
-			if patchErr != nil {
-				return nil, errBadJSONPatch
-			}
-
-			pruneAryNulls(patchAry)
-
-			out, patchErr := json.Marshal(patchAry)
-
-			if patchErr != nil {
-				return nil, errBadJSONPatch
-			}
-
-			return out, nil
-		}
-	} else {
-		mergeDocs(doc, patch, mergeMerge)
-	}
-
-	return json.Marshal(doc)
-}
-
-// resemblesJSONArray indicates whether the byte-slice "appears" to be
-// a JSON array or not.
-// False-positives are possible, as this function does not check the internal
-// structure of the array. It only checks that the outer syntax is present and
-// correct.
-func resemblesJSONArray(input []byte) bool {
-	input = bytes.TrimSpace(input)
-
-	hasPrefix := bytes.HasPrefix(input, []byte("["))
-	hasSuffix := bytes.HasSuffix(input, []byte("]"))
-
-	return hasPrefix && hasSuffix
-}
-
-// CreateMergePatch will return a merge patch document capable of converting
-// the original document(s) to the modified document(s).
-// The parameters can be bytes of either two JSON Documents, or two arrays of
-// JSON documents.
-// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
-func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
-	originalResemblesArray := resemblesJSONArray(originalJSON)
-	modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
-
-	// Do both byte-slices seem like JSON arrays?
-	if originalResemblesArray && modifiedResemblesArray {
-		return createArrayMergePatch(originalJSON, modifiedJSON)
-	}
-
-	// Are both byte-slices are not arrays? Then they are likely JSON objects...
-	if !originalResemblesArray && !modifiedResemblesArray {
-		return createObjectMergePatch(originalJSON, modifiedJSON)
-	}
-
-	// None of the above? Then return an error because of mismatched types.
-	return nil, errBadMergeTypes
-}
-
-// createObjectMergePatch will return a merge-patch document capable of
-// converting the original document to the modified document.
-func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
-	originalDoc := map[string]interface{}{}
-	modifiedDoc := map[string]interface{}{}
-
-	err := json.Unmarshal(originalJSON, &originalDoc)
-	if err != nil {
-		return nil, errBadJSONDoc
-	}
-
-	err = json.Unmarshal(modifiedJSON, &modifiedDoc)
-	if err != nil {
-		return nil, errBadJSONDoc
-	}
-
-	dest, err := getDiff(originalDoc, modifiedDoc)
-	if err != nil {
-		return nil, err
-	}
-
-	return json.Marshal(dest)
-}
-
-// createArrayMergePatch will return an array of merge-patch documents capable
-// of converting the original document to the modified document for each
-// pair of JSON documents provided in the arrays.
-// Arrays of mismatched sizes will result in an error.
-func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
-	originalDocs := []json.RawMessage{}
-	modifiedDocs := []json.RawMessage{}
-
-	err := json.Unmarshal(originalJSON, &originalDocs)
-	if err != nil {
-		return nil, errBadJSONDoc
-	}
-
-	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
-	if err != nil {
-		return nil, errBadJSONDoc
-	}
-
-	total := len(originalDocs)
-	if len(modifiedDocs) != total {
-		return nil, errBadJSONDoc
-	}
-
-	result := []json.RawMessage{}
-	for i := 0; i < len(originalDocs); i++ {
-		original := originalDocs[i]
-		modified := modifiedDocs[i]
-
-		patch, err := createObjectMergePatch(original, modified)
-		if err != nil {
-			return nil, err
-		}
-
-		result = append(result, json.RawMessage(patch))
-	}
-
-	return json.Marshal(result)
-}
-
-// Returns true if the array matches (must be json types).
-// As is idiomatic for go, an empty array is not the same as a nil array.
-func matchesArray(a, b []interface{}) bool {
-	if len(a) != len(b) {
-		return false
-	}
-	if (a == nil && b != nil) || (a != nil && b == nil) {
-		return false
-	}
-	for i := range a {
-		if !matchesValue(a[i], b[i]) {
-			return false
-		}
-	}
-	return true
-}
-
-// Returns true if the values matches (must be json types)
-// The types of the values must match, otherwise it will always return false
-// If two map[string]interface{} are given, all elements must match.
-func matchesValue(av, bv interface{}) bool {
-	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
-		return false
-	}
-	switch at := av.(type) {
-	case string:
-		bt := bv.(string)
-		if bt == at {
-			return true
-		}
-	case float64:
-		bt := bv.(float64)
-		if bt == at {
-			return true
-		}
-	case bool:
-		bt := bv.(bool)
-		if bt == at {
-			return true
-		}
-	case nil:
-		// Both nil, fine.
-		return true
-	case map[string]interface{}:
-		bt := bv.(map[string]interface{})
-		for key := range at {
-			if !matchesValue(at[key], bt[key]) {
-				return false
-			}
-		}
-		for key := range bt {
-			if !matchesValue(at[key], bt[key]) {
-				return false
-			}
-		}
-		return true
-	case []interface{}:
-		bt := bv.([]interface{})
-		return matchesArray(at, bt)
-	}
-	return false
-}
-
-// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
-func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
-	into := map[string]interface{}{}
-	for key, bv := range b {
-		av, ok := a[key]
-		// value was added
-		if !ok {
-			into[key] = bv
-			continue
-		}
-		// If types have changed, replace completely
-		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
-			into[key] = bv
-			continue
-		}
-		// Types are the same, compare values
-		switch at := av.(type) {
-		case map[string]interface{}:
-			bt := bv.(map[string]interface{})
-			dst := make(map[string]interface{}, len(bt))
-			dst, err := getDiff(at, bt)
-			if err != nil {
-				return nil, err
-			}
-			if len(dst) > 0 {
-				into[key] = dst
-			}
-		case string, float64, bool:
-			if !matchesValue(av, bv) {
-				into[key] = bv
-			}
-		case []interface{}:
-			bt := bv.([]interface{})
-			if !matchesArray(at, bt) {
-				into[key] = bv
-			}
-		case nil:
-			switch bv.(type) {
-			case nil:
-				// Both nil, fine.
-			default:
-				into[key] = bv
-			}
-		default:
-			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
-		}
-	}
-	// Now add all deleted values as nil
-	for key := range a {
-		_, found := b[key]
-		if !found {
-			into[key] = nil
-		}
-	}
-	return into, nil
-}
vendor/github.com/evanphx/json-patch/patch.go (generated, vendored, 696 lines deleted)
@@ -1,696 +0,0 @@
-package jsonpatch
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-const (
-	eRaw = iota
-	eDoc
-	eAry
-)
-
-var (
-	// SupportNegativeIndices decides whether to support non-standard practice of
-	// allowing negative indices to mean indices starting at the end of an array.
-	// Default to true.
-	SupportNegativeIndices bool = true
-	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
-	// "copy" operations in a patch.
-	AccumulatedCopySizeLimit int64 = 0
-)
-
-type lazyNode struct {
-	raw   *json.RawMessage
-	doc   partialDoc
-	ary   partialArray
-	which int
-}
-
-type operation map[string]*json.RawMessage
-
-// Patch is an ordered collection of operations.
-type Patch []operation
-
-type partialDoc map[string]*lazyNode
-type partialArray []*lazyNode
-
-type container interface {
-	get(key string) (*lazyNode, error)
-	set(key string, val *lazyNode) error
-	add(key string, val *lazyNode) error
-	remove(key string) error
-}
-
-func newLazyNode(raw *json.RawMessage) *lazyNode {
-	return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
-}
-
-func (n *lazyNode) MarshalJSON() ([]byte, error) {
-	switch n.which {
-	case eRaw:
-		return json.Marshal(n.raw)
-	case eDoc:
-		return json.Marshal(n.doc)
-	case eAry:
-		return json.Marshal(n.ary)
-	default:
-		return nil, fmt.Errorf("Unknown type")
-	}
-}
-
-func (n *lazyNode) UnmarshalJSON(data []byte) error {
-	dest := make(json.RawMessage, len(data))
-	copy(dest, data)
-	n.raw = &dest
-	n.which = eRaw
-	return nil
-}
-
-func deepCopy(src *lazyNode) (*lazyNode, int, error) {
-	if src == nil {
-		return nil, 0, nil
-	}
-	a, err := src.MarshalJSON()
-	if err != nil {
-		return nil, 0, err
-	}
-	sz := len(a)
-	ra := make(json.RawMessage, sz)
-	copy(ra, a)
-	return newLazyNode(&ra), sz, nil
-}
-
-func (n *lazyNode) intoDoc() (*partialDoc, error) {
-	if n.which == eDoc {
-		return &n.doc, nil
-	}
-
-	if n.raw == nil {
-		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
-	}
-
-	err := json.Unmarshal(*n.raw, &n.doc)
-
-	if err != nil {
-		return nil, err
-	}
-
-	n.which = eDoc
-	return &n.doc, nil
-}
-
-func (n *lazyNode) intoAry() (*partialArray, error) {
-	if n.which == eAry {
-		return &n.ary, nil
-	}
-
-	if n.raw == nil {
-		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
-	}
-
-	err := json.Unmarshal(*n.raw, &n.ary)
-
-	if err != nil {
-		return nil, err
-	}
-
-	n.which = eAry
-	return &n.ary, nil
-}
-
-func (n *lazyNode) compact() []byte {
-	buf := &bytes.Buffer{}
-
-	if n.raw == nil {
-		return nil
-	}
-
-	err := json.Compact(buf, *n.raw)
-
-	if err != nil {
-		return *n.raw
-	}
-
-	return buf.Bytes()
-}
-
-func (n *lazyNode) tryDoc() bool {
-	if n.raw == nil {
-		return false
-	}
-
-	err := json.Unmarshal(*n.raw, &n.doc)
-
-	if err != nil {
-		return false
-	}
-
-	n.which = eDoc
-	return true
-}
-
-func (n *lazyNode) tryAry() bool {
-	if n.raw == nil {
-		return false
-	}
-
-	err := json.Unmarshal(*n.raw, &n.ary)
-
-	if err != nil {
-		return false
-	}
-
-	n.which = eAry
-	return true
-}
-
-func (n *lazyNode) equal(o *lazyNode) bool {
-	if n.which == eRaw {
-		if !n.tryDoc() && !n.tryAry() {
-			if o.which != eRaw {
-				return false
-			}
-
-			return bytes.Equal(n.compact(), o.compact())
-		}
-	}
-
-	if n.which == eDoc {
-		if o.which == eRaw {
-			if !o.tryDoc() {
-				return false
-			}
-		}
-
-		if o.which != eDoc {
-			return false
-		}
-
-		for k, v := range n.doc {
-			ov, ok := o.doc[k]
-
-			if !ok {
-				return false
-			}
-
-			if v == nil && ov == nil {
-				continue
-			}
-
-			if !v.equal(ov) {
-				return false
-			}
-		}
-
-		return true
-	}
-
-	if o.which != eAry && !o.tryAry() {
-		return false
-	}
-
-	if len(n.ary) != len(o.ary) {
-		return false
-	}
-
-	for idx, val := range n.ary {
-		if !val.equal(o.ary[idx]) {
-			return false
-		}
-	}
-
-	return true
-}
-
-func (o operation) kind() string {
-	if obj, ok := o["op"]; ok && obj != nil {
-		var op string
-
-		err := json.Unmarshal(*obj, &op)
-
-		if err != nil {
-			return "unknown"
-		}
-
-		return op
-	}
-
-	return "unknown"
-}
-
-func (o operation) path() string {
-	if obj, ok := o["path"]; ok && obj != nil {
-		var op string
-
-		err := json.Unmarshal(*obj, &op)
-
-		if err != nil {
-			return "unknown"
-		}
-
-		return op
-	}
-
-	return "unknown"
-}
-
-func (o operation) from() string {
-	if obj, ok := o["from"]; ok && obj != nil {
-		var op string
-
-		err := json.Unmarshal(*obj, &op)
-
-		if err != nil {
-			return "unknown"
-		}
-
-		return op
-	}
-
-	return "unknown"
-}
-
-func (o operation) value() *lazyNode {
-	if obj, ok := o["value"]; ok {
-		return newLazyNode(obj)
-	}
-
-	return nil
-}
-
-func isArray(buf []byte) bool {
-Loop:
-	for _, c := range buf {
-		switch c {
-		case ' ':
-		case '\n':
-		case '\t':
-			continue
-		case '[':
-			return true
-		default:
-			break Loop
-		}
-	}
-
-	return false
-}
-
-func findObject(pd *container, path string) (container, string) {
-	doc := *pd
-
-	split := strings.Split(path, "/")
-
-	if len(split) < 2 {
-		return nil, ""
-	}
-
-	parts := split[1 : len(split)-1]
-
-	key := split[len(split)-1]
-
-	var err error
-
-	for _, part := range parts {
-
-		next, ok := doc.get(decodePatchKey(part))
-
-		if next == nil || ok != nil {
-			return nil, ""
-		}
-
-		if isArray(*next.raw) {
-			doc, err = next.intoAry()
-
-			if err != nil {
-				return nil, ""
-			}
-		} else {
-			doc, err = next.intoDoc()
-
-			if err != nil {
-				return nil, ""
-			}
-		}
-	}
-
-	return doc, decodePatchKey(key)
-}
-
-func (d *partialDoc) set(key string, val *lazyNode) error {
-	(*d)[key] = val
-	return nil
-}
-
-func (d *partialDoc) add(key string, val *lazyNode) error {
-	(*d)[key] = val
-	return nil
-}
-
-func (d *partialDoc) get(key string) (*lazyNode, error) {
-	return (*d)[key], nil
-}
-
-func (d *partialDoc) remove(key string) error {
-	_, ok := (*d)[key]
-	if !ok {
-		return fmt.Errorf("Unable to remove nonexistent key: %s", key)
-	}
-
-	delete(*d, key)
-	return nil
-}
-
-// set should only be used to implement the "replace" operation, so "key" must
-// be an already existing index in "d".
-func (d *partialArray) set(key string, val *lazyNode) error {
-	idx, err := strconv.Atoi(key)
-	if err != nil {
-		return err
-	}
-	(*d)[idx] = val
-	return nil
-}
-
-func (d *partialArray) add(key string, val *lazyNode) error {
-	if key == "-" {
-		*d = append(*d, val)
-		return nil
-	}
-
-	idx, err := strconv.Atoi(key)
-	if err != nil {
-		return err
-	}
-
-	sz := len(*d) + 1
-
-	ary := make([]*lazyNode, sz)
-
-	cur := *d
-
-	if idx >= len(ary) {
-		return fmt.Errorf("Unable to access invalid index: %d", idx)
-	}
-
-	if SupportNegativeIndices {
-		if idx < -len(ary) {
-			return fmt.Errorf("Unable to access invalid index: %d", idx)
-		}
-
-		if idx < 0 {
-			idx += len(ary)
-		}
-	}
-
-	copy(ary[0:idx], cur[0:idx])
-	ary[idx] = val
-	copy(ary[idx+1:], cur[idx:])
-
-	*d = ary
-	return nil
-}
-
-func (d *partialArray) get(key string) (*lazyNode, error) {
-	idx, err := strconv.Atoi(key)
-
-	if err != nil {
-		return nil, err
-	}
-
-	if idx >= len(*d) {
-		return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
-	}
-
-	return (*d)[idx], nil
-}
-
-func (d *partialArray) remove(key string) error {
-	idx, err := strconv.Atoi(key)
-	if err != nil {
-		return err
-	}
-
-	cur := *d
-
-	if idx >= len(cur) {
-		return fmt.Errorf("Unable to access invalid index: %d", idx)
-	}
-
-	if SupportNegativeIndices {
-		if idx < -len(cur) {
-			return fmt.Errorf("Unable to access invalid index: %d", idx)
-		}
-
-		if idx < 0 {
-			idx += len(cur)
-		}
-	}
-
-	ary := make([]*lazyNode, len(cur)-1)
-
-	copy(ary[0:idx], cur[0:idx])
-	copy(ary[idx:], cur[idx+1:])
-
-	*d = ary
-	return nil
-
-}
-
-func (p Patch) add(doc *container, op operation) error {
-	path := op.path()
-
-	con, key := findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
-	}
-
-	return con.add(key, op.value())
-}
-
-func (p Patch) remove(doc *container, op operation) error {
-	path := op.path()
-
-	con, key := findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
-	}
-
-	return con.remove(key)
-}
-
-func (p Patch) replace(doc *container, op operation) error {
-	path := op.path()
-
-	con, key := findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
-	}
-
-	_, ok := con.get(key)
-	if ok != nil {
-		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
-	}
-
-	return con.set(key, op.value())
-}
-
-func (p Patch) move(doc *container, op operation) error {
-	from := op.from()
-
-	con, key := findObject(doc, from)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
-	}
-
-	val, err := con.get(key)
-	if err != nil {
-		return err
-	}
-
-	err = con.remove(key)
-	if err != nil {
-		return err
-	}
-
-	path := op.path()
-
-	con, key = findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
-	}
-
-	return con.add(key, val)
-}
-
-func (p Patch) test(doc *container, op operation) error {
-	path := op.path()
-
-	con, key := findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
-	}
-
-	val, err := con.get(key)
-
-	if err != nil {
-		return err
-	}
-
-	if val == nil {
-		if op.value().raw == nil {
-			return nil
-		}
-		return fmt.Errorf("Testing value %s failed", path)
-	} else if op.value() == nil {
-		return fmt.Errorf("Testing value %s failed", path)
-	}
-
-	if val.equal(op.value()) {
-		return nil
-	}
-
-	return fmt.Errorf("Testing value %s failed", path)
-}
-
-func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
-	from := op.from()
-
-	con, key := findObject(doc, from)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
-	}
-
-	val, err := con.get(key)
-	if err != nil {
-		return err
-	}
-
-	path := op.path()
-
-	con, key = findObject(doc, path)
-
-	if con == nil {
-		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
-	}
-
-	valCopy, sz, err := deepCopy(val)
-	if err != nil {
-		return err
-	}
-	(*accumulatedCopySize) += int64(sz)
-	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
-		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
-	}
-
-	return con.add(key, valCopy)
-}
-
-// Equal indicates if 2 JSON documents have the same structural equality.
-func Equal(a, b []byte) bool {
-	ra := make(json.RawMessage, len(a))
-	copy(ra, a)
-	la := newLazyNode(&ra)
-
-	rb := make(json.RawMessage, len(b))
-	copy(rb, b)
-	lb := newLazyNode(&rb)
-
-	return la.equal(lb)
-}
-
-// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
-func DecodePatch(buf []byte) (Patch, error) {
-	var p Patch
-
-	err := json.Unmarshal(buf, &p)
-
-	if err != nil {
-		return nil, err
-	}
-
-	return p, nil
-}
-
-// Apply mutates a JSON document according to the patch, and returns the new
-// document.
-func (p Patch) Apply(doc []byte) ([]byte, error) {
-	return p.ApplyIndent(doc, "")
-}
-
-// ApplyIndent mutates a JSON document according to the patch, and returns the new
-// document indented.
-func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
-	var pd container
-	if doc[0] == '[' {
-		pd = &partialArray{}
-	} else {
-		pd = &partialDoc{}
-	}
-
-	err := json.Unmarshal(doc, pd)
-
-	if err != nil {
-		return nil, err
-	}
-
-	err = nil
-
-	var accumulatedCopySize int64
-
-	for _, op := range p {
-		switch op.kind() {
-		case "add":
-			err = p.add(&pd, op)
-		case "remove":
-			err = p.remove(&pd, op)
-		case "replace":
-			err = p.replace(&pd, op)
-		case "move":
-			err = p.move(&pd, op)
-		case "test":
-			err = p.test(&pd, op)
-		case "copy":
-			err = p.copy(&pd, op, &accumulatedCopySize)
-		default:
-			err = fmt.Errorf("Unexpected kind: %s", op.kind())
-		}
-
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	if indent != "" {
-		return json.MarshalIndent(pd, "", indent)
-	}
-
-	return json.Marshal(pd)
-}
-
-// From http://tools.ietf.org/html/rfc6901#section-4 :
-//
-// Evaluation of each reference token begins by decoding any escaped
-// character sequence. This is performed by first transforming any
-// occurrence of the sequence '~1' to '/', and then transforming any
-// occurrence of the sequence '~0' to '~'.
-
-var (
-	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
-)
-
-func decodePatchKey(k string) string {
-	return rfc6901Decoder.Replace(k)
-}
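The RFC 6901 comment at the bottom of the removed file pins down a decode order that is easy to get backwards; a quick check using only the standard library, mirroring the `strings.NewReplacer` call above:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// RFC 6901: transform "~1" to "/" first, then "~0" to "~".
	dec := strings.NewReplacer("~1", "/", "~0", "~")

	fmt.Println(dec.Replace("a~1b~0c")) // a/b~c
	// "~01" must decode to the literal "~1", not to "/"; applying the
	// replacements in the opposite order would get this wrong.
	fmt.Println(dec.Replace("~01")) // ~1
}
```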
vendor/sigs.k8s.io/yaml/.gitignore → vendor/github.com/ghodss/yaml/.gitignore (generated, vendored, unchanged)
vendor/github.com/ghodss/yaml/.travis.yml (generated, vendored, new file, 7 lines)
@@ -0,0 +1,7 @@
+language: go
+go:
+  - 1.3
+  - 1.4
+script:
+  - go test
+  - go build
vendor/sigs.k8s.io/yaml/LICENSE → vendor/github.com/ghodss/yaml/LICENSE (generated, vendored, unchanged)
vendor/sigs.k8s.io/yaml/README.md → vendor/github.com/ghodss/yaml/README.md (generated, vendored, 17 lines changed)
@@ -4,13 +4,13 @@
 
 ## Introduction
 
-A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
+A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
 
 In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
 
 ## Compatibility
 
-This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
+This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
 
 ## Caveats
 
@@ -44,8 +44,6 @@ import "github.com/ghodss/yaml"
 Usage is very similar to the JSON library:
 
 ```go
-package main
-
 import (
 	"fmt"
 
@@ -53,8 +51,8 @@ import (
 )
 
 type Person struct {
-	Name string `json:"name"` // Affects YAML field names too.
-	Age int `json:"age"`
+	Name string `json:"name"` // Affects YAML field names too.
+	Age int `json:"name"`
 }
 
 func main() {
@@ -67,13 +65,13 @@ func main() {
 	}
 	fmt.Println(string(y))
 	/* Output:
-	age: 30
 	name: John
+	age: 30
 	*/
 
 	// Unmarshal the YAML back into a Person struct.
 	var p2 Person
-	err = yaml.Unmarshal(y, &p2)
+	err := yaml.Unmarshal(y, &p2)
 	if err != nil {
 		fmt.Printf("err: %v\n", err)
 		return
@@ -88,14 +86,11 @@ func main() {
 `yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:
 
 ```go
-package main
-
 import (
 	"fmt"
-
 	"github.com/ghodss/yaml"
 )
 
 func main() {
 	j := []byte(`{"name": "John", "age": 30}`)
 	y, err := yaml.JSONToYAML(j)
7
vendor/sigs.k8s.io/yaml/fields.go → vendor/github.com/ghodss/yaml/fields.go
generated
vendored
@@ -1,7 +1,6 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package yaml

import (
@@ -46,11 +45,7 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
            break
        }
        if v.IsNil() {
            if v.CanSet() {
                v.Set(reflect.New(v.Type().Elem()))
            } else {
                v = reflect.New(v.Type().Elem())
            }
            v.Set(reflect.New(v.Type().Elem()))
        }
        if v.Type().NumMethod() > 0 {
            if u, ok := v.Interface().(json.Unmarshaler); ok {
74
vendor/sigs.k8s.io/yaml/yaml.go → vendor/github.com/ghodss/yaml/yaml.go
generated
vendored
@@ -4,58 +4,37 @@ import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "reflect"
    "strconv"

    "gopkg.in/yaml.v2"
)

// Marshal marshals the object into JSON then converts JSON to YAML and returns the
// Marshals the object into JSON then converts JSON to YAML and returns the
// YAML.
func Marshal(o interface{}) ([]byte, error) {
    j, err := json.Marshal(o)
    if err != nil {
        return nil, fmt.Errorf("error marshaling into JSON: %v", err)
        return nil, fmt.Errorf("error marshaling into JSON: ", err)
    }

    y, err := JSONToYAML(j)
    if err != nil {
        return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
        return nil, fmt.Errorf("error converting JSON to YAML: ", err)
    }

    return y, nil
}

// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder

// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
    return yamlUnmarshal(y, o, false, opts...)
}

// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
// into an object, optionally configuring the behavior of the JSON unmarshal.
func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
    return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
}

// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
// optionally performing the unmarshalling strictly
func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
    vo := reflect.ValueOf(o)
    unmarshalFn := yaml.Unmarshal
    if strict {
        unmarshalFn = yaml.UnmarshalStrict
    }
    j, err := yamlToJSON(y, &vo, unmarshalFn)
    j, err := yamlToJSON(y, &vo)
    if err != nil {
        return fmt.Errorf("error converting YAML to JSON: %v", err)
    }

    err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
    err = json.Unmarshal(j, o)
    if err != nil {
        return fmt.Errorf("error unmarshaling JSON: %v", err)
    }
@@ -63,28 +42,13 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error
    return nil
}

// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding. We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
    d := json.NewDecoder(r)
    for _, opt := range opts {
        d = opt(d)
    }
    if err := d.Decode(&o); err != nil {
        return fmt.Errorf("while decoding JSON: %v", err)
    }
    return nil
}

// JSONToYAML Converts JSON to YAML.
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
    // Convert the JSON to an object.
    var jsonObj interface{}
    // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
    // Go JSON library doesn't try to pick the right number type (int, float,
    // etc.) when unmarshalling to interface{}, it just picks float64
    // etc.) when unmarshling to interface{}, it just picks float64
    // universally. go-yaml does go through the effort of picking the right
    // number type, so we can preserve number type throughout this process.
    err := yaml.Unmarshal(j, &jsonObj)
@@ -96,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
    return yaml.Marshal(jsonObj)
}

// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
@@ -106,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) {
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
    return yamlToJSON(y, nil, yaml.Unmarshal)
    return yamlToJSON(y, nil)
}

// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
    return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}

func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
    // Convert the YAML to an object.
    var yamlObj interface{}
    err := yamlUnmarshal(y, &yamlObj)
    err := yaml.Unmarshal(y, &yamlObj)
    if err != nil {
        return nil, err
    }
@@ -316,4 +272,6 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
    }
    return yamlObj, nil
}

    return nil, nil
}
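The JSONOpt type removed in the hunk above is a functional option over *json.Decoder; UnmarshalStrict simply appends a DisallowUnknownFields option before decoding. A self-contained sketch of the same pattern, assuming nothing beyond the standard library (names are illustrative, not the vendored API):

```go
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
)

// jsonOpt mirrors the JSONOpt type in the diff above: a functional option
// that tweaks the json.Decoder before decoding begins.
type jsonOpt func(*json.Decoder) *json.Decoder

// disallowUnknownFields sketches the option the strict path appends: it makes
// the decoder reject JSON keys that have no matching struct field.
func disallowUnknownFields(d *json.Decoder) *json.Decoder {
    d.DisallowUnknownFields()
    return d
}

func decode(data []byte, o interface{}, opts ...jsonOpt) error {
    d := json.NewDecoder(bytes.NewReader(data))
    for _, opt := range opts {
        d = opt(d)
    }
    return d.Decode(o)
}

func main() {
    type T struct{ A int }
    var t T
    // Lenient decode ignores the unknown key "b"; strict decode rejects it.
    fmt.Println(decode([]byte(`{"a":1,"b":2}`), &t))                        // <nil>
    fmt.Println(decode([]byte(`{"a":1,"b":2}`), &t, disallowUnknownFields)) // error mentioning "b"
}
```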
1
vendor/github.com/gogo/protobuf/AUTHORS
generated
vendored
@@ -10,6 +10,5 @@

# Please keep the list sorted.

Sendgrid, Inc
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>
4
vendor/github.com/gogo/protobuf/CONTRIBUTORS
generated
vendored
@@ -1,5 +1,4 @@
Anton Povarov <anton.povarov@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
@@ -11,12 +10,9 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com>
4
vendor/github.com/gogo/protobuf/proto/encode.go
generated
vendored
@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
    // use signed number to get arithmetic right shift.
    return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
    return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

func sizeZigzag64(x uint64) int {
    return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
    return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
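EncodeZigzag64 above relies on the zigzag transform, (x << 1) ^ (x >> 63) with an arithmetic right shift, which interleaves negative and positive values so small magnitudes get small varint encodings regardless of sign. A hedged sketch over a signed input (the vendored function takes the already-converted uint64):

```go
package main

import "fmt"

// zigzag64 demonstrates the transform used by EncodeZigzag64 above. The
// arithmetic right shift of the signed value yields all-ones for negatives
// and all-zeros otherwise, so the sign bit ends up in the lowest position.
func zigzag64(x int64) uint64 {
    return uint64(x<<1) ^ uint64(x>>63)
}

func main() {
    for _, v := range []int64{0, -1, 1, -2, 2} {
        fmt.Printf("%d -> %d\n", v, zigzag64(v)) // 0, 1, 2, 3, 4
    }
}
```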
1
vendor/github.com/gogo/protobuf/proto/lib.go
generated
vendored
@@ -73,6 +73,7 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:

  - Non-repeated fields of non-message type are values instead of pointers.
  - Getters are only generated for message and oneof fields.
  - Enum types do not get an Enum method.

The simplest way to describe this is to see an example.
3
vendor/github.com/gogo/protobuf/proto/properties.go
generated
vendored
@@ -193,7 +193,6 @@ type Properties struct {
    Default string // default value
    HasDefault bool // whether an explicit default was provided
    CustomType string
    CastType string
    StdTime bool
    StdDuration bool

@@ -342,8 +341,6 @@ func (p *Properties) Parse(s string) {
        p.OrigName = strings.Split(f, "=")[1]
    case strings.HasPrefix(f, "customtype="):
        p.CustomType = strings.Split(f, "=")[1]
    case strings.HasPrefix(f, "casttype="):
        p.CastType = strings.Split(f, "=")[1]
    case f == "stdtime":
        p.StdTime = true
    case f == "stdduration":
23
vendor/github.com/gogo/protobuf/proto/text.go
generated
vendored
@@ -522,17 +522,6 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
            }
            return nil
        }
    } else if len(props.CastType) > 0 {
        if _, ok := v.Interface().(interface {
            String() string
        }); ok {
            switch v.Kind() {
            case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
                reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
                _, err := fmt.Fprintf(w, "%d", v.Interface())
                return err
            }
        }
    } else if props.StdTime {
        t, ok := v.Interface().(time.Time)
        if !ok {
@@ -542,9 +531,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
        if err != nil {
            return err
        }
        propsCopy := *props // Make a copy so that this is goroutine-safe
        propsCopy.StdTime = false
        err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
        props.StdTime = false
        err = tm.writeAny(w, reflect.ValueOf(tproto), props)
        props.StdTime = true
        return err
    } else if props.StdDuration {
        d, ok := v.Interface().(time.Duration)
@@ -552,9 +541,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
            return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
        }
        dproto := durationProto(d)
        propsCopy := *props // Make a copy so that this is goroutine-safe
        propsCopy.StdDuration = false
        err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
        props.StdDuration = false
        err := tm.writeAny(w, reflect.ValueOf(dproto), props)
        props.StdDuration = true
        return err
    }
}
2
vendor/github.com/gogo/protobuf/proto/text_parser.go
generated
vendored
@@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
        return p.readStruct(fv, terminator)
    case reflect.Uint32:
        if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
            fv.SetUint(x)
            fv.SetUint(uint64(x))
            return nil
        }
    case reflect.Uint64:
@@ -1,10 +1,3 @@
klog
====

klog is a permanant fork of https://github.com/golang/glog. original README from glog is below

----

glog
====

@@ -12,7 +5,7 @@ Leveled execution logs for Go.

This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
https://github.com/google/glog
http://code.google.com/p/google-glog

By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
@@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
//
@@ -68,7 +68,7 @@
// -vmodule=gopher*=3
// sets the V level to 3 in all Go files whose names begin "gopher".
//
package klog
package glog

import (
    "bufio"
@@ -396,6 +396,13 @@ type flushSyncWriter interface {
}

func init() {
    flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
    flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
    flag.Var(&logging.verbosity, "v", "log level for V logs")
    flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
    flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
    flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")

    // Default stderrThreshold is ERROR.
    logging.stderrThreshold = errorLog

@@ -403,22 +410,6 @@ func init() {
    go logging.flushDaemon()
}

// InitFlags is for explicitly initializing the flags
func InitFlags(flagset *flag.FlagSet) {
    if flagset == nil {
        flagset = flag.CommandLine
    }
    flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory")
    flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file")
    flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
    flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
    flagset.Var(&logging.verbosity, "v", "log level for V logs")
    flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
    flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
    flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
    flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
}

// Flush flushes all pending log I/O.
func Flush() {
    logging.lockAndFlushAll()
@@ -462,17 +453,6 @@ type loggingT struct {
    // safely using atomic.LoadInt32.
    vmodule moduleSpec // The state of the -vmodule flag.
    verbosity Level // V logging level, the value of the -v flag/

    // If non-empty, overrides the choice of directory in which to write logs.
    // See createLogDirs for the full list of possible destinations.
    logDir string

    // If non-empty, specifies the path of the file to write logs. mutually exclusive
    // with the log-dir option.
    logFile string

    // If true, do not add the prefix headers, useful when used with SetOutput
    skipHeaders bool
}

// buffer holds a byte Buffer for reuse. The zero value is ready for use.
@@ -576,9 +556,6 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
        s = infoLog // for safety.
    }
    buf := l.getBuffer()
    if l.skipHeaders {
        return buf
    }

    // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
    // It's worth about 3X. Fprintf is hard.
@@ -690,45 +667,6 @@ func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToSt
    l.output(s, buf, file, line, alsoToStderr)
}

// redirectBuffer is used to set an alternate destination for the logs
type redirectBuffer struct {
    w io.Writer
}

func (rb *redirectBuffer) Sync() error {
    return nil
}

func (rb *redirectBuffer) Flush() error {
    return nil
}

func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
    return rb.w.Write(bytes)
}

// SetOutput sets the output destination for all severities
func SetOutput(w io.Writer) {
    for s := fatalLog; s >= infoLog; s-- {
        rb := &redirectBuffer{
            w: w,
        }
        logging.file[s] = rb
    }
}

// SetOutputBySeverity sets the output destination for specific severity
func SetOutputBySeverity(name string, w io.Writer) {
    sev, ok := severityByName(name)
    if !ok {
        panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
    }
    rb := &redirectBuffer{
        w: w,
    }
    logging.file[sev] = rb
}

// output writes the data to the log files and releases the buffer.
func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
    l.mu.Lock()
@@ -938,7 +876,7 @@ const flushInterval = 30 * time.Second

// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
    for range time.NewTicker(flushInterval).C {
    for _ = range time.NewTicker(flushInterval).C {
        l.lockAndFlushAll()
    }
}
@@ -16,10 +16,11 @@

// File I/O for logs.

package klog
package glog

import (
    "errors"
    "flag"
    "fmt"
    "os"
    "os/user"
@@ -35,9 +36,13 @@ var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string

// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")

func createLogDirs() {
    if logging.logDir != "" {
        logDirs = append(logDirs, logging.logDir)
    if *logDir != "" {
        logDirs = append(logDirs, *logDir)
    }
    logDirs = append(logDirs, os.TempDir())
}
@@ -98,13 +103,6 @@ var onceLogDirs sync.Once
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
    if logging.logFile != "" {
        f, err := os.Create(logging.logFile)
        if err == nil {
            return f, logging.logFile, nil
        }
        return nil, "", fmt.Errorf("log: unable to create log: %v", err)
    }
    onceLogDirs.Do(createLogDirs)
    if len(logDirs) == 0 {
        return nil, "", errors.New("log: no log dirs")
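The InitFlags function shown in the diff above is klog's explicit replacement for glog's init()-time flag registration. A minimal usage sketch, assuming the k8s.io/klog import path of this vintage (the flag values are illustrative):

```go
package main

import (
    "flag"

    "k8s.io/klog"
)

func main() {
    klog.InitFlags(nil) // nil registers the flags on flag.CommandLine
    flag.Set("logtostderr", "true")
    flag.Parse()
    defer klog.Flush() // flush buffered log I/O before exiting

    klog.Info("starting up")
    klog.V(2).Info("verbose message, shown only with -v=2 or higher")
}
```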
12
vendor/github.com/json-iterator/go/Gopkg.lock
generated
vendored
@@ -1,6 +1,12 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.

[[projects]]
  name = "github.com/json-iterator/go"
  packages = ["."]
  revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
  version = "1.1.3"

[[projects]]
  name = "github.com/modern-go/concurrent"
  packages = ["."]
@@ -10,12 +16,12 @@
[[projects]]
  name = "github.com/modern-go/reflect2"
  packages = ["."]
  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
  version = "1.0.1"
  revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
  version = "1.0.0"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
  inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793"
  solver-name = "gps-cdcl"
  solver-version = 1
2
vendor/github.com/json-iterator/go/Gopkg.toml
generated
vendored
@@ -23,4 +23,4 @@ ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com

[[constraint]]
  name = "github.com/modern-go/reflect2"
  version = "1.0.1"
  version = "1.0.0"
1
vendor/github.com/modern-go/reflect2/.travis.yml
generated
vendored
@@ -6,7 +6,6 @@ go:

before_install:
  - go get -t -v ./...
  - go get -t -v github.com/modern-go/reflect2-tests/...

script:
  - ./test.sh
2
vendor/github.com/modern-go/reflect2/Gopkg.toml
generated
vendored
@@ -24,7 +24,7 @@
# go-tests = true
# unused-packages = true

ignored = []
ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"]

[[constraint]]
  name = "github.com/modern-go/concurrent"
3
vendor/github.com/modern-go/reflect2/reflect2.go
generated
vendored
@@ -150,9 +150,6 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
}

func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
    if type1 == nil {
        return nil
    }
    cacheKey := uintptr(unpackEFace(type1).data)
    typeObj, found := cfg.cache.Load(cacheKey)
    if found {
2
vendor/github.com/modern-go/reflect2/test.sh
generated
vendored
@@ -3,7 +3,7 @@
set -e
echo "" > coverage.txt

for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
for d in $(go list ./... | grep -v vendor); do
    go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
    if [ -f profile.out ]; then
        cat profile.out >> coverage.txt
16
vendor/github.com/modern-go/reflect2/type_map.go
generated
vendored
@@ -4,7 +4,6 @@ import (
    "reflect"
    "runtime"
    "strings"
    "sync"
    "unsafe"
)

@@ -16,17 +15,10 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)

// initOnce guards initialization of types and packages
var initOnce sync.Once

var types map[string]reflect.Type
var packages map[string]map[string]reflect.Type

// discoverTypes initializes types and packages
func discoverTypes() {
    types = make(map[string]reflect.Type)
    packages = make(map[string]map[string]reflect.Type)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}

func init() {
    ver := runtime.Version()
    if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
        loadGo15Types()
@@ -98,13 +90,11 @@ type emptyInterface struct {

// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
    initOnce.Do(discoverTypes)
    return Type2(types[typeName])
}

// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
    initOnce.Do(discoverTypes)
    pkgTypes := packages[pkgPath]
    if pkgTypes == nil {
        return nil
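TypeByName above consults a name-to-type table that, after the change in this diff, is built lazily under initOnce rather than in init(). A usage sketch; a known caveat of this API is that only types retained by the linker are discoverable, which is why the example references time.Time directly:

```go
package main

import (
    "fmt"
    "time"

    "github.com/modern-go/reflect2"
)

// Keep a concrete reference so the linker retains time.Time in the
// typelinks table that TypeByName scans on first use.
var _ = time.Time{}

func main() {
    // The lookup the initOnce change above guards: the table is discovered
    // lazily, exactly once, even under concurrent callers.
    t := reflect2.TypeByName("time.Time")
    fmt.Println(t) // the time.Time type, or nil if it was not linked in
}
```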
50
vendor/golang.org/x/net/http/httpguts/guts.go
generated
vendored
@@ -1,50 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package httpguts provides functions implementing various details
// of the HTTP specification.
//
// This package is shared by the standard library (which vendors it)
// and x/net/http2. It comes with no API stability promise.
package httpguts

import (
    "net/textproto"
    "strings"
)

// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See RFC 7230, Section 4.1.2
func ValidTrailerHeader(name string) bool {
    name = textproto.CanonicalMIMEHeaderKey(name)
    if strings.HasPrefix(name, "If-") || badTrailer[name] {
        return false
    }
    return true
}

var badTrailer = map[string]bool{
    "Authorization": true,
    "Cache-Control": true,
    "Connection": true,
    "Content-Encoding": true,
    "Content-Length": true,
    "Content-Range": true,
    "Content-Type": true,
    "Expect": true,
    "Host": true,
    "Keep-Alive": true,
    "Max-Forwards": true,
    "Pragma": true,
    "Proxy-Authenticate": true,
    "Proxy-Authorization": true,
    "Proxy-Connection": true,
    "Range": true,
    "Realm": true,
    "Te": true,
    "Trailer": true,
    "Transfer-Encoding": true,
    "Www-Authenticate": true,
}
28
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
@@ -52,31 +52,9 @@ const (
    noDialOnMiss = false
)

// shouldTraceGetConn reports whether getClientConn should call any
// ClientTrace.GetConn hook associated with the http.Request.
//
// This complexity is needed to avoid double calls of the GetConn hook
// during the back-and-forth between net/http and x/net/http2 (when the
// net/http.Transport is upgraded to also speak http2), as well as support
// the case where x/net/http2 is being used directly.
func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
    // If our Transport wasn't made via ConfigureTransport, always
    // trace the GetConn hook if provided, because that means the
    // http2 package is being used directly and it's the one
    // dialing, as opposed to net/http.
    if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
        return true
    }
    // Otherwise, only use the GetConn hook if this connection has
    // been used previously for other requests. For fresh
    // connections, the net/http package does the dialing.
    return !st.freshConn
}

func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
    if isConnectionCloseRequest(req) && dialOnMiss {
        // It gets its own connection.
        traceGetConn(req, addr)
        const singleUse = true
        cc, err := p.t.dialClientConn(addr, singleUse)
        if err != nil {
@@ -86,10 +64,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
    }
    p.mu.Lock()
    for _, cc := range p.conns[addr] {
        if st := cc.idleState(); st.canTakeNewRequest {
            if p.shouldTraceGetConn(st) {
                traceGetConn(req, addr)
            }
        if cc.CanTakeNewRequest() {
            p.mu.Unlock()
            return cc, nil
        }
@@ -98,7 +73,6 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
    p.mu.Unlock()
    return nil, ErrNoCachedConn
}
    traceGetConn(req, addr)
    call := p.getStartDialLocked(addr)
    p.mu.Unlock()
    <-call.done
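shouldTraceGetConn above exists so the httptrace GetConn hook fires exactly once per request, whether the connection comes from the pool or from a fresh dial. A client-side sketch of those hooks, using only the standard library (the URL is illustrative):

```go
package main

import (
    "fmt"
    "net/http"
    "net/http/httptrace"
)

func main() {
    // The hooks that traceGetConn/shouldTraceGetConn above drive.
    trace := &httptrace.ClientTrace{
        GetConn: func(hostPort string) { fmt.Println("get conn:", hostPort) },
        GotConn: func(info httptrace.GotConnInfo) { fmt.Println("reused:", info.Reused) },
    }
    req, err := http.NewRequest("GET", "https://example.com", nil)
    if err != nil {
        panic(err)
    }
    req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    if _, err := http.DefaultClient.Do(req); err != nil {
        fmt.Println("request failed:", err)
    }
}
```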
8
vendor/golang.org/x/net/http2/configure_transport.go
generated
vendored
@@ -57,7 +57,7 @@ func configureTransport(t1 *http.Transport) (*Transport, error) {

// registerHTTPSProtocol calls Transport.RegisterProtocol but
// converting panics into errors.
func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) {
func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) {
    defer func() {
        if e := recover(); e != nil {
            err = fmt.Errorf("%v", e)
@@ -69,12 +69,10 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro

// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
// if there's already has a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
type noDialH2RoundTripper struct{ *Transport }
type noDialH2RoundTripper struct{ t *Transport }

func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
    res, err := rt.Transport.RoundTrip(req)
    res, err := rt.t.RoundTrip(req)
    if isNoCachedConnError(err) {
        return nil, http.ErrSkipAltProtocol
    }
10
vendor/golang.org/x/net/http2/flow.go
generated
vendored
@@ -41,10 +41,10 @@ func (f *flow) take(n int32) {
// add adds n bytes (positive or negative) to the flow control window.
// It returns false if the sum would exceed 2^31-1.
func (f *flow) add(n int32) bool {
    sum := f.n + n
    if (sum > n) == (f.n > 0) {
        f.n = sum
        return true
    remain := (1<<31 - 1) - f.n
    if n > remain {
        return false
    }
    return false
    f.n += n
    return true
}
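The rewritten flow.add above detects int32 overflow without widening to int64: for sum = f.n + n, overflow occurred exactly when (sum > n) disagrees with (f.n > 0). A standalone sketch of that test (the function name is illustrative):

```go
package main

import "fmt"

// safeAdd applies the same check as flow.add above: signed overflow in Go
// wraps, and wrapping flips the (sum > n) comparison relative to (a > 0),
// so agreement between the two means the sum is in range.
func safeAdd(a, n int32) (int32, bool) {
    sum := a + n
    if (sum > n) == (a > 0) {
        return sum, true // in range
    }
    return 0, false // would overflow the 31-bit window
}

func main() {
    fmt.Println(safeAdd(1<<31-2, 1)) // 2147483647 true
    fmt.Println(safeAdd(1<<31-1, 1)) // 0 false (would wrap)
    fmt.Println(safeAdd(-5, 3))      // -2 true
}
```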
67
vendor/golang.org/x/net/http2/frame.go
generated
vendored
@@ -14,8 +14,8 @@ import (
    "strings"
    "sync"

    "golang.org/x/net/http/httpguts"
    "golang.org/x/net/http2/hpack"
    "golang.org/x/net/lex/httplex"
)

const frameHeaderLen = 9
@@ -733,67 +733,32 @@ func (f *SettingsFrame) IsAck() bool {
    return f.FrameHeader.Flags.Has(FlagSettingsAck)
}

func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) {
func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) {
    f.checkValid()
    for i := 0; i < f.NumSettings(); i++ {
        if s := f.Setting(i); s.ID == id {
            return s.Val, true
    buf := f.p
    for len(buf) > 0 {
        settingID := SettingID(binary.BigEndian.Uint16(buf[:2]))
        if settingID == s {
            return binary.BigEndian.Uint32(buf[2:6]), true
        }
        buf = buf[6:]
    }
    return 0, false
}

// Setting returns the setting from the frame at the given 0-based index.
// The index must be >= 0 and less than f.NumSettings().
func (f *SettingsFrame) Setting(i int) Setting {
    buf := f.p
    return Setting{
        ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])),
        Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]),
    }
}

func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 }

// HasDuplicates reports whether f contains any duplicate setting IDs.
func (f *SettingsFrame) HasDuplicates() bool {
    num := f.NumSettings()
    if num == 0 {
        return false
    }
    // If it's small enough (the common case), just do the n^2
    // thing and avoid a map allocation.
    if num < 10 {
        for i := 0; i < num; i++ {
            idi := f.Setting(i).ID
            for j := i + 1; j < num; j++ {
                idj := f.Setting(j).ID
                if idi == idj {
                    return true
                }
            }
        }
        return false
    }
    seen := map[SettingID]bool{}
    for i := 0; i < num; i++ {
        id := f.Setting(i).ID
        if seen[id] {
            return true
        }
        seen[id] = true
    }
    return false
}

// ForeachSetting runs fn for each setting.
// It stops and returns the first error.
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error {
    f.checkValid()
    for i := 0; i < f.NumSettings(); i++ {
        if err := fn(f.Setting(i)); err != nil {
    buf := f.p
    for len(buf) > 0 {
        if err := fn(Setting{
            SettingID(binary.BigEndian.Uint16(buf[:2])),
            binary.BigEndian.Uint32(buf[2:6]),
        }); err != nil {
            return err
        }
        buf = buf[6:]
    }
    return nil
}
@@ -1497,7 +1462,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) {
    if VerboseLogs && fr.logReads {
        fr.debugReadLoggerf("http2: decoded hpack field %+v", hf)
    }
    if !httpguts.ValidHeaderFieldValue(hf.Value) {
    if !httplex.ValidHeaderFieldValue(hf.Value) {
        invalid = headerFieldValueError(hf.Value)
    }
    isPseudo := strings.HasPrefix(hf.Name, ":")
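The Setting and Value methods above both walk the same wire layout: each SETTINGS entry is six bytes, a big-endian uint16 identifier followed by a big-endian uint32 value (RFC 7540, section 6.5.1). A minimal parser over a hand-built payload:

```go
package main

import (
    "encoding/binary"
    "fmt"
)

// parseSettings walks the 6-byte entries that Setting(i) above indexes into.
func parseSettings(p []byte) {
    for ; len(p) >= 6; p = p[6:] {
        id := binary.BigEndian.Uint16(p[:2])
        val := binary.BigEndian.Uint32(p[2:6])
        fmt.Printf("setting 0x%x = %d\n", id, val)
    }
}

func main() {
    // SETTINGS_MAX_CONCURRENT_STREAMS (0x3) = 250,
    // SETTINGS_INITIAL_WINDOW_SIZE (0x4) = 65535.
    payload := []byte{
        0x00, 0x03, 0x00, 0x00, 0x00, 0xfa,
        0x00, 0x04, 0x00, 0x00, 0xff, 0xff,
    }
    parseSettings(payload)
}
```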
26
vendor/golang.org/x/net/http2/go111.go
generated
vendored
@@ -1,26 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11

package http2

import "net/textproto"

func traceHasWroteHeaderField(trace *clientTrace) bool {
    return trace != nil && trace.WroteHeaderField != nil
}

func traceWroteHeaderField(trace *clientTrace, k, v string) {
    if trace != nil && trace.WroteHeaderField != nil {
        trace.WroteHeaderField(k, []string{v})
    }
}

func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error {
    if trace != nil {
        return trace.Got1xxResponse
    }
    return nil
}
15
vendor/golang.org/x/net/http2/go17.go
generated
vendored
@@ -18,8 +18,6 @@ type contextContext interface {
    context.Context
}

var errCanceled = context.Canceled

func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) {
    ctx, cancel = context.WithCancel(context.Background())
    ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr())
@@ -50,14 +48,6 @@ func (t *Transport) idleConnTimeout() time.Duration {

func setResponseUncompressed(res *http.Response) { res.Uncompressed = true }

func traceGetConn(req *http.Request, hostPort string) {
    trace := httptrace.ContextClientTrace(req.Context())
    if trace == nil || trace.GetConn == nil {
        return
    }
    trace.GetConn(hostPort)
}

func traceGotConn(req *http.Request, cc *ClientConn) {
    trace := httptrace.ContextClientTrace(req.Context())
    if trace == nil || trace.GotConn == nil {
@@ -114,8 +104,3 @@ func requestTrace(req *http.Request) *clientTrace {
func (cc *ClientConn) Ping(ctx context.Context) error {
    return cc.ping(ctx)
}

// Shutdown gracefully closes the client connection, waiting for running streams to complete.
func (cc *ClientConn) Shutdown(ctx context.Context) error {
    return cc.shutdown(ctx)
}
2
vendor/golang.org/x/net/http2/hpack/encode.go
generated
vendored
@@ -206,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte {
}

// appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the extended buffer.
// representation, to dst and returns the the extended buffer.
//
// s will be encoded in Huffman codes only when it produces strictly
// shorter byte string.
6
vendor/golang.org/x/net/http2/hpack/hpack.go
generated
vendored
@@ -389,12 +389,6 @@ func (d *Decoder) callEmit(hf HeaderField) error {

// (same invariants and behavior as parseHeaderFieldRepr)
func (d *Decoder) parseDynamicTableSizeUpdate() error {
    // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the
    // beginning of the first header block following the change to the dynamic table size.
    if d.dynTab.size > 0 {
        return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")}
    }

    buf := d.buf
    size, buf, err := readVarInt(5, buf)
    if err != nil {
20
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
@@ -47,7 +47,6 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data")
// If maxLen is greater than 0, attempts to write more to buf than
// maxLen bytes will return ErrStringLength.
func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
    rootHuffmanNode := getRootHuffmanNode()
    n := rootHuffmanNode
    // cur is the bit buffer that has not been fed into n.
    // cbits is the number of low order bits in cur that are valid.
@@ -107,7 +106,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {

type node struct {
    // children is non-nil for internal nodes
    children *[256]*node
    children []*node

    // The following are only valid if children is nil:
    codeLen uint8 // number of bits that led to the output of sym
@@ -115,31 +114,22 @@ type node struct {
}

func newInternalNode() *node {
    return &node{children: new([256]*node)}
    return &node{children: make([]*node, 256)}
}

var (
    buildRootOnce sync.Once
    lazyRootHuffmanNode *node
)
var rootHuffmanNode = newInternalNode()

func getRootHuffmanNode() *node {
    buildRootOnce.Do(buildRootHuffmanNode)
    return lazyRootHuffmanNode
}

func buildRootHuffmanNode() {
func init() {
    if len(huffmanCodes) != 256 {
        panic("unexpected size")
    }
    lazyRootHuffmanNode = newInternalNode()
    for i, code := range huffmanCodes {
        addDecoderNode(byte(i), code, huffmanCodeLen[i])
    }
}

func addDecoderNode(sym byte, code uint32, codeLen uint8) {
    cur := lazyRootHuffmanNode
    cur := rootHuffmanNode
    for codeLen > 8 {
        codeLen -= 8
        i := uint8(code >> codeLen)
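The huffman.go change above swaps init()-time table construction for a sync.Once guard, so the decoder tree is built on first use and exactly once even under concurrent callers. A generic sketch of the same lazy-initialization pattern (the table contents are placeholders):

```go
package main

import (
    "fmt"
    "sync"
)

var (
    buildOnce sync.Once
    table     map[byte]string
)

// getTable mirrors getRootHuffmanNode above: callers pay the construction
// cost only if they actually use the table, and sync.Once makes the lazy
// build safe to race on.
func getTable() map[byte]string {
    buildOnce.Do(func() {
        fmt.Println("building table once")
        table = map[byte]string{'a': "00", 'b': "01"} // placeholder contents
    })
    return table
}

func main() {
    var wg sync.WaitGroup
    for i := 0; i < 4; i++ {
        wg.Add(1)
        go func() {
            defer wg.Done()
            _ = getTable() // builds exactly once across all goroutines
        }()
    }
    wg.Wait()
    fmt.Println(len(getTable()), "entries")
}
```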
8
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@@ -29,7 +29,7 @@ import (
    "strings"
    "sync"

    "golang.org/x/net/http/httpguts"
    "golang.org/x/net/lex/httplex"
)

var (
@@ -179,7 +179,7 @@ var (
)

// validWireHeaderFieldName reports whether v is a valid header field
// name (key). See httpguts.ValidHeaderName for the base rules.
// name (key). See httplex.ValidHeaderName for the base rules.
//
// Further, http2 says:
// "Just as in HTTP/1.x, header field names are strings of ASCII
@@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool {
        return false
    }
    for _, r := range v {
        if !httpguts.IsTokenRune(r) {
        if !httplex.IsTokenRune(r) {
            return false
        }
        if 'A' <= r && r <= 'Z' {
@@ -312,7 +312,7 @@ func mustUint31(v int32) uint32 {
}

// bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 7230, section 3.3.
// permits a body. See RFC 2616, section 4.4.
func bodyAllowedForStatus(status int) bool {
    switch {
    case status >= 100 && status <= 199:
17
vendor/golang.org/x/net/http2/not_go111.go
generated
vendored
@@ -1,17 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.11

package http2

import "net/textproto"

func traceHasWroteHeaderField(trace *clientTrace) bool { return false }

func traceWroteHeaderField(trace *clientTrace, k, v string) {}

func traceGot1xxResponseFunc(trace *clientTrace) func(int, textproto.MIMEHeader) error {
    return nil
}
8
vendor/golang.org/x/net/http2/not_go17.go
generated
vendored
@@ -8,7 +8,6 @@ package http2

import (
    "crypto/tls"
    "errors"
    "net"
    "net/http"
    "time"
@@ -19,8 +18,6 @@ type contextContext interface {
    Err() error
}

var errCanceled = errors.New("canceled")

type fakeContext struct{}

func (fakeContext) Done() <-chan struct{} { return nil }
@@ -37,7 +34,6 @@ func setResponseUncompressed(res *http.Response) {
type clientTrace struct{}

func requestTrace(*http.Request) *clientTrace { return nil }
func traceGetConn(*http.Request, string) {}
func traceGotConn(*http.Request, *ClientConn) {}
func traceFirstResponseByte(*clientTrace) {}
func traceWroteHeaders(*clientTrace) {}
@@ -88,8 +84,4 @@ func (cc *ClientConn) Ping(ctx contextContext) error {
    return cc.ping(ctx)
}

func (cc *ClientConn) Shutdown(ctx contextContext) error {
    return cc.shutdown(ctx)
}

func (t *Transport) idleConnTimeout() time.Duration { return 0 }
131
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -46,16 +46,14 @@ import (
    "sync"
    "time"

    "golang.org/x/net/http/httpguts"
    "golang.org/x/net/http2/hpack"
)

const (
    prefaceTimeout = 10 * time.Second
    firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
    handlerChunkWriteSize = 4 << 10
    defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
    maxQueuedControlFrames = 10000
    prefaceTimeout = 10 * time.Second
    firstSettingsTimeout = 2 * time.Second // should be in-flight with preface anyway
    handlerChunkWriteSize = 4 << 10
    defaultMaxStreams = 250 // TODO: make this 100 as the GFE seems to?
)

var (
@@ -163,15 +161,6 @@ func (s *Server) maxConcurrentStreams() uint32 {
    return defaultMaxStreams
}

// maxQueuedControlFrames is the maximum number of control frames like
// SETTINGS, PING and RST_STREAM that will be queued for writing before
// the connection is closed to prevent memory exhaustion attacks.
func (s *Server) maxQueuedControlFrames() int {
    // TODO: if anybody asks, add a Server field, and remember to define the
    // behavior of negative values.
    return maxQueuedControlFrames
}

type serverInternalState struct {
    mu sync.Mutex
    activeConns map[*serverConn]struct{}
@@ -417,7 +406,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
    // addresses during development.
    //
    // TODO: optionally enforce? Or enforce at the time we receive
    // a new request, and verify the ServerName matches the :authority?
    // a new request, and verify the the ServerName matches the :authority?
    // But that precludes proxy situations, perhaps.
    //
    // So for now, do nothing here again.
@@ -480,7 +469,6 @@ type serverConn struct {
    sawFirstSettings bool // got the initial SETTINGS frame after the preface
    needToSendSettingsAck bool
    unackedSettings int // how many SETTINGS have we sent without ACKs?
    queuedControlFrames int // control frames in the writeSched queue
    clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit)
    advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client
    curClientStreams uint32 // number of open streams initiated by the client
@@ -868,14 +856,6 @@ func (sc *serverConn) serve() {
        }
    }

    // If the peer is causing us to generate a lot of control frames,
    // but not reading them from us, assume they are trying to make us
    // run out of memory.
    if sc.queuedControlFrames > sc.srv.maxQueuedControlFrames() {
        sc.vlogf("http2: too many control frames in send queue, closing connection")
        return
    }

    // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY
    // with no error code (graceful shutdown), don't start the timer until
    // all open streams have been completed.
@@ -1075,14 +1055,6 @@ func (sc *serverConn) writeFrame(wr FrameWriteRequest) {
    }

    if !ignoreWrite {
        if wr.isControl() {
            sc.queuedControlFrames++
            // For extra safety, detect wraparounds, which should not happen,
            // and pull the plug.
            if sc.queuedControlFrames < 0 {
                sc.conn.Close()
            }
        }
        sc.writeSched.Push(wr)
    }
    sc.scheduleFrameWrite()
@@ -1200,8 +1172,10 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) {
// If a frame is already being written, nothing happens. This will be called again
// when the frame is done being written.
//
// If a frame isn't being written and we need to send one, the best frame
// to send is selected by writeSched.
// If a frame isn't being written we need to send one, the best frame
// to send is selected, preferring first things that aren't
// stream-specific (e.g. ACKing settings), and then finding the
// highest priority stream.
//
// If a frame isn't being written and there's nothing else to send, we
// flush the write buffer.
@@ -1229,9 +1203,6 @@ func (sc *serverConn) scheduleFrameWrite() {
    }
    if !sc.inGoAway || sc.goAwayCode == ErrCodeNo {
        if wr, ok := sc.writeSched.Pop(); ok {
            if wr.isControl() {
                sc.queuedControlFrames--
            }
            sc.startFrameWrite(wr)
            continue
        }
@@ -1515,17 +1486,9 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
        }
        return nil
    }
    if f.NumSettings() > 100 || f.HasDuplicates() {
        // This isn't actually in the spec, but hang up on
        // suspiciously large settings frames or those with
        // duplicate entries.
        return ConnectionError(ErrCodeProtocol)
    }
    if err := f.ForeachSetting(sc.processSetting); err != nil {
        return err
    }
    // TODO: judging by RFC 7540, Section 6.5.3 each SETTINGS frame should be
    // acknowledged individually, even if multiple are received before the ACK.
    sc.needToSendSettingsAck = true
    sc.scheduleFrameWrite()
    return nil
@@ -1611,12 +1574,6 @@ func (sc *serverConn) processData(f *DataFrame) error {
        // type PROTOCOL_ERROR."
        return ConnectionError(ErrCodeProtocol)
    }
    // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in
    // "open" or "half-closed (local)" state, the recipient MUST respond with a
    // stream error (Section 5.4.2) of type STREAM_CLOSED.
    if state == stateClosed {
        return streamError(id, ErrCodeStreamClosed)
    }
    if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued {
        // This includes sending a RST_STREAM if the stream is
        // in stateHalfClosedLocal (which currently means that
@@ -1650,10 +1607,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
    // Sender sending more than they'd declared?
    if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes {
        st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes))
        // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
        // value of a content-length header field does not equal the sum of the
        // DATA frame payload lengths that form the body.
        return streamError(id, ErrCodeProtocol)
        return streamError(id, ErrCodeStreamClosed)
    }
    if f.Length > 0 {
        // Check whether the client has flow control quota.
@@ -1763,13 +1717,6 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
            // processing this frame.
            return nil
        }
        // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than
        // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in
        // this state, it MUST respond with a stream error (Section 5.4.2) of
        // type STREAM_CLOSED.
        if st.state == stateHalfClosedRemote {
            return streamError(id, ErrCodeStreamClosed)
        }
        return st.processTrailerHeaders(f)
    }

@@ -1870,7 +1817,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
    if st.trailer != nil {
        for _, hf := range f.RegularFields() {
            key := sc.canonicalHeader(hf.Name)
            if !httpguts.ValidTrailerHeader(key) {
            if !ValidTrailerHeader(key) {
                // TODO: send more details to the peer somehow. But http2 has
                // no way to send debug data at a stream level. Discuss with
                // HTTP folk.
@@ -2337,8 +2284,8 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) !=
// written in the trailers at the end of the response.
func (rws *responseWriterState) declareTrailer(k string) {
    k = http.CanonicalHeaderKey(k)
    if !httpguts.ValidTrailerHeader(k) {
        // Forbidden by RFC 7230, section 4.1.2.
    if !ValidTrailerHeader(k) {
        // Forbidden by RFC 2616 14.40.
        rws.conn.logf("ignoring invalid trailer %q", k)
        return
    }
@@ -2388,19 +2335,6 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) {
            foreachHeaderElement(v, rws.declareTrailer)
        }

        // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2),
        // but respect "Connection" == "close" to mean sending a GOAWAY and tearing
        // down the TCP connection when idle, like we do for HTTP/1.
        // TODO: remove more Connection-specific header fields here, in addition
        // to "Connection".
        if _, ok := rws.snapHeader["Connection"]; ok {
            v := rws.snapHeader.Get("Connection")
            delete(rws.snapHeader, "Connection")
            if v == "close" {
                rws.conn.startGracefulShutdown()
            }
        }

        endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp
        err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{
            streamID: rws.stream.id,
@@ -2472,7 +2406,7 @@ const TrailerPrefix = "Trailer:"
// after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 7230
// interface, and because nobody used trailers, and because RFC 2616
// says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header()
@@ -2856,7 +2790,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
}

// foreachHeaderElement splits v according to the "#rule" construction
// in RFC 7230 section 7 and calls fn for each non-empty element.
// in RFC 2616 section 2.1 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) {
    v = textproto.TrimString(v)
    if v == "" {
@@ -2904,6 +2838,41 @@ func new400Handler(err error) http.HandlerFunc {
    }
}

// ValidTrailerHeader reports whether name is a valid header field name to appear
// in trailers.
// See: http://tools.ietf.org/html/rfc7230#section-4.1.2
func ValidTrailerHeader(name string) bool {
    name = http.CanonicalHeaderKey(name)
    if strings.HasPrefix(name, "If-") || badTrailer[name] {
        return false
    }
    return true
}

var badTrailer = map[string]bool{
    "Authorization": true,
    "Cache-Control": true,
    "Connection": true,
    "Content-Encoding": true,
    "Content-Length": true,
    "Content-Range": true,
    "Content-Type": true,
    "Expect": true,
    "Host": true,
    "Keep-Alive": true,
    "Max-Forwards": true,
    "Pragma": true,
    "Proxy-Authenticate": true,
    "Proxy-Authorization": true,
    "Proxy-Connection": true,
    "Range": true,
    "Realm": true,
    "Te": true,
    "Trailer": true,
    "Transfer-Encoding": true,
    "Www-Authenticate": true,
}

// h1ServerKeepAlivesDisabled reports whether hs has its keep-alives
// disabled. See comments on h1ServerShutdownChan above for why
// the code is written this way.
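declareTrailer and ValidTrailerHeader above police the trailer names a handler may emit; headers such as Content-Length are rejected because they cannot legally follow the body. A sketch of the handler-side flow they validate, using plain net/http (the address and trailer name are illustrative): the trailer is predeclared before the body is written and filled in afterwards.

```go
package main

import (
    "fmt"
    "log"
    "net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
    // Predeclare the trailer so the server knows to send it after the body.
    w.Header().Set("Trailer", "X-Row-Count")
    fmt.Fprintln(w, "row 1")
    fmt.Fprintln(w, "row 2")
    // Setting a predeclared trailer after the body is written sends it as a
    // trailer; an invalid name would be dropped by the checks above.
    w.Header().Set("X-Row-Count", "2")
}

func main() {
    http.HandleFunc("/", handler)
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```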
232
vendor/golang.org/x/net/http2/transport.go
generated
vendored
232
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@@ -21,16 +21,15 @@ import (
mathrand "math/rand"
"net"
"net/http"
"net/textproto"
"sort"
"strconv"
"strings"
"sync"
"time"

"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/lex/httplex"
)

const (
@@ -160,7 +159,6 @@ type ClientConn struct {
cond *sync.Cond // hold mu; broadcast on flow/closed changes
flow flow // our conn-level flow control quota (cs.flow is per stream)
inflow flow // peer's conn-level flow control
closing bool
closed bool
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
@@ -213,10 +211,9 @@ type clientStream struct {
done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu

// owned by clientConnReadLoop:
firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers)
num1xx uint8 // number of 1xx responses seen
firstByte bool // got the first response byte
pastHeaders bool // got first MetaHeadersFrame (actual headers)
pastTrailers bool // got optional second MetaHeadersFrame (trailers)

trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
@@ -240,17 +237,6 @@ func awaitRequestCancel(req *http.Request, done <-chan struct{}) error {
}
}

var got1xxFuncForTests func(int, textproto.MIMEHeader) error

// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func,
// if any. It returns nil if not set or if the Go version is too old.
func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error {
if fn := got1xxFuncForTests; fn != nil {
return fn
}
return traceGot1xxResponseFunc(cs.trace)
}

// awaitRequestCancel waits for the user to cancel a request, its context to
// expire, or for the request to be done (any way it might be removed from the
// cc.streams map: peer reset, successful completion, TCP connection breakage,
@@ -437,36 +423,27 @@ func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*htt
if !canRetryError(err) {
return nil, err
}
if !afterBodyWrite {
return req, nil
}
// If the Body is nil (or http.NoBody), it's safe to reuse
// this request and its Body.
if req.Body == nil || reqBodyIsNoBody(req.Body) {
return req, nil
}

// If the request body can be reset back to its original
// state via the optional req.GetBody, do that.
// Otherwise we depend on the Request having its GetBody
// func defined.
getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody
if getBody != nil {
// TODO: consider a req.Body.Close here? or audit that all caller paths do?
body, err := getBody()
if err != nil {
return nil, err
}
newReq := *req
newReq.Body = body
return &newReq, nil
if getBody == nil {
return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
}

// The Request.Body can't reset back to the beginning, but we
// don't seem to have started to read from it yet, so reuse
// the request directly. The "afterBodyWrite" means the
// bodyWrite process has started, which becomes true before
// the first Read.
if !afterBodyWrite {
return req, nil
body, err := getBody()
if err != nil {
return nil, err
}

return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err)
newReq := *req
newReq.Body = body
return &newReq, nil
}
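The shouldRetryRequest hunk above makes retries after a body write depend entirely on Request.GetBody. A minimal sketch of how a caller keeps a bodied request retryable (the URL is a placeholder):

```Go
package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// newRetryableRequest builds a POST whose body can be re-created, so the
// http2 Transport can replay it after a GOAWAY or a dropped connection.
func newRetryableRequest(url string, payload []byte) (*http.Request, error) {
	req, err := http.NewRequest("POST", url, bytes.NewReader(payload))
	if err != nil {
		return nil, err
	}
	// http.NewRequest already sets GetBody for *bytes.Reader bodies (Go 1.8+),
	// but setting it explicitly shows the hook the retry path relies on.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}
	return req, nil
}

func main() {
	req, err := newRetryableRequest("https://example.invalid/upload", []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	_ = req // hand off to an http.Client backed by an http2-enabled Transport
}
```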
func canRetryError(err error) bool {
@@ -590,10 +567,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
// henc in response to SETTINGS frames?
cc.henc = hpack.NewEncoder(&cc.hbuf)

if t.AllowHTTP {
cc.nextStreamID = 3
}

if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
@@ -653,32 +626,12 @@ func (cc *ClientConn) CanTakeNewRequest() bool {
return cc.canTakeNewRequestLocked()
}

// clientConnIdleState describes the suitability of a client
// connection to initiate a new RoundTrip request.
type clientConnIdleState struct {
canTakeNewRequest bool
freshConn bool // whether it's unused by any previous request
}

func (cc *ClientConn) idleState() clientConnIdleState {
cc.mu.Lock()
defer cc.mu.Unlock()
return cc.idleStateLocked()
}

func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
if cc.singleUse && cc.nextStreamID > 1 {
return
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing &&
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest
return
}

func (cc *ClientConn) canTakeNewRequestLocked() bool {
st := cc.idleStateLocked()
return st.canTakeNewRequest
if cc.singleUse && cc.nextStreamID > 1 {
return false
}
return cc.goAway == nil && !cc.closed &&
int64(cc.nextStreamID)+int64(cc.pendingRequests) < math.MaxInt32
}

// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -708,88 +661,6 @@ func (cc *ClientConn) closeIfIdle() {
cc.tconn.Close()
}

var shutdownEnterWaitStateHook = func() {}

// Shutdown gracefully close the client connection, waiting for running streams to complete.
// Public implementation is in go17.go and not_go17.go
func (cc *ClientConn) shutdown(ctx contextContext) error {
if err := cc.sendGoAway(); err != nil {
return err
}
// Wait for all in-flight streams to complete or connection to close
done := make(chan error, 1)
cancelled := false // guarded by cc.mu
go func() {
cc.mu.Lock()
defer cc.mu.Unlock()
for {
if len(cc.streams) == 0 || cc.closed {
cc.closed = true
done <- cc.tconn.Close()
break
}
if cancelled {
break
}
cc.cond.Wait()
}
}()
shutdownEnterWaitStateHook()
select {
case err := <-done:
return err
case <-ctx.Done():
cc.mu.Lock()
// Free the goroutine above
cancelled = true
cc.cond.Broadcast()
cc.mu.Unlock()
return ctx.Err()
}
}

func (cc *ClientConn) sendGoAway() error {
cc.mu.Lock()
defer cc.mu.Unlock()
cc.wmu.Lock()
defer cc.wmu.Unlock()
if cc.closing {
// GOAWAY sent already
return nil
}
// Send a graceful shutdown frame to server
maxStreamID := cc.nextStreamID
if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil {
return err
}
if err := cc.bw.Flush(); err != nil {
return err
}
// Prevent new requests
cc.closing = true
return nil
}

// Close closes the client connection immediately.
//
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
func (cc *ClientConn) Close() error {
cc.mu.Lock()
defer cc.cond.Broadcast()
defer cc.mu.Unlock()
err := errors.New("http2: client connection force closed via ClientConn.Close")
for id, cs := range cc.streams {
select {
case cs.resc <- resAndError{err: err}:
default:
}
cs.bufPipe.CloseWithError(err)
delete(cc.streams, id)
}
cc.closed = true
return cc.tconn.Close()
}
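In the vintage shown here the graceful path is the unexported shutdown above; newer x/net releases export it as ClientConn.Shutdown. A sketch of driving it directly over a dedicated connection, assuming such a release (host and timeout are illustrative):

```Go
package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
		NextProtos: []string{"h2"},
		ServerName: "example.com",
	})
	if err != nil {
		log.Fatal(err)
	}

	t := &http2.Transport{}
	cc, err := t.NewClientConn(conn)
	if err != nil {
		log.Fatal(err)
	}

	// Shutdown sends a GOAWAY, refuses new streams, and waits for in-flight
	// ones, or gives up when the context expires -- the behavior sketched above.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}
```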
const maxAllocFrameSize = 512 << 10

// frameBuffer returns a scratch buffer suitable for writing DATA frames.
@@ -872,7 +743,7 @@ func checkConnHeaders(req *http.Request) error {
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) {
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
@@ -1060,7 +931,6 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf
default:
}
if err != nil {
cc.forgetStreamID(cs.ID)
return nil, cs.getStartedWrite(), err
}
bodyWritten = true
@@ -1081,9 +951,6 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error {
for {
cc.lastActive = time.Now()
if cc.closed || !cc.canTakeNewRequestLocked() {
if waitingForConn != nil {
close(waitingForConn)
}
return errClientConnUnusable
}
if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) {
@@ -1182,7 +1049,6 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) (
sawEOF = true
err = nil
} else if err != nil {
cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
return err
}

@@ -1308,7 +1174,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
host, err := httplex.PunycodeHostPort(host)
if err != nil {
return nil, err
}
@@ -1333,11 +1199,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
for k, vv := range req.Header {
if !httpguts.ValidHeaderFieldName(k) {
if !httplex.ValidHeaderFieldName(k) {
return nil, fmt.Errorf("invalid HTTP header name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
if !httplex.ValidHeaderFieldValue(v) {
return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k)
}
}
@@ -1418,16 +1284,9 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
return nil, errRequestHeaderListSize
}

trace := requestTrace(req)
traceHeaders := traceHasWroteHeaderField(trace)

// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name = strings.ToLower(name)
cc.writeHeader(name, value)
if traceHeaders {
traceWroteHeaderField(trace, name, value)
}
cc.writeHeader(strings.ToLower(name), value)
})

return cc.hbuf.Bytes(), nil
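The httplex to httpguts switch running through these hunks is a package move; the validators behave the same. A quick sketch of what they accept and reject (the header strings are arbitrary examples):

```Go
package main

import (
	"fmt"

	"golang.org/x/net/http/httpguts"
)

func main() {
	// Field names must be RFC 7230 tokens; values must be free of control
	// characters. Both checks guard the hpack encoder state shown above.
	fmt.Println(httpguts.ValidHeaderFieldName("X-Request-Id"))  // true
	fmt.Println(httpguts.ValidHeaderFieldName("Bad Header"))    // false (space)
	fmt.Println(httpguts.ValidHeaderFieldValue("ok value"))     // true
	fmt.Println(httpguts.ValidHeaderFieldValue("bad\x00value")) // false (NUL)
}
```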
@@ -1749,7 +1608,8 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
// is the detail.
//
// As a special case, handleResponse may return (nil, nil) to skip the
// frame (currently only used for 1xx responses).
// frame (currently only used for 100 expect continue). This special
// case is going away after Issue 13851 is fixed.
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) {
if f.Truncated {
return nil, errResponseHeaderListSize
@@ -1764,6 +1624,15 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header")
}

if statusCode == 100 {
traceGot100Continue(cs.trace)
if cs.on100 != nil {
cs.on100() // forces any write delay timer to fire
}
cs.pastHeaders = false // do it all again
return nil, nil
}

header := make(http.Header)
res := &http.Response{
Proto: "HTTP/2.0",
@@ -1788,27 +1657,6 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
}
}

if statusCode >= 100 && statusCode <= 199 {
cs.num1xx++
const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
if cs.num1xx > max1xxResponses {
return nil, errors.New("http2: too many 1xx informational responses")
}
if fn := cs.get1xxTraceFunc(); fn != nil {
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
if cs.on100 != nil {
cs.on100() // forces any write delay timer to fire
}
}
cs.pastHeaders = false // do it all again
return nil, nil
}

streamEnded := f.StreamEnded()
isHead := cs.req.Method == "HEAD"
if !streamEnded || isHead {
@@ -2396,7 +2244,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body
}
s.delay = t.expectContinueTimeout()
if s.delay == 0 ||
!httpguts.HeaderValuesContainsToken(
!httplex.HeaderValuesContainsToken(
cs.req.Header["Expect"],
"100-continue") {
return
@@ -2451,5 +2299,5 @@ func (s bodyWriterState) scheduleBodyWrite() {
// isConnectionCloseRequest reports whether req should use its own
// connection for a single request and then close the connection.
func isConnectionCloseRequest(req *http.Request) bool {
return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close")
return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close")
}
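The 1xx accounting added above is observable from client code through net/http/httptrace's Got1xxResponse hook; a sketch, with example.com standing in for any server that may emit informational responses:

```Go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	trace := &httptrace.ClientTrace{
		// Invoked once per informational response; returning an error aborts
		// the request, which is how a cap like max1xxResponses surfaces.
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			fmt.Println("informational response:", code)
			return nil
		},
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if _, err := http.DefaultClient.Do(req); err != nil {
		fmt.Println("request error:", err)
	}
}
```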
4 vendor/golang.org/x/net/http2/write.go generated vendored
@@ -11,8 +11,8 @@ import (
"net/http"
"net/url"

"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/lex/httplex"
)

// writeFramer is implemented by any type that is used to write frames.
@@ -350,7 +350,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
isTE := k == "transfer-encoding"
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
if !httplex.ValidHeaderFieldValue(v) {
// TODO: return an error? golang.org/issue/14048
// For now just omit it.
continue
8 vendor/golang.org/x/net/http2/writesched.go generated vendored
@@ -32,7 +32,7 @@ type WriteScheduler interface {

// Pop dequeues the next frame to write. Returns false if no frames can
// be written. Frames with a given wr.StreamID() are Pop'd in the same
// order they are Push'd. No frames should be discarded except by CloseStream.
// order they are Push'd.
Pop() (wr FrameWriteRequest, ok bool)
}

@@ -76,12 +76,6 @@ func (wr FrameWriteRequest) StreamID() uint32 {
return wr.stream.id
}

// isControl reports whether wr is a control frame for MaxQueuedControlFrames
// purposes. That includes non-stream frames and RST_STREAM frames.
func (wr FrameWriteRequest) isControl() bool {
return wr.stream == nil
}

// DataSize returns the number of flow control bytes that must be consumed
// to write this entire frame. This is 0 for non-DATA frames.
func (wr FrameWriteRequest) DataSize() int {
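WriteScheduler, whose Pop contract changes in the hunk above, is pluggable on the server side. A sketch of selecting the priority scheduler explicitly (cert.pem and key.pem are placeholder paths):

```Go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
	// NewWriteScheduler lets a server choose how queued frames are popped;
	// the priority scheduler honors the RFC 7540 priority tree.
	h2 := &http2.Server{
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```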
@@ -2,7 +2,12 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package httpguts
// Package httplex contains rules around lexical matters of various
// HTTP-related specifications.
//
// This package is shared by the standard library (which vendors it)
// and x/net/http2. It comes with no API stability promise.
package httplex

import (
"net"
3 vendor/gopkg.in/yaml.v2/.travis.yml generated vendored
@@ -4,9 +4,6 @@ go:
- 1.4
- 1.5
- 1.6
- 1.7
- 1.8
- 1.9
- tip

go_import_path: gopkg.in/yaml.v2
208 vendor/gopkg.in/yaml.v2/LICENSE generated vendored
@@ -1,201 +1,13 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Copyright 2011-2016 Canonical Ltd.

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

1. Definitions.
http://www.apache.org/licenses/LICENSE-2.0

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright {yyyy} {name of copyright owner}

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
4 vendor/gopkg.in/yaml.v2/README.md generated vendored
@@ -48,6 +48,8 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS
Example
-------

Some more examples can be found in the "examples" folder.

```Go
package main

@@ -65,8 +67,6 @@ b:
d: [3, 4]
`

// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {
55 vendor/gopkg.in/yaml.v2/apic.go generated vendored
@@ -2,6 +2,7 @@ package yaml

import (
"io"
"os"
)

func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -47,9 +48,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
return n, nil
}

// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
}

// Set a string input.
@@ -63,12 +64,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
}

// Set a file input.
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
parser.read_handler = yaml_file_read_handler
parser.input_file = file
}

// Set the source encoding.
@@ -80,13 +81,14 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
}

// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}

// Destroy an emitter object.
@@ -100,10 +102,9 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
return nil
}

// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
return err
}

@@ -117,12 +118,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
}

// Set a file output.
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
}

// Set the output encoding.
@@ -251,41 +252,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
//

// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}

// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) {
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}

// Create DOCUMENT-START.
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}

// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}

///*
@@ -347,7 +348,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
}

// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
@@ -355,13 +356,15 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
implicit: implicit,
style: yaml_style_t(style),
}
return true
}

// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}

// Destroy an event object.
@@ -468,7 +471,7 @@ func yaml_event_delete(event *yaml_event_t) {
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
//
// assert(document) // Non-NULL document object is expected.
//
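The io.Reader read handler and yaml_parser_set_input_reader shown in this hunk are the plumbing behind yaml.v2's NewDecoder; a small usage sketch (the Config type is illustrative):

```Go
package main

import (
	"fmt"
	"log"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	type Config struct {
		Name     string `yaml:"name"`
		Replicas int    `yaml:"replicas"`
	}
	// NewDecoder reads from any io.Reader, courtesy of the reader-based
	// input handler added above.
	dec := yaml.NewDecoder(strings.NewReader("name: example-foo\nreplicas: 2\n"))
	var c Config
	if err := dec.Decode(&c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}
```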
278 vendor/gopkg.in/yaml.v2/decode.go generated vendored
@@ -4,7 +4,6 @@ import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
@@ -23,22 +22,19 @@ type node struct {
kind int
line, column int
tag string
// For an alias node, alias holds the resolved alias.
alias *node
value string
implicit bool
children []*node
anchors map[string]*node
value string
implicit bool
children []*node
anchors map[string]*node
}

// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.

type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
doneInit bool
parser yaml_parser_t
event yaml_event_t
doc *node
}

func newParser(b []byte) *parser {
@@ -46,30 +42,21 @@ func newParser(b []byte) *parser {
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}

if len(b) == 0 {
b = []byte{'\n'}
}

yaml_parser_set_input_string(&p.parser, b)
return &p
}

func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
yaml_parser_set_input_reader(&p.parser, r)
p.skip()
return &p
}

func (p *parser) init() {
if p.doneInit {
return
}
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}

func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
@@ -77,35 +64,16 @@ func (p *parser) destroy() {
yaml_parser_delete(&p.parser)
}

// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}

// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
yaml_event_delete(&p.event)
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}

func (p *parser) fail() {
@@ -113,10 +81,6 @@ func (p *parser) fail() {
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't iterate line before returning error
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
@@ -139,8 +103,7 @@ func (p *parser) anchor(n *node, anchor []byte) {
}

func (p *parser) parse() *node {
p.init()
switch p.peek() {
switch p.event.typ {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
@@ -155,7 +118,7 @@ func (p *parser) parse() *node {
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + p.event.typ.String())
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
}
}

@@ -171,20 +134,19 @@ func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.expect(yaml_DOCUMENT_START_EVENT)
p.skip()
n.children = append(n.children, p.parse())
p.expect(yaml_DOCUMENT_END_EVENT)
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return n
}

func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
n.alias = p.doc.anchors[n.value]
if n.alias == nil {
failf("unknown anchor '%s' referenced", n.value)
}
p.expect(yaml_ALIAS_EVENT)
p.skip()
return n
}

@@ -194,29 +156,29 @@ func (p *parser) scalar() *node {
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.expect(yaml_SCALAR_EVENT)
p.skip()
return n
}

func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.expect(yaml_SEQUENCE_END_EVENT)
p.skip()
return n
}

func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.expect(yaml_MAPPING_END_EVENT)
p.skip()
return n
}

@@ -225,14 +187,10 @@ func (p *parser) mapping() *node {

type decoder struct {
doc *node
aliases map[*node]bool
aliases map[string]bool
mapType reflect.Type
terrors []string
strict bool

decodeCount int
aliasCount int
aliasDepth int
}

var (
@@ -240,13 +198,11 @@ var (
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)

func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[*node]bool)
d.aliases = make(map[string]bool)
return d
}

@@ -295,7 +251,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@@ -318,39 +274,7 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm
return out, false, false
}

const (
// 400,000 decode operations is ~500kb of dense object declarations, or ~5kb of dense object declarations with 10000% alias expansion
alias_ratio_range_low = 400000
// 4,000,000 decode operations is ~5MB of dense object declarations, or ~4.5MB of dense object declarations with 10% alias expansion
alias_ratio_range_high = 4000000
// alias_ratio_range is the range over which we scale allowed alias ratios
alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low)
)

func allowedAliasRatio(decodeCount int) float64 {
switch {
case decodeCount <= alias_ratio_range_low:
// allow 99% to come from alias expansion for small-to-medium documents
return 0.99
case decodeCount >= alias_ratio_range_high:
// allow 10% to come from alias expansion for very large documents
return 0.10
default:
// scale smoothly from 99% down to 10% over the range.
// this maps to 396,000 - 400,000 allowed alias-driven decodes over the range.
// 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps).
return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range)
}
}
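To make the alias-expansion budget above concrete, here is a standalone restatement of allowedAliasRatio with a few sample inputs (a re-statement for inspection, not the vendored code itself):

```Go
package main

import "fmt"

// allowedAliasRatio restates the vendored scaling logic so the numbers can
// be checked in isolation: 99% below 400k decodes, 10% above 4M, linear between.
func allowedAliasRatio(decodeCount int) float64 {
	const lo, hi = 400000, 4000000
	switch {
	case decodeCount <= lo:
		return 0.99
	case decodeCount >= hi:
		return 0.10
	default:
		return 0.99 - 0.89*(float64(decodeCount-lo)/float64(hi-lo))
	}
}

func main() {
	for _, n := range []int{1000, 400000, 2200000, 4000000} {
		fmt.Printf("decodeCount=%-8d allowed alias ratio=%.3f\n", n, allowedAliasRatio(n))
	}
	// At the midpoint (2,200,000) the ratio is 0.99 - 0.89*0.5 = 0.545.
}
```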
func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
d.decodeCount++
if d.aliasDepth > 0 {
d.aliasCount++
}
if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) {
failf("document contains excessive aliasing")
}
switch n.kind {
case documentNode:
return d.document(n, out)
@@ -384,15 +308,16 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
}

func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n] = true
d.aliasDepth++
good = d.unmarshal(n.alias, out)
d.aliasDepth--
delete(d.aliases, n)
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
return good
}

@@ -404,7 +329,7 @@ func resetMap(out reflect.Value) {
}
}

func (d *decoder) scalar(n *node, out reflect.Value) bool {
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
@@ -428,26 +353,9 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool {
}
return true
}
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == yaml_BINARY_TAG {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should bowl out any dubious values.
text = []byte(n.value)
}
err := u.UnmarshalText(text)
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if err != nil {
fail(err)
}
@@ -458,54 +366,46 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
return true
}
if resolved != nil {
good = true
} else if resolved != nil {
out.SetString(n.value)
return true
good = true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else if tag == yaml_TIMESTAMP_TAG {
// It looks like a timestamp but for backward compatibility
// reasons we set it as a string, so that code that unmarshals
// timestamp-like values into interface{} will continue to
// see a string and not a time.Time.
// TODO(v3) Drop this.
out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
return true
good = true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
good = true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
return true
good = true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
good = true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
return true
good = true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
return true
good = true
}
}
}
@@ -514,49 +414,44 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
good = true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
good = true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
good = true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
return true
good = true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
return true
good = true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
return true
good = true
case int64:
out.SetFloat(float64(resolved))
return true
good = true
case uint64:
out.SetFloat(float64(resolved))
return true
good = true
case float64:
out.SetFloat(resolved)
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
good = true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -564,11 +459,13 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
return true
good = true
}
}
d.terror(n, tag, out)
return false
if !good {
d.terror(n, tag, out)
}
return good
}

func settableValueOf(i interface{}) reflect.Value {
@@ -585,10 +482,6 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
@@ -607,9 +500,7 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
j++
}
}
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
out.Set(out.Slice(0, j))
if iface.IsValid() {
iface.Set(out)
}
@@ -670,7 +561,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
d.setMapIndex(n.children[i+1], out, k, e)
out.SetMapIndex(k, e)
}
}
}
@@ -678,14 +569,6 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
return true
}

func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
if d.strict && out.MapIndex(k) != zeroValue {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
return
}
out.SetMapIndex(k, v)
}

func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
@@ -733,10 +616,6 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
elemType = inlineMap.Type().Elem()
}

var doneFields []bool
if d.strict {
doneFields = make([]bool, len(sinfo.FieldsList))
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
@@ -747,13 +626,6 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.strict {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
@@ -767,9 +639,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
d.setMapIndex(n.children[i+1], inlineMap, name, value)
inlineMap.SetMapIndex(name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
}
}
return true
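The strict-mode bookkeeping in these hunks (setMapIndex, doneFields) surfaces through yaml.v2's UnmarshalStrict; a sketch of the duplicate-key error it reports, assuming a v2 release that includes this change:

```Go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// UnmarshalStrict surfaces the "already set in map" errors that the
	// strict branches above accumulate in d.terrors.
	data := []byte("a: 1\na: 2\n")
	var m map[string]int
	if err := yaml.UnmarshalStrict(data, &m); err != nil {
		fmt.Println("strict error:", err)
	}
}
```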
11 vendor/gopkg.in/yaml.v2/emitterc.go generated vendored
@@ -2,7 +2,6 @@ package yaml

import (
"bytes"
"fmt"
)

// Flush the buffer if needed.
@@ -665,7 +664,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_emit_mapping_start(emitter, event)
default:
return yaml_emitter_set_emitter_error(emitter,
fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
}
}

@@ -843,7 +842,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
return true
}

// Write an anchor.
// Write an achor.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
if emitter.anchor_data.anchor == nil {
return true
@@ -996,9 +995,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
space_break = false

preceded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
followed_by_whitespace = false
previous_space = false
previous_break = false
)

emitter.scalar_data.value = value
166
vendor/gopkg.in/yaml.v2/encode.go
generated
vendored
@@ -3,67 +3,38 @@ package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)

// jsonNumber is the interface of the encoding/json.Number datatype.
// Repeating the interface here avoids a dependency on encoding/json, and also
// supports other libraries like jsoniter, which use a similar datatype with
// the same interface. Detecting this interface is useful when dealing with
// structures containing json.Number, which is a string under the hood. The
// encoder should prefer the use of Int64(), Float64() and string(), in that
// order, when encoding this type.
type jsonNumber interface {
Float64() (float64, error)
Int64() (int64, error)
String() string
}

type encoder struct {
emitter yaml_emitter_t
event yaml_event_t
out []byte
flow bool
// doneInit holds whether the initial stream_start_event has been
// emitted.
doneInit bool
}

func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}

func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}

func (e *encoder) init() {
if e.doneInit {
return
}
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.doneInit = true
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}

func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
e.emit()
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.must(yaml_stream_end_event_initialize(&e.event))
e.emit()
}

@@ -73,7 +44,9 @@ func (e *encoder) destroy() {

func (e *encoder) emit() {
// This will internally delete the e.event value.
e.must(yaml_emitter_emit(&e.emitter, &e.event))
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
}

func (e *encoder) must(ok bool) {
@@ -86,43 +59,13 @@ func (e *encoder) must(ok bool) {
}
}

func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}

func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
if !in.IsValid() {
e.nilv()
return
}
iface := in.Interface()
switch m := iface.(type) {
case jsonNumber:
integer, err := m.Int64()
if err == nil {
// In this case the json.Number is a valid int64
in = reflect.ValueOf(integer)
break
}
float, err := m.Float64()
if err == nil {
// In this case the json.Number is a valid float64
in = reflect.ValueOf(float)
break
}
// fallback case - no number could be obtained
in = reflect.ValueOf(m.String())
case time.Time, *time.Time:
// Although time.Time implements TextMarshaler,
// we don't want to treat it as a string for YAML
// purposes because YAML has special support for
// timestamps.
case Marshaler:
if m, ok := iface.(Marshaler); ok {
v, err := m.MarshalYAML()
if err != nil {
fail(err)
@@ -132,34 +75,31 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
return
}
in = reflect.ValueOf(v)
case encoding.TextMarshaler:
} else if m, ok := iface.(encoding.TextMarshaler); ok {
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
e.marshal(tag, in.Elem())
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.Type() == ptrTimeType {
e.timev(tag, in.Elem())
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
if in.Type() == timeType {
e.timev(tag, in)
} else {
e.structv(tag, in)
}
case reflect.Slice, reflect.Array:
e.structv(tag, in)
case reflect.Slice:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
@@ -251,10 +191,10 @@ func (e *encoder) mappingv(tag string, f func()) {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.must(yaml_mapping_end_event_initialize(&e.event))
e.emit()
}

@@ -300,36 +240,23 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == yaml_BINARY_TAG {
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
}
if tag != "" {
} else {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = yaml_BINARY_TAG
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
@@ -354,20 +281,9 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}

func (e *encoder) floatv(tag string, in reflect.Value) {
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}

s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
switch s {
case "+Inf":
s = ".inf"

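The jsonNumber interface in the hunk above lets the newer encoder emit encoding/json.Number values as YAML numbers rather than quoted strings, trying Int64(), then Float64(), then String(). A small sketch of that behavior, assuming a current gopkg.in/yaml.v2:

package main

import (
	"encoding/json"
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	v := map[string]interface{}{
		"count": json.Number("42"),   // valid int64 -> Int64() path
		"ratio": json.Number("0.25"), // valid float64 -> Float64() path
	}
	out, err := yaml.Marshal(v)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// count: 42
	// ratio: 0.25
}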
5
vendor/gopkg.in/yaml.v2/go.mod
generated
vendored
@@ -1,5 +0,0 @@
module "gopkg.in/yaml.v2"

require (
"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)
20
vendor/gopkg.in/yaml.v2/readerc.go
generated
vendored
@@ -93,18 +93,9 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
panic("read handler must be set")
}

// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// for that to be the case, and there are tests

// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is Go) panicking; or C) accessing invalid memory.
//return true
return true
}

// Return if the buffer contains enough characters.
@@ -398,15 +389,6 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}

82
vendor/gopkg.in/yaml.v2/resolve.go
generated
vendored
@@ -6,7 +6,7 @@ import (
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
)

type resolveMapItem struct {
@@ -75,13 +75,13 @@ func longTag(tag string) string {

func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
return true
}
return false
}

var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`)

func resolve(tag string, in string) (rtag string, out interface{}) {
if !resolvableTag(tag) {
@@ -92,19 +92,6 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
case yaml_FLOAT_TAG:
if rtag == yaml_INT_TAG {
switch v := out.(type) {
case int64:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
case int:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
@@ -138,15 +125,6 @@ func resolve(tag string, in string) (rtag string, out interface{}) {

case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == yaml_TIMESTAMP_TAG {
t, ok := parseTimestamp(in)
if ok {
return yaml_TIMESTAMP_TAG, t
}
}

plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
@@ -180,20 +158,28 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
intv, err := strconv.ParseInt(plain[3:], 2, 64)
if err == nil {
if true || intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
} else {
return yaml_INT_TAG, intv
return yaml_INT_TAG, -intv
}
}
}
// XXX Handle timestamps here.

default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
return yaml_STR_TAG, in
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
}

// encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -220,39 +206,3 @@ func encodeBase64(s string) string {
}
return string(out[:k])
}

// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}

// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.

// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}

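The parseTimestamp support added in the hunk above means unquoted scalars matching one of the allowedTimestampFormats resolve to !!timestamp and decode into a time.Time field. A minimal sketch, assuming a current gopkg.in/yaml.v2:

package main

import (
	"fmt"
	"time"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var v struct {
		When time.Time `yaml:"when"`
	}
	// Matches the "2006-1-2 15:4:5.999999999" format (no time zone, so UTC).
	if err := yaml.Unmarshal([]byte("when: 2001-12-14 21:59:43.10"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.When.UTC()) // 2001-12-14 21:59:43.1 +0000 UTC
}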
56
vendor/gopkg.in/yaml.v2/scannerc.go
generated
vendored
@@ -871,6 +871,12 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {

required := parser.flow_level == 0 && parser.indent == parser.mark.column

// A simple key is required only when it is the first token in the current
// line. Therefore it is always allowed. But we add a check anyway.
if required && !parser.simple_key_allowed {
panic("should not happen")
}

//
// If the current position may start a simple key, save it.
//
@@ -906,9 +912,6 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
return true
}

// max_flow_level limits the flow_level
const max_flow_level = 10000

// Increase the flow level and resize the simple key list if needed.
func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
// Reset the simple key on the next level.
@@ -916,11 +919,6 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {

// Increase the flow level.
parser.flow_level++
if parser.flow_level > max_flow_level {
return yaml_parser_set_scanner_error(parser,
"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
fmt.Sprintf("exceeded max depth of %d", max_flow_level))
}
return true
}

@@ -933,9 +931,6 @@ func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
return true
}

// max_indents limits the indents stack size
const max_indents = 10000

// Push the current indentation level to the stack and set the new level
// the current column is greater than the indentation level. In this case,
// append or insert the specified token into the token queue.
@@ -950,11 +945,6 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml
// indentation level.
parser.indents = append(parser.indents, parser.indent)
parser.indent = column
if len(parser.indents) > max_indents {
return yaml_parser_set_scanner_error(parser,
"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
fmt.Sprintf("exceeded max depth of %d", max_indents))
}

// Create a token and insert it into the queue.
token := yaml_token_t{
@@ -1954,7 +1944,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
if directive && string(s) != "!" {
if directive && !(s[0] == '!' && s[1] == 0) {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@@ -1969,12 +1959,12 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
hasTag := len(head) > 0
length := len(head)

// Copy the head if needed.
//
// Note that we don't copy the leading '!' character.
if len(head) > 1 {
if length > 0 {
s = append(s, head[1:]...)
}

@@ -2007,14 +1997,15 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
}
} else {
s = read(parser, s)
length++
}
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
hasTag = true
}

if !hasTag {
// Check if the tag is non-empty.
if length == 0 {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false
@@ -2485,10 +2476,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
}

if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}

// Check if we are at the end of the scalar.
if single {
if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2501,6 +2488,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}

// Consume blank characters.
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}

for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Consume a space or a tab character.
@@ -2602,10 +2593,19 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
// Consume non-blank characters.
for !is_blankz(parser.buffer, parser.buffer_pos) {

// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
if parser.flow_level > 0 &&
parser.buffer[parser.buffer_pos] == ':' &&
!is_blankz(parser.buffer, parser.buffer_pos+1) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found unexpected ':'")
return false
}

// Check for indicators that may end a plain scalar.
if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
(parser.flow_level > 0 &&
(parser.buffer[parser.buffer_pos] == ',' ||
(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
parser.buffer[parser.buffer_pos] == '}')) {
@@ -2657,10 +2657,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {

// Check for tab characters that abuse indentation.
// Check for tab character that abuse indentation.
if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found a tab character that violates indentation")
start_mark, "found a tab character that violate indentation")
return false
}


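The max_flow_level and max_indents guards in the hunks above cap scanner recursion at 10000 levels. A rough sketch of the observable effect, assuming a yaml.v2 build with these limits compiled in (the exact error text may differ):

package main

import (
	"fmt"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// 10001 nested flow sequences exceed max_flow_level, so the scanner
	// aborts with an "exceeded max depth" error instead of recursing
	// without bound.
	deep := strings.Repeat("[", 10001) + strings.Repeat("]", 10001)
	var v interface{}
	err := yaml.Unmarshal([]byte(deep), &v)
	fmt.Println(err)
}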
9
vendor/gopkg.in/yaml.v2/sorter.go
generated
vendored
@@ -51,15 +51,6 @@ func (l keyList) Less(i, j int) bool {
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}

65
vendor/gopkg.in/yaml.v2/writerc.go
generated
vendored
@@ -18,9 +18,72 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
return true
}

if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}

// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}

pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.

// Read the next UTF-8 character.
octet := emitter.buffer[pos]

var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w

// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}

// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}

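The recode loop in the hunk above converts emitted UTF-8 into UTF-16 units: code points below 0x10000 become one unit, the rest a surrogate pair. A self-contained sketch of the same transformation for the little-endian case, verified against the standard library's utf16 package:

package main

import (
	"fmt"
	"unicode/utf16"
)

// recodeUTF16LE mirrors the branch above: code points below 0x10000
// become one little-endian unit, the rest a surrogate pair.
func recodeUTF16LE(s string) []byte {
	var out []byte
	for _, r := range s {
		if r < 0x10000 {
			out = append(out, byte(r), byte(r>>8))
		} else {
			r1, r2 := utf16.EncodeRune(r) // high, low surrogate
			out = append(out, byte(r1), byte(r1>>8), byte(r2), byte(r2>>8))
		}
	}
	return out
}

func main() {
	fmt.Printf("% x\n", recodeUTF16LE("a\U0001F600")) // 61 00 3d d8 00 de
}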
125
vendor/gopkg.in/yaml.v2/yaml.go
generated
vendored
@@ -9,7 +9,6 @@ package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
@@ -82,58 +81,12 @@ func Unmarshal(in []byte, out interface{}) (err error) {
}

// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// in the data that do not have corresponding struct members will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}

// A Decorder reads and decodes YAML values from an input stream.
type Decoder struct {
strict bool
parser *parser
}

// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}

// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
dec.strict = strict
}

// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder(dec.strict)
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}

func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder(strict)
@@ -157,8 +110,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// Struct fields are only unmarshalled if they are exported (have an upper case
// first letter), and are unmarshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
@@ -172,10 +125,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be included if that method returns true.
// Does not apply to zero valued structs.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
@@ -190,7 +140,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
// For example:
//
// type T struct {
// F int `yaml:"a,omitempty"`
// F int "a,omitempty"
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
@@ -200,47 +150,12 @@ func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshalDoc("", reflect.ValueOf(in))
e.marshal("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}

// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}

// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}

// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}

// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}

func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
@@ -296,9 +211,6 @@ type fieldInfo struct {
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int

// Inline holds the field index if the field is part of an inlined struct.
Inline []int
@@ -378,7 +290,6 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
@@ -400,16 +311,11 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return nil, errors.New(msg)
}

info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}

sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}

fieldMapMutex.Lock()
structMap[st] = sinfo
@@ -417,23 +323,8 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return sinfo, nil
}

// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}

func isZero(v reflect.Value) bool {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
switch v.Kind() {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:

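The Decoder and Encoder types that appear on one side of this diff form yaml.v2's streaming API for multi-document input and output. A minimal round-trip sketch, assuming a yaml.v2 version that includes them:

package main

import (
	"bytes"
	"fmt"
	"io"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer
	enc := yaml.NewEncoder(&buf)
	_ = enc.Encode(map[string]int{"a": 1})
	_ = enc.Encode(map[string]int{"b": 2}) // preceded by a "---" separator
	_ = enc.Close()

	dec := yaml.NewDecoder(&buf)
	for {
		var doc map[string]int
		if err := dec.Decode(&doc); err != nil {
			if err != io.EOF {
				panic(err)
			}
			break
		}
		fmt.Println(doc) // map[a:1], then map[b:2]
	}
}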
30
vendor/gopkg.in/yaml.v2/yamlh.go
generated
vendored
@@ -1,7 +1,6 @@
package yaml

import (
"fmt"
"io"
)

@@ -240,27 +239,6 @@ const (
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)

var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
}

func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}

// The event structure.
type yaml_event_t struct {

@@ -543,9 +521,9 @@ type yaml_parser_t struct {

read_handler yaml_read_handler_t // Read handler.

input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int

eof bool // EOF flag

@@ -654,7 +632,7 @@ type yaml_emitter_t struct {
write_handler yaml_write_handler_t // Write handler.

output_buffer *[]byte // String output data.
output_writer io.Writer // File output data.
output_file io.Writer // File output data.

buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.

2
vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go
generated
vendored
@@ -16,10 +16,10 @@ limitations under the License.

// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +groupName=admissionregistration.k8s.io

// Package v1alpha1 is the v1alpha1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1alpha1

38
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go
generated
vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
// DO NOT EDIT!

/*
Package v1alpha1 is a generated protocol buffer package.
@@ -31,19 +32,14 @@ limitations under the License.
*/
package v1alpha1

import (
fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

proto "github.com/gogo/protobuf/proto"
import strings "strings"
import reflect "reflect"

math "math"

strings "strings"

reflect "reflect"

io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -255,6 +251,24 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)

6
vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto
generated
vendored
@@ -88,7 +88,7 @@ message Rule {
repeated string apiVersions = 2;

// Resources is a list of resources this rule applies to.
//
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
@@ -96,10 +96,10 @@ message Rule {
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
repeated string resources = 3;

2
vendor/k8s.io/api/admissionregistration/v1beta1/doc.go
generated
vendored
@@ -16,10 +16,10 @@ limitations under the License.

// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +groupName=admissionregistration.k8s.io

// Package v1beta1 is the v1beta1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1beta1

40
vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go
generated
vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.
@@ -36,21 +37,16 @@ limitations under the License.
*/
package v1beta1

import (
fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

proto "github.com/gogo/protobuf/proto"
import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

math "math"
import strings "strings"
import reflect "reflect"

k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

strings "strings"

reflect "reflect"

io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -510,6 +506,24 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)

40
vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
generated
vendored
@@ -66,7 +66,7 @@ message Rule {
repeated string apiVersions = 2;

// Resources is a list of resources this rule applies to.
//
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
@@ -74,10 +74,10 @@ message Rule {
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
repeated string resources = 3;
@@ -168,7 +168,7 @@ message Webhook {
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the webhook.
//
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1"; you will set the selector as
// follows:
@@ -184,7 +184,7 @@ message Webhook {
// }
// ]
// }
//
//
// If instead you want to only run the webhook on any objects whose
// namespace is associated with the "environment" of "prod" or "staging";
// you will set the selector as follows:
@@ -200,11 +200,11 @@ message Webhook {
// }
// ]
// }
//
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
//
// Default to the empty LabelSelector, which matches everything.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5;
@@ -223,47 +223,47 @@ message Webhook {
// connection with the webhook
message WebhookClientConfig {
// `url` gives the location of the webhook, in standard URL form
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
//
// The scheme must be "https"; the URL must begin with "https://".
//
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
//
// +optional
optional string url = 3;

// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
//
// If the webhook is running within the cluster, then you should use `service`.
//
//
// Port 443 will be used if it is open, otherwise it is an error.
//
//
// +optional
optional ServiceReference service = 1;

// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
optional bytes caBundle = 2;
}

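The WebhookClientConfig comments above describe two mutually exclusive addressing forms. A minimal sketch in Go using the real k8s.io/api types; the namespace, service name, and URL are illustrative only:

package main

import (
	admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// clientConfigs builds the two forms the comments describe; exactly one
// of Service or URL may be set on a given config.
func clientConfigs(caPEM []byte) (inCluster, external admissionregistrationv1beta1.WebhookClientConfig) {
	path := "/validate"
	url := "https://webhook.example.com:8443/validate" // hypothetical endpoint

	// In-cluster form: reference a Service; the apiserver resolves it itself.
	inCluster = admissionregistrationv1beta1.WebhookClientConfig{
		Service: &admissionregistrationv1beta1.ServiceReference{
			Namespace: "default",
			Name:      "my-webhook", // hypothetical service name
			Path:      &path,
		},
		CABundle: caPEM,
	}

	// External form: a literal https URL.
	external = admissionregistrationv1beta1.WebhookClientConfig{
		URL:      &url,
		CABundle: caPEM,
	}
	return inCluster, external
}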
12
vendor/k8s.io/api/admissionregistration/v1beta1/types.go
generated
vendored
@@ -246,7 +246,7 @@ const (
// connection with the webhook
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
@@ -282,12 +282,12 @@ type WebhookClientConfig struct {
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`
Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`

// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
}

// ServiceReference holds a reference to Service.legacy.k8s.io

4
vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go
generated
vendored
@@ -114,9 +114,9 @@ func (Webhook) SwaggerDoc() map[string]string {

var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
"url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
"service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.",
}

func (WebhookClientConfig) SwaggerDoc() map[string]string {

45
vendor/k8s.io/api/apps/v1/generated.pb.go
generated
vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.
@@ -55,25 +56,19 @@ limitations under the License.
*/
package v1

import (
fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

proto "github.com/gogo/protobuf/proto"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

math "math"
import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

k8s_io_api_core_v1 "k8s.io/api/core/v1"
import strings "strings"
import reflect "reflect"

k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

strings "strings"

reflect "reflect"

io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -1445,6 +1440,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)

1
vendor/k8s.io/api/apps/v1/generated.proto
generated
vendored
@@ -280,7 +280,6 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready

9
vendor/k8s.io/api/apps/v1/types.go
generated
vendored
@@ -32,8 +32,6 @@ const (
|
||||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// StatefulSet represents a set of pods with consistent identities.
|
||||
@@ -246,8 +244,6 @@ type StatefulSetList struct {
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Deployment enables declarative updates for Pods and ReplicaSets.
|
||||
@@ -283,8 +279,7 @@ type DeploymentSpec struct {
|
||||
|
||||
// The deployment strategy to use to replace existing pods with new ones.
|
||||
// +optional
|
||||
// +patchStrategy=retainKeys
|
||||
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
|
||||
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
|
||||
|
||||
// Minimum number of seconds for which a newly created pod should be ready
|
||||
// without any of its container crashing, for it to be considered available.
|
||||
@@ -658,8 +653,6 @@ type DaemonSetList struct {
|
||||
}
|
||||
|
||||
// +genclient
|
||||
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
|
||||
|
||||
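The `+genclient:method=GetScale/UpdateScale` markers above direct client-gen to emit typed scale-subresource accessors on the generated clientsets. A hedged sketch of consuming such a client; the exact generated signature varies across client-go releases, so treat the call shape here as an assumption rather than the API of any specific release:

// Hedged sketch of calling a generated scale subresource client; the
// GetScale signature shown is an assumption, not taken from this diff.
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

func printDeploymentScale(cs kubernetes.Interface, namespace, name string) error {
	// GetScale is one of the methods emitted because of the
	// +genclient:method=GetScale marker in types.go above.
	scale, err := cs.AppsV1().Deployments(namespace).GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}
	fmt.Printf("%s/%s replicas: %d\n", namespace, name, scale.Spec.Replicas)
	return nil
}
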
2
vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go
generated
vendored
@@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}

var map_DaemonSetStatus = map[string]string{
"": "DaemonSetStatus represents the current status of a daemon set.",
"": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",

319
vendor/k8s.io/api/apps/v1beta1/generated.pb.go
generated
vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.
@@ -48,27 +49,21 @@ limitations under the License.
*/
package v1beta1

import (
fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

proto "github.com/gogo/protobuf/proto"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

math "math"
import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

k8s_io_api_core_v1 "k8s.io/api/core/v1"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import strings "strings"
import reflect "reflect"

k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

strings "strings"

reflect "reflect"

io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -1096,6 +1091,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -2539,14 +2552,51 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.UpdatedAnnotations == nil {
m.UpdatedAnnotations = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2556,80 +2606,41 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.UpdatedAnnotations[mapkey] = mapvalue
} else {
var mapvalue string
m.UpdatedAnnotations[mapkey] = mapvalue
}
m.UpdatedAnnotations[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -3822,14 +3833,51 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3839,80 +3887,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Selector[mapkey] = mapvalue
} else {
var mapvalue string
m.Selector[mapkey] = mapvalue
}
m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {

1
vendor/k8s.io/api/apps/v1beta1/generated.proto
generated
vendored
@@ -143,7 +143,6 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready

5
vendor/k8s.io/api/apps/v1beta1/types.go
generated
vendored
@@ -55,6 +55,8 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.
@@ -321,8 +323,7 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.

183
vendor/k8s.io/api/apps/v1beta2/generated.pb.go
generated
vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
// DO NOT EDIT!

/*
Package v1beta2 is a generated protocol buffer package.
@@ -58,27 +59,21 @@ limitations under the License.
*/
package v1beta2

import (
fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

proto "github.com/gogo/protobuf/proto"
import k8s_io_api_core_v1 "k8s.io/api/core/v1"
import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"

math "math"
import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

k8s_io_api_core_v1 "k8s.io/api/core/v1"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
import strings "strings"
import reflect "reflect"

k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"

github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

strings "strings"

reflect "reflect"

io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -1575,6 +1570,24 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -6096,14 +6109,51 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
if iNdEx < postIndex {
var valuekey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6113,80 +6163,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
valuekey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Selector[mapkey] = mapvalue
} else {
var mapvalue string
m.Selector[mapkey] = mapvalue
}
m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {

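The map-entry hunks above (UpdatedAnnotations, Selector) all decode protobuf `map<string,string>` fields, which the wire format represents as nested messages with field 1 = key and field 2 = value, each length-delimited. A self-contained illustration of that entry layout (illustrative only, not from the vendored code):

// Illustrative encoding of one protobuf map<string,string> entry,
// matching the layout the generated Unmarshal code above walks through.
package main

import "fmt"

func main() {
	key, value := "app", "nginx"
	entry := []byte{}
	entry = append(entry, 0x0a, byte(len(key))) // field 1 (key), wire type 2, then length
	entry = append(entry, key...)
	entry = append(entry, 0x12, byte(len(value))) // field 2 (value), wire type 2, then length
	entry = append(entry, value...)
	fmt.Printf("%% x\n", entry)
}

The newer decoder loops over such entries with entryPreIndex/skipGenerated so unknown fields are skipped; the older decoder read the key and value fields positionally.
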
1
vendor/k8s.io/api/apps/v1beta2/generated.proto
generated
vendored
@@ -286,7 +286,6 @@ message DeploymentSpec {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
optional DeploymentStrategy strategy = 4;

// Minimum number of seconds for which a newly created pod should be ready

5
vendor/k8s.io/api/apps/v1beta2/types.go
generated
vendored
@@ -57,6 +57,8 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.
@@ -329,8 +331,7 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.

2
vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go
generated
vendored
@@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}

var map_DaemonSetStatus = map[string]string{
"": "DaemonSetStatus represents the current status of a daemon set.",
"": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",

1690
vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go
generated
vendored
File diff suppressed because it is too large

158
vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto
generated
vendored
@@ -1,158 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/


// This file was autogenerated by go-to-protobuf. Do not edit it manually!

syntax = 'proto2';

package k8s.io.api.auditregistration.v1alpha1;

import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";

// Package-wide variables from generator "generated".
option go_package = "v1alpha1";

// AuditSink represents a cluster level audit sink
message AuditSink {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;

// Spec defines the audit configuration spec
optional AuditSinkSpec spec = 2;
}

// AuditSinkList is a list of AuditSink items.
message AuditSinkList {
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;

// List of audit configurations.
repeated AuditSink items = 2;
}

// AuditSinkSpec holds the spec for the audit sink
message AuditSinkSpec {
// Policy defines the policy for selecting which events should be sent to the webhook
// required
optional Policy policy = 1;

// Webhook to send events
// required
optional Webhook webhook = 2;
}

// Policy defines the configuration of how audit events are logged
message Policy {
// The Level that all requests are recorded at.
// available options: None, Metadata, Request, RequestResponse
// required
optional string level = 1;

// Stages is a list of stages for which events are created.
// +optional
repeated string stages = 2;
}

// ServiceReference holds a reference to Service.legacy.k8s.io
message ServiceReference {
// `namespace` is the namespace of the service.
// Required
optional string namespace = 1;

// `name` is the name of the service.
// Required
optional string name = 2;

// `path` is an optional URL path which will be sent in any request to
// this service.
// +optional
optional string path = 3;
}

// Webhook holds the configuration of the webhook
message Webhook {
// Throttle holds the options for throttling the webhook
// +optional
optional WebhookThrottleConfig throttle = 1;

// ClientConfig holds the connection parameters for the webhook
// required
optional WebhookClientConfig clientConfig = 2;
}

// WebhookClientConfig contains the information to make a connection with the webhook
message WebhookClientConfig {
// `url` gives the location of the webhook, in standard URL form
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
// The scheme must be "https"; the URL must begin with "https://".
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
// +optional
optional string url = 1;

// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
// If the webhook is running within the cluster, then you should use `service`.
//
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
optional ServiceReference service = 2;

// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
optional bytes caBundle = 3;
}

// WebhookThrottleConfig holds the configuration for throttling events
message WebhookThrottleConfig {
// ThrottleQPS maximum number of batches per second
// default 10 QPS
// +optional
optional int64 qps = 1;

// ThrottleBurst is the maximum number of events sent at the same moment
// default 15 QPS
// +optional
optional int64 burst = 2;
}

56
vendor/k8s.io/api/auditregistration/v1alpha1/register.go
generated
vendored
@@ -1,56 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupName is the group name use in this package
const GroupName = "auditregistration.k8s.io"

// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}

// Resource takes an unqualified resource and returns a Group qualified GroupResource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}

var (
SchemeBuilder runtime.SchemeBuilder
localSchemeBuilder = &SchemeBuilder
AddToScheme = localSchemeBuilder.AddToScheme
)

func init() {
// We only register manually written functions here. The registration of the
// generated functions takes place in the generated files. The separation
// makes the code compile even when the generated files are missing.
localSchemeBuilder.Register(addKnownTypes)
}

func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&AuditSink{},
&AuditSinkList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}

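register.go wires the package's types into a runtime.Scheme through the SchemeBuilder/AddToScheme pair shown above. A minimal sketch of consuming it in releases where the package exists (the main package and import alias are hypothetical):

// Minimal sketch of using the deleted package's AddToScheme.
package main

import (
	"fmt"

	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers AuditSink and AuditSinkList with the scheme,
	// via the addKnownTypes function wired up in init().
	if err := auditv1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(auditv1alpha1.SchemeGroupVersion.WithKind("AuditSink")))
}
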
194
vendor/k8s.io/api/auditregistration/v1alpha1/types.go
generated
vendored
@@ -1,194 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:openapi-gen=true

package v1alpha1

import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Level defines the amount of information logged during auditing
type Level string

// Valid audit levels
const (
// LevelNone disables auditing
LevelNone Level = "None"
// LevelMetadata provides the basic level of auditing.
LevelMetadata Level = "Metadata"
// LevelRequest provides Metadata level of auditing, and additionally
// logs the request object (does not apply for non-resource requests).
LevelRequest Level = "Request"
// LevelRequestResponse provides Request level of auditing, and additionally
// logs the response object (does not apply for non-resource requests and watches).
LevelRequestResponse Level = "RequestResponse"
)

// Stage defines the stages in request handling during which audit events may be generated.
type Stage string

// Valid audit stages.
const (
// The stage for events generated after the audit handler receives the request, but before it
// is delegated down the handler chain.
StageRequestReceived = "RequestReceived"
// The stage for events generated after the response headers are sent, but before the response body
// is sent. This stage is only generated for long-running requests (e.g. watch).
StageResponseStarted = "ResponseStarted"
// The stage for events generated after the response body has been completed, and no more bytes
// will be sent.
StageResponseComplete = "ResponseComplete"
// The stage for events generated when a panic occurred.
StagePanic = "Panic"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AuditSink represents a cluster level audit sink
type AuditSink struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// Spec defines the audit configuration spec
Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
}

// AuditSinkSpec holds the spec for the audit sink
type AuditSinkSpec struct {
// Policy defines the policy for selecting which events should be sent to the webhook
// required
Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"`

// Webhook to send events
// required
Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AuditSinkList is a list of AuditSink items.
type AuditSinkList struct {
metav1.TypeMeta `json:",inline"`
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// List of audit configurations.
Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// Policy defines the configuration of how audit events are logged
type Policy struct {
// The Level that all requests are recorded at.
// available options: None, Metadata, Request, RequestResponse
// required
Level Level `json:"level" protobuf:"bytes,1,opt,name=level"`

// Stages is a list of stages for which events are created.
// +optional
Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"`
}

// Webhook holds the configuration of the webhook
type Webhook struct {
// Throttle holds the options for throttling the webhook
// +optional
Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"`

// ClientConfig holds the connection parameters for the webhook
// required
ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
}

// WebhookThrottleConfig holds the configuration for throttling events
type WebhookThrottleConfig struct {
// ThrottleQPS maximum number of batches per second
// default 10 QPS
// +optional
QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"`

// ThrottleBurst is the maximum number of events sent at the same moment
// default 15 QPS
// +optional
Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"`
}

// WebhookClientConfig contains the information to make a connection with the webhook
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
// The scheme must be "https"; the URL must begin with "https://".
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
// +optional
URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"`

// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
// If the webhook is running within the cluster, then you should use `service`.
//
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"`

// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"`
}

// ServiceReference holds a reference to Service.legacy.k8s.io
type ServiceReference struct {
// `namespace` is the namespace of the service.
// Required
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`

// `name` is the name of the service.
// Required
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`

// `path` is an optional URL path which will be sent in any request to
// this service.
// +optional
Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
}

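Taken together, the deleted types describe a cluster-scoped audit sink: a Policy choosing the level and stages to record, and a Webhook destination. A minimal sketch of populating the struct as defined above; the webhook URL is a placeholder value:

// Minimal sketch of populating the AuditSink type defined above.
package main

import (
	auditv1alpha1 "k8s.io/api/auditregistration/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleSink() *auditv1alpha1.AuditSink {
	url := "https://audit.example.com/events" // placeholder endpoint
	return &auditv1alpha1.AuditSink{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sink"},
		Spec: auditv1alpha1.AuditSinkSpec{
			// Record request metadata once each response completes.
			Policy: auditv1alpha1.Policy{
				Level:  auditv1alpha1.LevelMetadata,
				Stages: []auditv1alpha1.Stage{auditv1alpha1.StageResponseComplete},
			},
			Webhook: auditv1alpha1.Webhook{
				ClientConfig: auditv1alpha1.WebhookClientConfig{URL: &url},
			},
		},
	}
}
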
110
vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go
generated
vendored
@@ -1,110 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AuditSink = map[string]string{
"": "AuditSink represents a cluster level audit sink",
"spec": "Spec defines the audit configuration spec",
}

func (AuditSink) SwaggerDoc() map[string]string {
return map_AuditSink
}

var map_AuditSinkList = map[string]string{
"": "AuditSinkList is a list of AuditSink items.",
"items": "List of audit configurations.",
}

func (AuditSinkList) SwaggerDoc() map[string]string {
return map_AuditSinkList
}

var map_AuditSinkSpec = map[string]string{
"": "AuditSinkSpec holds the spec for the audit sink",
"policy": "Policy defines the policy for selecting which events should be sent to the webhook required",
"webhook": "Webhook to send events required",
}

func (AuditSinkSpec) SwaggerDoc() map[string]string {
return map_AuditSinkSpec
}

var map_Policy = map[string]string{
"": "Policy defines the configuration of how audit events are logged",
"level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required",
"stages": "Stages is a list of stages for which events are created.",
}

func (Policy) SwaggerDoc() map[string]string {
return map_Policy
}

var map_ServiceReference = map[string]string{
"": "ServiceReference holds a reference to Service.legacy.k8s.io",
"namespace": "`namespace` is the namespace of the service. Required",
"name": "`name` is the name of the service. Required",
"path": "`path` is an optional URL path which will be sent in any request to this service.",
}

func (ServiceReference) SwaggerDoc() map[string]string {
return map_ServiceReference
}

var map_Webhook = map[string]string{
"": "Webhook holds the configuration of the webhook",
"throttle": "Throttle holds the options for throttling the webhook",
"clientConfig": "ClientConfig holds the connection parameters for the webhook required",
}

func (Webhook) SwaggerDoc() map[string]string {
return map_Webhook
}

var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
"service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
}

func (WebhookClientConfig) SwaggerDoc() map[string]string {
return map_WebhookClientConfig
}

var map_WebhookThrottleConfig = map[string]string{
"": "WebhookThrottleConfig holds the configuration for throttling events",
"qps": "ThrottleQPS maximum number of batches per second default 10 QPS",
"burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS",
}

func (WebhookThrottleConfig) SwaggerDoc() map[string]string {
return map_WebhookThrottleConfig
}

// AUTO-GENERATED FUNCTIONS END HERE

224
vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
generated
vendored
224
vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go
generated
vendored
@@ -1,224 +0,0 @@
|
||||
// +build !ignore_autogenerated
|
||||
|
||||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by deepcopy-gen. DO NOT EDIT.
|
||||
|
||||
package v1alpha1
|
||||
|
||||
import (
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuditSink) DeepCopyInto(out *AuditSink) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink.
|
||||
func (in *AuditSink) DeepCopy() *AuditSink {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuditSink)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AuditSink) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]AuditSink, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList.
|
||||
func (in *AuditSinkList) DeepCopy() *AuditSinkList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(AuditSinkList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *AuditSinkList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) {
|
||||
*out = *in
|
||||
in.Policy.DeepCopyInto(&out.Policy)
|
||||
    in.Webhook.DeepCopyInto(&out.Webhook)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec.
func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec {
    if in == nil {
        return nil
    }
    out := new(AuditSinkSpec)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Policy) DeepCopyInto(out *Policy) {
    *out = *in
    if in.Stages != nil {
        in, out := &in.Stages, &out.Stages
        *out = make([]Stage, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
func (in *Policy) DeepCopy() *Policy {
    if in == nil {
        return nil
    }
    out := new(Policy)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServiceReference) DeepCopyInto(out *ServiceReference) {
    *out = *in
    if in.Path != nil {
        in, out := &in.Path, &out.Path
        *out = new(string)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference.
func (in *ServiceReference) DeepCopy() *ServiceReference {
    if in == nil {
        return nil
    }
    out := new(ServiceReference)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Webhook) DeepCopyInto(out *Webhook) {
    *out = *in
    if in.Throttle != nil {
        in, out := &in.Throttle, &out.Throttle
        *out = new(WebhookThrottleConfig)
        (*in).DeepCopyInto(*out)
    }
    in.ClientConfig.DeepCopyInto(&out.ClientConfig)
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook.
func (in *Webhook) DeepCopy() *Webhook {
    if in == nil {
        return nil
    }
    out := new(Webhook)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) {
    *out = *in
    if in.URL != nil {
        in, out := &in.URL, &out.URL
        *out = new(string)
        **out = **in
    }
    if in.Service != nil {
        in, out := &in.Service, &out.Service
        *out = new(ServiceReference)
        (*in).DeepCopyInto(*out)
    }
    if in.CABundle != nil {
        in, out := &in.CABundle, &out.CABundle
        *out = make([]byte, len(*in))
        copy(*out, *in)
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig.
func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig {
    if in == nil {
        return nil
    }
    out := new(WebhookClientConfig)
    in.DeepCopyInto(out)
    return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) {
    *out = *in
    if in.QPS != nil {
        in, out := &in.QPS, &out.QPS
        *out = new(int64)
        **out = **in
    }
    if in.Burst != nil {
        in, out := &in.Burst, &out.Burst
        *out = new(int64)
        **out = **in
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig.
func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig {
    if in == nil {
        return nil
    }
    out := new(WebhookThrottleConfig)
    in.DeepCopyInto(out)
    return out
}
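All of the generated functions above follow one mechanical pattern: scalar fields are copied by plain assignment, pointer fields get a fresh allocation, and slices are re-made and copied, so the result shares no memory with the receiver. A minimal, self-contained sketch of that pattern, using a hypothetical MyType rather than the vendored API types:

package main

import "fmt"

// MyType is a hypothetical stand-in with one scalar, one pointer, and one
// slice field, mirroring the field shapes handled by the generated code above.
type MyType struct {
    Name string
    QPS  *int64
    Tags []string
}

// DeepCopyInto mirrors the generated pattern: scalars copy by value,
// pointers get a new allocation, slices get make+copy.
func (in *MyType) DeepCopyInto(out *MyType) {
    *out = *in
    if in.QPS != nil {
        in, out := &in.QPS, &out.QPS
        *out = new(int64)
        **out = **in
    }
    if in.Tags != nil {
        in, out := &in.Tags, &out.Tags
        *out = make([]string, len(*in))
        copy(*out, *in)
    }
}

// DeepCopy allocates the target and delegates to DeepCopyInto, exactly as
// the generated DeepCopy methods above do.
func (in *MyType) DeepCopy() *MyType {
    if in == nil {
        return nil
    }
    out := new(MyType)
    in.DeepCopyInto(out)
    return out
}

func main() {
    qps := int64(10)
    a := &MyType{Name: "a", QPS: &qps, Tags: []string{"x"}}
    b := a.DeepCopy()
    *b.QPS = 99
    fmt.Println(*a.QPS, *b.QPS) // prints "10 99": the copy owns its pointer
}

That independence is the point of the generated code: a caller can mutate a DeepCopy of a cached object without corrupting the shared original.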
1 vendor/k8s.io/api/authentication/v1/doc.go generated vendored
@@ -17,5 +17,4 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +groupName=authentication.k8s.io
// +k8s:openapi-gen=true

package v1
405 vendor/k8s.io/api/authentication/v1/generated.pb.go generated vendored
@@ -14,8 +14,9 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo. DO NOT EDIT.
// Code generated by protoc-gen-gogo.
// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.

@@ -36,23 +37,18 @@ limitations under the License.
*/
package v1

import (
    fmt "fmt"
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"

    proto "github.com/gogo/protobuf/proto"
import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"

    math "math"
import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

    k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types"
import strings "strings"
import reflect "reflect"

    github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"

    strings "strings"

    reflect "reflect"

    io "io"
)
import io "io"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal

@@ -360,21 +356,6 @@ func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) {
    i++
    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token)))
    i += copy(dAtA[i:], m.Token)
    if len(m.Audiences) > 0 {
        for _, s := range m.Audiences {
            dAtA[i] = 0x12
            i++
            l = len(s)
            for l >= 1<<7 {
                dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
                l >>= 7
                i++
            }
            dAtA[i] = uint8(l)
            i++
            i += copy(dAtA[i:], s)
        }
    }
    return i, nil
}

@@ -413,21 +394,6 @@ func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) {
    i++
    i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error)))
    i += copy(dAtA[i:], m.Error)
    if len(m.Audiences) > 0 {
        for _, s := range m.Audiences {
            dAtA[i] = 0x22
            i++
            l = len(s)
            for l >= 1<<7 {
                dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
                l >>= 7
                i++
            }
            dAtA[i] = uint8(l)
            i++
            i += copy(dAtA[i:], s)
        }
    }
    return i, nil
}

@@ -503,6 +469,24 @@ func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) {
    return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    dAtA[offset+4] = uint8(v >> 32)
    dAtA[offset+5] = uint8(v >> 40)
    dAtA[offset+6] = uint8(v >> 48)
    dAtA[offset+7] = uint8(v >> 56)
    return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
    dAtA[offset] = uint8(v)
    dAtA[offset+1] = uint8(v >> 8)
    dAtA[offset+2] = uint8(v >> 16)
    dAtA[offset+3] = uint8(v >> 24)
    return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        dAtA[offset] = uint8(v&0x7f | 0x80)
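encodeVarintGenerated above implements protobuf's base-128 varint encoding: each output byte carries seven bits of the value, low bits first, with the high bit set on every byte except the last. A standalone sketch of the same loop (encodeVarint is a hypothetical name, not the vendored symbol):

package main

import "fmt"

// encodeVarint writes v at buf[offset:] as a base-128 varint and returns
// the offset just past the last byte written.
func encodeVarint(buf []byte, offset int, v uint64) int {
    for v >= 1<<7 {
        buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
        v >>= 7
        offset++
    }
    buf[offset] = uint8(v) // final byte, continuation bit clear
    return offset + 1
}

func main() {
    buf := make([]byte, 10)
    n := encodeVarint(buf, 0, 300) // 300 = 0b1_0010_1100
    fmt.Printf("% x\n", buf[:n])   // prints "ac 02"
}

The inlined `for l >= 1<<7` loops in the MarshalTo hunks above are this same encoding, specialized to string lengths.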
@@ -596,12 +580,6 @@ func (m *TokenReviewSpec) Size() (n int) {
    _ = l
    l = len(m.Token)
    n += 1 + l + sovGenerated(uint64(l))
    if len(m.Audiences) > 0 {
        for _, s := range m.Audiences {
            l = len(s)
            n += 1 + l + sovGenerated(uint64(l))
        }
    }
    return n
}

@@ -613,12 +591,6 @@ func (m *TokenReviewStatus) Size() (n int) {
    n += 1 + l + sovGenerated(uint64(l))
    l = len(m.Error)
    n += 1 + l + sovGenerated(uint64(l))
    if len(m.Audiences) > 0 {
        for _, s := range m.Audiences {
            l = len(s)
            n += 1 + l + sovGenerated(uint64(l))
        }
    }
    return n
}

@@ -726,7 +698,6 @@ func (this *TokenReviewSpec) String() string {
    }
    s := strings.Join([]string{`&TokenReviewSpec{`,
        `Token:` + fmt.Sprintf("%v", this.Token) + `,`,
        `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
        `}`,
    }, "")
    return s
@@ -739,7 +710,6 @@ func (this *TokenReviewStatus) String() string {
        `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`,
        `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`,
        `Error:` + fmt.Sprintf("%v", this.Error) + `,`,
        `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`,
        `}`,
    }, "")
    return s
@@ -1599,35 +1569,6 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error {
            }
            m.Token = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 2:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowGenerated
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthGenerated
            }
            postIndex := iNdEx + intStringLen
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1757,35 +1698,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error {
            }
            m.Error = string(dAtA[iNdEx:postIndex])
            iNdEx = postIndex
        case 4:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType)
            }
            var stringLen uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowGenerated
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLen |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLen := int(stringLen)
            if intStringLen < 0 {
                return ErrInvalidLengthGenerated
            }
            postIndex := iNdEx + intStringLen
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex]))
            iNdEx = postIndex
        default:
            iNdEx = preIndex
            skippy, err := skipGenerated(dAtA[iNdEx:])
@@ -1949,14 +1861,51 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            var keykey uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowGenerated
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                keykey |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            var stringLenmapkey uint64
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowGenerated
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                stringLenmapkey |= (uint64(b) & 0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            intStringLenmapkey := int(stringLenmapkey)
            if intStringLenmapkey < 0 {
                return ErrInvalidLengthGenerated
            }
            postStringIndexmapkey := iNdEx + intStringLenmapkey
            if postStringIndexmapkey > l {
                return io.ErrUnexpectedEOF
            }
            mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
            iNdEx = postStringIndexmapkey
            if m.Extra == nil {
                m.Extra = make(map[string]ExtraValue)
            }
            var mapkey string
            mapvalue := &ExtraValue{}
            for iNdEx < postIndex {
                entryPreIndex := iNdEx
                var wire uint64
                if iNdEx < postIndex {
                    var valuekey uint64
                    for shift := uint(0); ; shift += 7 {
                        if shift >= 64 {
                            return ErrIntOverflowGenerated
@@ -1966,85 +1915,46 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error {
                        }
                        b := dAtA[iNdEx]
                        iNdEx++
                        wire |= (uint64(b) & 0x7F) << shift
                        valuekey |= (uint64(b) & 0x7F) << shift
                        if b < 0x80 {
                            break
                        }
                    }
                fieldNum := int32(wire >> 3)
                if fieldNum == 1 {
                    var stringLenmapkey uint64
                    for shift := uint(0); ; shift += 7 {
                        if shift >= 64 {
                            return ErrIntOverflowGenerated
                        }
                        if iNdEx >= l {
                            return io.ErrUnexpectedEOF
                        }
                        b := dAtA[iNdEx]
                        iNdEx++
                        stringLenmapkey |= (uint64(b) & 0x7F) << shift
                        if b < 0x80 {
                            break
                        }
                    var mapmsglen int
                    for shift := uint(0); ; shift += 7 {
                        if shift >= 64 {
                            return ErrIntOverflowGenerated
                        }
                    intStringLenmapkey := int(stringLenmapkey)
                    if intStringLenmapkey < 0 {
                        return ErrInvalidLengthGenerated
                    }
                    postStringIndexmapkey := iNdEx + intStringLenmapkey
                    if postStringIndexmapkey > l {
                        if iNdEx >= l {
                            return io.ErrUnexpectedEOF
                        }
                    mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
                    iNdEx = postStringIndexmapkey
                } else if fieldNum == 2 {
                    var mapmsglen int
                    for shift := uint(0); ; shift += 7 {
                        if shift >= 64 {
                            return ErrIntOverflowGenerated
                        }
                        if iNdEx >= l {
                            return io.ErrUnexpectedEOF
                        }
                        b := dAtA[iNdEx]
                        iNdEx++
                        mapmsglen |= (int(b) & 0x7F) << shift
                        if b < 0x80 {
                            break
                        }
                        b := dAtA[iNdEx]
                        iNdEx++
                        mapmsglen |= (int(b) & 0x7F) << shift
                        if b < 0x80 {
                            break
                        }
                    if mapmsglen < 0 {
                        return ErrInvalidLengthGenerated
                    }
                    postmsgIndex := iNdEx + mapmsglen
                    if mapmsglen < 0 {
                        return ErrInvalidLengthGenerated
                    }
                    if postmsgIndex > l {
                        return io.ErrUnexpectedEOF
                    }
                    mapvalue = &ExtraValue{}
                    if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
                        return err
                    }
                    iNdEx = postmsgIndex
                } else {
                    iNdEx = entryPreIndex
                    skippy, err := skipGenerated(dAtA[iNdEx:])
                    if err != nil {
                        return err
                    }
                    if skippy < 0 {
                        return ErrInvalidLengthGenerated
                    }
                    if (iNdEx + skippy) > postIndex {
                        return io.ErrUnexpectedEOF
                    }
                    iNdEx += skippy
                }
            if mapmsglen < 0 {
                return ErrInvalidLengthGenerated
            }
            postmsgIndex := iNdEx + mapmsglen
            if mapmsglen < 0 {
                return ErrInvalidLengthGenerated
            }
            if postmsgIndex > l {
                return io.ErrUnexpectedEOF
            }
            mapvalue := &ExtraValue{}
            if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil {
                return err
            }
            iNdEx = postmsgIndex
            m.Extra[mapkey] = *mapvalue
            } else {
                var mapvalue ExtraValue
                m.Extra[mapkey] = mapvalue
            }
            m.Extra[mapkey] = *mapvalue
            iNdEx = postIndex
        default:
            iNdEx = preIndex
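The reworked UserInfo.Unmarshal loop above decodes the Extra map by treating each map entry as a nested message: field 1 holds the key, field 2 the value, and anything else is skipped. A rough sketch of how one such entry is framed on the wire, simplified to map<string, string> (appendVarint and appendMapEntry are hypothetical helpers, not vendored code):

package main

import "fmt"

// appendVarint appends v as a base-128 varint.
func appendVarint(b []byte, v uint64) []byte {
    for v >= 1<<7 {
        b = append(b, byte(v&0x7f|0x80))
        v >>= 7
    }
    return append(b, byte(v))
}

// appendMapEntry frames one map<string, string> entry: the entry itself is a
// length-delimited message whose field 1 is the key and field 2 the value.
func appendMapEntry(b []byte, field uint64, k, v string) []byte {
    var entry []byte
    entry = appendVarint(entry, 1<<3|2) // field 1, wire type 2: key
    entry = appendVarint(entry, uint64(len(k)))
    entry = append(entry, k...)
    entry = appendVarint(entry, 2<<3|2) // field 2, wire type 2: value
    entry = appendVarint(entry, uint64(len(v)))
    entry = append(entry, v...)
    b = appendVarint(b, field<<3|2) // the map field itself, wire type 2
    b = appendVarint(b, uint64(len(entry)))
    return append(b, entry...)
}

func main() {
    wire := appendMapEntry(nil, 4, "scopes", "read")
    fmt.Printf("% x\n", wire)
}

Walking entries this way, rather than assuming key-then-value in fixed order, is what lets the new decoder tolerate entries with a missing key or value.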
@@ -2177,62 +2087,61 @@ func init() {
}

var fileDescriptorGenerated = []byte{
    // 900 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44,
    0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0xc0, 0x2e, 0x41, 0x42,
    0x15, 0xb0, 0xf6, 0x26, 0x42, 0xb0, 0x5a, 0x24, 0xa4, 0x9a, 0x46, 0x10, 0x21, 0xd8, 0xd5, 0xec,
    0xb6, 0x20, 0x4e, 0x4c, 0xec, 0xd7, 0xc4, 0x04, 0x8f, 0x8d, 0x3d, 0x0e, 0x9b, 0xdb, 0xfe, 0x09,
    0x1c, 0x41, 0xe2, 0xc0, 0x1f, 0x81, 0xc4, 0xbf, 0xd0, 0xe3, 0x8a, 0xd3, 0x1e, 0x50, 0x44, 0xcd,
    0x95, 0x23, 0x27, 0x4e, 0x68, 0xc6, 0xd3, 0x38, 0x4e, 0xda, 0x34, 0x27, 0x6e, 0x9e, 0xf7, 0xbe,
    0xf7, 0xbd, 0x37, 0xdf, 0x7c, 0x9e, 0x41, 0xbd, 0xf1, 0xfd, 0xd8, 0xf4, 0x02, 0x6b, 0x9c, 0x0c,
    0x20, 0x62, 0xc0, 0x21, 0xb6, 0x26, 0xc0, 0xdc, 0x20, 0xb2, 0x54, 0x82, 0x86, 0x9e, 0x45, 0x13,
    0x3e, 0x02, 0xc6, 0x3d, 0x87, 0x72, 0x2f, 0x60, 0xd6, 0xa4, 0x63, 0x0d, 0x81, 0x41, 0x44, 0x39,
    0xb8, 0x66, 0x18, 0x05, 0x3c, 0xc0, 0xaf, 0x66, 0x68, 0x93, 0x86, 0x9e, 0x59, 0x44, 0x9b, 0x93,
    0xce, 0xfe, 0xdd, 0xa1, 0xc7, 0x47, 0xc9, 0xc0, 0x74, 0x02, 0xdf, 0x1a, 0x06, 0xc3, 0xc0, 0x92,
    0x45, 0x83, 0xe4, 0x4c, 0xae, 0xe4, 0x42, 0x7e, 0x65, 0x64, 0xfb, 0xef, 0xe6, 0xad, 0x7d, 0xea,
    0x8c, 0x3c, 0x06, 0xd1, 0xd4, 0x0a, 0xc7, 0x43, 0x11, 0x88, 0x2d, 0x1f, 0x38, 0xbd, 0x62, 0x84,
    0x7d, 0xeb, 0xba, 0xaa, 0x28, 0x61, 0xdc, 0xf3, 0x61, 0xa5, 0xe0, 0xbd, 0x9b, 0x0a, 0x62, 0x67,
    0x04, 0x3e, 0x5d, 0xae, 0x6b, 0xff, 0xae, 0xa1, 0x97, 0xed, 0x20, 0x61, 0xee, 0xc3, 0xc1, 0x37,
    0xe0, 0x70, 0x02, 0x67, 0x10, 0x01, 0x73, 0x00, 0x1f, 0xa0, 0xea, 0xd8, 0x63, 0x6e, 0x4b, 0x3b,
    0xd0, 0x0e, 0x1b, 0xf6, 0xad, 0xf3, 0x99, 0x51, 0x4a, 0x67, 0x46, 0xf5, 0x53, 0x8f, 0xb9, 0x44,
    0x66, 0x70, 0x17, 0x21, 0xfa, 0xa8, 0x7f, 0x0a, 0x51, 0xec, 0x05, 0xac, 0x55, 0x96, 0x38, 0xac,
    0x70, 0xe8, 0x68, 0x9e, 0x21, 0x0b, 0x28, 0xc1, 0xca, 0xa8, 0x0f, 0xad, 0x4a, 0x91, 0xf5, 0x73,
    0xea, 0x03, 0x91, 0x19, 0x6c, 0xa3, 0x4a, 0xd2, 0x3f, 0x6e, 0x55, 0x25, 0xe0, 0x9e, 0x02, 0x54,
    0x4e, 0xfa, 0xc7, 0xff, 0xce, 0x8c, 0xd7, 0xaf, 0xdb, 0x24, 0x9f, 0x86, 0x10, 0x9b, 0x27, 0xfd,
    0x63, 0x22, 0x8a, 0xdb, 0xef, 0x23, 0xd4, 0x7b, 0xca, 0x23, 0x7a, 0x4a, 0xbf, 0x4d, 0x00, 0x1b,
    0xa8, 0xe6, 0x71, 0xf0, 0xe3, 0x96, 0x76, 0x50, 0x39, 0x6c, 0xd8, 0x8d, 0x74, 0x66, 0xd4, 0xfa,
    0x22, 0x40, 0xb2, 0xf8, 0x83, 0xfa, 0x8f, 0xbf, 0x18, 0xa5, 0x67, 0x7f, 0x1c, 0x94, 0xda, 0x3f,
    0x97, 0xd1, 0xad, 0x27, 0xc1, 0x18, 0x18, 0x81, 0xef, 0x12, 0x88, 0x39, 0xfe, 0x1a, 0xd5, 0xc5,
    0x11, 0xb9, 0x94, 0x53, 0xa9, 0x44, 0xb3, 0x7b, 0xcf, 0xcc, 0xdd, 0x31, 0x1f, 0xc2, 0x0c, 0xc7,
    0x43, 0x11, 0x88, 0x4d, 0x81, 0x36, 0x27, 0x1d, 0x33, 0x93, 0xf3, 0x33, 0xe0, 0x34, 0xd7, 0x24,
    0x8f, 0x91, 0x39, 0x2b, 0x7e, 0x84, 0xaa, 0x71, 0x08, 0x8e, 0xd4, 0xaf, 0xd9, 0x35, 0xcd, 0x75,
    0xde, 0x33, 0x17, 0x67, 0x7b, 0x1c, 0x82, 0x93, 0x2b, 0x28, 0x56, 0x44, 0x32, 0xe1, 0x2f, 0xd1,
    0x56, 0xcc, 0x29, 0x4f, 0x62, 0xa9, 0x72, 0x71, 0xe2, 0x9b, 0x38, 0x65, 0x9d, 0xbd, 0xa3, 0x58,
    0xb7, 0xb2, 0x35, 0x51, 0x7c, 0xed, 0x7f, 0x34, 0xb4, 0xbb, 0x3c, 0x02, 0x7e, 0x1b, 0x35, 0x68,
    0xe2, 0x7a, 0xc2, 0x34, 0x97, 0x12, 0x6f, 0xa7, 0x33, 0xa3, 0x71, 0x74, 0x19, 0x24, 0x79, 0x1e,
    0x33, 0xb4, 0x33, 0x28, 0xb8, 0x4d, 0xcd, 0xd8, 0x5d, 0x3f, 0xe3, 0x55, 0x0e, 0xb5, 0x71, 0x3a,
    0x33, 0x76, 0x8a, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x42, 0x7b, 0xf0, 0x34, 0xf4, 0x22, 0xc9, 0xf4,
    0x18, 0x9c, 0x80, 0xb9, 0xb1, 0xf4, 0x56, 0xc5, 0xbe, 0x93, 0xce, 0x8c, 0xbd, 0xde, 0x72, 0x92,
    0xac, 0xe2, 0xdb, 0xbf, 0x6a, 0x08, 0xaf, 0xaa, 0x84, 0xdf, 0x40, 0x35, 0x2e, 0xa2, 0xea, 0x17,
    0xd9, 0x56, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x53, 0x74, 0x3b, 0x27, 0x7c, 0xe2, 0xf9, 0x10,
    0x73, 0xea, 0x87, 0xea, 0xb4, 0xdf, 0xda, 0xcc, 0x4b, 0xa2, 0xcc, 0x7e, 0x45, 0xd1, 0xdf, 0xee,
    0xad, 0xd2, 0x91, 0xab, 0x7a, 0xb4, 0x7f, 0x2a, 0xa3, 0xa6, 0x1a, 0x7b, 0xe2, 0xc1, 0xf7, 0xff,
    0x83, 0x97, 0x1f, 0x16, 0xbc, 0x7c, 0x77, 0x23, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x17, 0x4b,
    0x56, 0xb6, 0x36, 0xa7, 0x5c, 0xef, 0x64, 0x07, 0xbd, 0xb4, 0xd4, 0x7f, 0xb3, 0xe3, 0x2c, 0x98,
    0xbd, 0xbc, 0xde, 0xec, 0xed, 0xbf, 0x35, 0xb4, 0xb7, 0x32, 0x12, 0xfe, 0x00, 0x6d, 0x2f, 0x4c,
    0x0e, 0xd9, 0x0d, 0x5b, 0xb7, 0xef, 0xa8, 0x7e, 0xdb, 0x47, 0x8b, 0x49, 0x52, 0xc4, 0xe2, 0x4f,
    0x50, 0x35, 0x89, 0x21, 0x52, 0x0a, 0xbf, 0xb9, 0x5e, 0x8e, 0x93, 0x18, 0xa2, 0x3e, 0x3b, 0x0b,
    0x72, 0x69, 0x45, 0x84, 0x48, 0x06, 0xb1, 0x5d, 0x88, 0xa2, 0x20, 0x52, 0x57, 0xf1, 0x7c, 0xbb,
    0x3d, 0x11, 0x24, 0x59, 0xae, 0xb8, 0xdd, 0xea, 0x0d, 0xdb, 0xfd, 0xad, 0x8c, 0xea, 0x97, 0x2d,
    0xf1, 0x3b, 0xa8, 0x2e, 0xda, 0xc8, 0xcb, 0x3e, 0x13, 0x74, 0x57, 0x75, 0x90, 0x18, 0x11, 0x27,
    0x73, 0x04, 0x7e, 0x0d, 0x55, 0x12, 0xcf, 0x55, 0x6f, 0x48, 0x73, 0xe1, 0xd2, 0x27, 0x22, 0x8e,
    0xdb, 0x68, 0x6b, 0x18, 0x05, 0x49, 0x28, 0x6c, 0x20, 0x66, 0x40, 0xe2, 0x44, 0x3f, 0x96, 0x11,
    0xa2, 0x32, 0xf8, 0x14, 0xd5, 0x40, 0xdc, 0xf9, 0x72, 0xcc, 0x66, 0xb7, 0xb3, 0x99, 0x34, 0xa6,
    0x7c, 0x27, 0x7a, 0x8c, 0x47, 0xd3, 0x05, 0x09, 0x44, 0x8c, 0x64, 0x74, 0xfb, 0x03, 0xf5, 0x96,
    0x48, 0x0c, 0xde, 0x45, 0x95, 0x31, 0x4c, 0xb3, 0x1d, 0x11, 0xf1, 0x89, 0x3f, 0x44, 0xb5, 0x89,
    0x78, 0x66, 0xd4, 0x91, 0x1c, 0xae, 0xef, 0x9b, 0x3f, 0x4b, 0x24, 0x2b, 0x7b, 0x50, 0xbe, 0xaf,
    0xd9, 0x87, 0xe7, 0x17, 0x7a, 0xe9, 0xf9, 0x85, 0x5e, 0x7a, 0x71, 0xa1, 0x97, 0x9e, 0xa5, 0xba,
    0x76, 0x9e, 0xea, 0xda, 0xf3, 0x54, 0xd7, 0x5e, 0xa4, 0xba, 0xf6, 0x67, 0xaa, 0x6b, 0x3f, 0xfc,
    0xa5, 0x97, 0xbe, 0x2a, 0x4f, 0x3a, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0x5f, 0x04, 0x81, 0x6f,
    0xe2, 0x08, 0x00, 0x00,
    // 892 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44,
    0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24,
    0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69,
    0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13,
    0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b,
    0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde,
    0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2,
    0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69,
    0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53,
    0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad,
    0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0,
    0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e,
    0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15,
    0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90,
    0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c,
    0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d,
    0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79,
    0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38,
    0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb,
    0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05,
    0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7,
    0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80,
    0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a,
    0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9,
    0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6,
    0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1,
    0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a,
    0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5,
    0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15,
    0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55,
    0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75,
    0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91,
    0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38,
    0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9,
    0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72,
    0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5,
    0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07,
    0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf,
    0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f,
    0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97,
    0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e,
    0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7,
    0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88,
    0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21,
    0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73,
    0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec,
    0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d,
    0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b,
    0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5,
    0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6,
    0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4,
    0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b,
    0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85,
    0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e,
    0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07,
    0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00,
}
20 vendor/k8s.io/api/authentication/v1/generated.proto generated vendored
@@ -118,14 +118,6 @@ message TokenReviewSpec {
  // Token is the opaque bearer token.
  // +optional
  optional string token = 1;

  // Audiences is a list of the identifiers that the resource server presented
  // with the token identifies as. Audience-aware token authenticators will
  // verify that the token was intended for at least one of the audiences in
  // this list. If no audiences are provided, the audience will default to the
  // audience of the Kubernetes apiserver.
  // +optional
  repeated string audiences = 2;
}

// TokenReviewStatus is the result of the token authentication request.

@@ -138,18 +130,6 @@ message TokenReviewStatus {
  // +optional
  optional UserInfo user = 2;

  // Audiences are audience identifiers chosen by the authenticator that are
  // compatible with both the TokenReview and token. An identifier is any
  // identifier in the intersection of the TokenReviewSpec audiences and the
  // token's audiences. A client of the TokenReview API that sets the
  // spec.audiences field should validate that a compatible audience identifier
  // is returned in the status.audiences field to ensure that the TokenReview
  // server is audience aware. If a TokenReview returns an empty
  // status.audience field where status.authenticated is "true", the token is
  // valid against the audience of the Kubernetes API server.
  // +optional
  repeated string audiences = 4;

  // Error indicates that the token couldn't be checked
  // +optional
  optional string error = 3;
18 vendor/k8s.io/api/authentication/v1/types.go generated vendored
@@ -64,13 +64,6 @@ type TokenReviewSpec struct {
    // Token is the opaque bearer token.
    // +optional
    Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"`
    // Audiences is a list of the identifiers that the resource server presented
    // with the token identifies as. Audience-aware token authenticators will
    // verify that the token was intended for at least one of the audiences in
    // this list. If no audiences are provided, the audience will default to the
    // audience of the Kubernetes apiserver.
    // +optional
    Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"`
}

// TokenReviewStatus is the result of the token authentication request.

@@ -81,17 +74,6 @@ type TokenReviewStatus struct {
    // User is the UserInfo associated with the provided token.
    // +optional
    User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"`
    // Audiences are audience identifiers chosen by the authenticator that are
    // compatible with both the TokenReview and token. An identifier is any
    // identifier in the intersection of the TokenReviewSpec audiences and the
    // token's audiences. A client of the TokenReview API that sets the
    // spec.audiences field should validate that a compatible audience identifier
    // is returned in the status.audiences field to ensure that the TokenReview
    // server is audience aware. If a TokenReview returns an empty
    // status.audience field where status.authenticated is "true", the token is
    // valid against the audience of the Kubernetes API server.
    // +optional
    Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"`
    // Error indicates that the token couldn't be checked
    // +optional
    Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"`
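The Audiences fields added to TokenReviewSpec and TokenReviewStatus above are what audience-aware callers drive. A hedged sketch of how a client might exercise them through client-go of this release line (the context-free Create signature and the clientset wiring are assumed, not shown by this diff):

package tokenreview

import (
    "fmt"

    authv1 "k8s.io/api/authentication/v1"
    "k8s.io/client-go/kubernetes"
)

// reviewToken asks the API server whether token is valid for a specific
// audience, then inspects the echoed status fields as the comments above advise.
func reviewToken(cs kubernetes.Interface, token string) error {
    tr := &authv1.TokenReview{
        Spec: authv1.TokenReviewSpec{
            Token:     token,
            Audiences: []string{"https://kubernetes.default.svc"},
        },
    }
    result, err := cs.AuthenticationV1().TokenReviews().Create(tr)
    if err != nil {
        return err
    }
    // An audience-aware server returns a compatible audience in
    // status.audiences; callers that set spec.audiences should verify it.
    fmt.Println(result.Status.Authenticated, result.Status.Audiences)
    return nil
}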
Some files were not shown because too many files have changed in this diff.