Mirror of https://github.com/kubernetes/sample-controller.git (synced 2026-02-15 00:07:52 +08:00)
Compare commits
24 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | fbbe6c0825 |  |
|  | f3ef788c12 |  |
|  | 0a129f69be |  |
|  | 862e188e78 |  |
|  | 135c4c60c0 |  |
|  | 57c108ffac |  |
|  | e2a931b1e0 |  |
|  | 61b0fff013 |  |
|  | 94623ba852 |  |
|  | 7cbc2364ff |  |
|  | e0ad4791f7 |  |
|  | 6dcce25719 |  |
|  | 30da3eec49 |  |
|  | 991499de7e |  |
|  | 1509cb1ff3 |  |
|  | 5561807b20 |  |
|  | 0b991a6b31 |  |
|  | 3e0e216b84 |  |
|  | a10f7a69f2 |  |
|  | 91f2f63250 |  |
|  | 085c1da033 |  |
|  | 43a04a6884 |  |
|  | cc082a837d |  |
|  | c11b6bcaeb |  |
```diff
@@ -4,4 +4,4 @@ Do not open pull requests directly against this repository, they will be ignored
 
 This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
 
-Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information
+Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information
```
634 Godeps/Godeps.json (generated)
File diff suppressed because it is too large
2 Godeps/OWNERS (generated)
```diff
@@ -1,4 +1,2 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
 approvers:
 - dep-approvers
```
5 OWNERS
```diff
@@ -1,12 +1,9 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
 approvers:
 - sttts
 - munnerz
 reviewers:
+- gregory-m
+- kargakis
 - sttts
 - munnerz
 - nikhita
-labels:
-- sig/api-machinery
```
12 README.md
````diff
@@ -3,8 +3,6 @@
 This repository implements a simple controller for watching Foo resources as
 defined with a CustomResourceDefinition (CRD).
 
-**Note:** go-get or vendor this package as `k8s.io/sample-controller`.
-
 This particular example demonstrates how to perform basic operations such as:
 
 * How to register a new custom resource (custom resource type) of type `Foo` using a CustomResourceDefinition.
@@ -42,10 +40,7 @@ This is an example of how to build a kube-like controller with a single type.
 
 ```sh
 # assumes you have a working kubeconfig, not required if operating in-cluster
-$ go get k8s.io/sample-controller
-$ cd $GOPATH/src/k8s.io/sample-controller
-$ go build -o sample-controller .
-$ ./sample-controller -kubeconfig=$HOME/.kube/config
+$ go run *.go -kubeconfig=$HOME/.kube/config
 
 # create a CustomResourceDefinition
 $ kubectl create -f artifacts/examples/crd.yaml
@@ -107,8 +102,9 @@ $ kubectl create -f artifacts/examples/crd-validation.yaml
 
 ## Subresources
 
-Custom Resources support `/status` and `/scale` subresources as a [beta feature](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#subresources) in v1.11 and is enabled by default.
-This feature is [alpha](https://v1-10.docs.kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#subresources) in v1.10 and to enable it you need to set the `CustomResourceSubresources` feature gate on the [kube-apiserver](https://kubernetes.io/docs/admin/kube-apiserver):
+Custom Resources support `/status` and `/scale` subresources as an
+[alpha feature](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#subresources) in v1.10.
+Enable this feature using the `CustomResourceSubresources` feature gate on the [kube-apiserver](https://kubernetes.io/docs/admin/kube-apiserver):
 
 ```sh
 --feature-gates=CustomResourceSubresources=true
````
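The README hunk above switches between the v1.10 alpha wording and the v1.11 beta wording for the `/status` and `/scale` subresources. As a rough illustration of what the `/status` subresource enables, here is a hedged Go sketch of a status update through the generated clientset. The helper name `updateFooStatus` is hypothetical; the sketch assumes the older `pkg/client` import layout visible in this diff and the `AvailableReplicas` field from this repository's `FooStatus` type.

```go
package foostatus

import (
	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
)

// updateFooStatus sketches why the /status subresource matters: with the
// CustomResourceSubresources gate enabled, UpdateStatus persists only the
// status stanza, so a controller cannot accidentally clobber the spec.
func updateFooStatus(client clientset.Interface, foo *samplev1alpha1.Foo, availableReplicas int32) error {
	// Never mutate the lister's cached object; work on a deep copy.
	fooCopy := foo.DeepCopy()
	fooCopy.Status.AvailableReplicas = availableReplicas
	_, err := client.SamplecontrollerV1alpha1().Foos(foo.Namespace).UpdateStatus(fooCopy)
	return err
}
```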
controller.go
```diff
@@ -20,12 +20,13 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/golang/glog"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	appsinformers "k8s.io/client-go/informers/apps/v1"
 	"k8s.io/client-go/kubernetes"
@@ -35,13 +36,12 @@ import (
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
-	"k8s.io/klog"
 
 	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
-	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
-	samplescheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
-	informers "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
-	listers "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
+	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
+	samplescheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
+	informers "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1"
+	listers "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
 )
 
 const controllerAgentName = "sample-controller"
@@ -94,10 +94,10 @@ func NewController(
 	// Create event broadcaster
 	// Add sample-controller types to the default Kubernetes Scheme so Events can be
 	// logged for sample-controller types.
-	utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
-	klog.V(4).Info("Creating event broadcaster")
+	samplescheme.AddToScheme(scheme.Scheme)
+	glog.V(4).Info("Creating event broadcaster")
 	eventBroadcaster := record.NewBroadcaster()
-	eventBroadcaster.StartLogging(klog.Infof)
+	eventBroadcaster.StartLogging(glog.Infof)
 	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
 	recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
 
@@ -112,7 +112,7 @@ func NewController(
 		recorder:          recorder,
 	}
 
-	klog.Info("Setting up event handlers")
+	glog.Info("Setting up event handlers")
 	// Set up an event handler for when Foo resources change
 	fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
 		AddFunc: controller.enqueueFoo,
```
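The `NewController` hunk above is more than a glog/klog swap: one side wraps scheme registration in `utilruntime.Must`, while the other silently drops the error returned by `samplescheme.AddToScheme`. A minimal, self-contained sketch of the difference; the `addKnownTypes` stub is hypothetical and stands in for a generated registration function.

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// addKnownTypes stands in for a generated AddToScheme: it can fail, and
// that failure used to be silently ignored.
func addKnownTypes(s *runtime.Scheme) error {
	// ... s.AddKnownTypes(...) calls would go here ...
	return nil
}

func main() {
	s := runtime.NewScheme()

	// Older style: a failed registration is invisible until Events are dropped.
	addKnownTypes(s)

	// Newer style: utilruntime.Must panics on a non-nil error, so broken
	// scheme wiring fails loudly at process startup.
	utilruntime.Must(addKnownTypes(s))

	fmt.Println("types registered")
}
```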
```diff
@@ -149,27 +149,27 @@ func NewController(
 // is closed, at which point it will shutdown the workqueue and wait for
 // workers to finish processing their current work items.
 func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
-	defer utilruntime.HandleCrash()
+	defer runtime.HandleCrash()
 	defer c.workqueue.ShutDown()
 
 	// Start the informer factories to begin populating the informer caches
-	klog.Info("Starting Foo controller")
+	glog.Info("Starting Foo controller")
 
 	// Wait for the caches to be synced before starting workers
-	klog.Info("Waiting for informer caches to sync")
+	glog.Info("Waiting for informer caches to sync")
 	if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}
 
-	klog.Info("Starting workers")
+	glog.Info("Starting workers")
 	// Launch two workers to process Foo resources
 	for i := 0; i < threadiness; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
 
-	klog.Info("Started workers")
+	glog.Info("Started workers")
 	<-stopCh
-	klog.Info("Shutting down workers")
+	glog.Info("Shutting down workers")
 
 	return nil
 }
```
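`Run` starts `threadiness` workers with `wait.Until`. A small runnable sketch of those semantics: the function is re-invoked every period until the stop channel closes, which is what lets a worker pass that returns keep serving the queue instead of exiting for good.

```go
package main

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	stopCh := make(chan struct{})

	// Each invocation is one "worker pass"; wait.Until schedules the next
	// pass one second after the previous one returns.
	go wait.Until(func() { fmt.Println("worker tick") }, time.Second, stopCh)

	time.Sleep(3 * time.Second)
	close(stopCh) // closing stopCh ends the loop, mirroring <-stopCh in Run
}
```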
```diff
@@ -212,25 +212,23 @@ func (c *Controller) processNextWorkItem() bool {
 			// Forget here else we'd go into a loop of attempting to
 			// process a work item that is invalid.
 			c.workqueue.Forget(obj)
-			utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
+			runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
 			return nil
 		}
 		// Run the syncHandler, passing it the namespace/name string of the
 		// Foo resource to be synced.
 		if err := c.syncHandler(key); err != nil {
-			// Put the item back on the workqueue to handle any transient errors.
-			c.workqueue.AddRateLimited(key)
-			return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
+			return fmt.Errorf("error syncing '%s': %s", key, err.Error())
 		}
 		// Finally, if no error occurs we Forget this item so it does not
 		// get queued again until another change happens.
 		c.workqueue.Forget(obj)
-		klog.Infof("Successfully synced '%s'", key)
+		glog.Infof("Successfully synced '%s'", key)
 		return nil
 	}(obj)
 
 	if err != nil {
-		utilruntime.HandleError(err)
+		runtime.HandleError(err)
 		return true
 	}
 
```
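The `processNextWorkItem` hunk is where the two sides differ behaviorally: one re-enqueues a failed key with `AddRateLimited`, the other only reports the error. A compact sketch of the `Forget`/`AddRateLimited` contract, assuming a `workqueue.RateLimitingInterface` of the same client-go vintage as this diff:

```go
package queueutil

import "k8s.io/client-go/util/workqueue"

// ProcessOne drains a single item and encodes the retry policy:
// Forget on success or on malformed items, AddRateLimited on transient errors.
func ProcessOne(queue workqueue.RateLimitingInterface, sync func(key string) error) bool {
	obj, shutdown := queue.Get()
	if shutdown {
		return false
	}
	defer queue.Done(obj)

	key, ok := obj.(string)
	if !ok {
		queue.Forget(obj) // never retry an item we cannot even interpret
		return true
	}
	if err := sync(key); err != nil {
		queue.AddRateLimited(key) // retry later with exponential backoff
		return true
	}
	queue.Forget(key) // success clears the key's rate-limiting history
	return true
}
```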
```diff
@@ -244,7 +242,7 @@ func (c *Controller) syncHandler(key string) error {
 	// Convert the namespace/name string into a distinct namespace and name
 	namespace, name, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {
-		utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
+		runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
 		return nil
 	}
 
@@ -254,7 +252,7 @@ func (c *Controller) syncHandler(key string) error {
 		// The Foo resource may no longer exist, in which case we stop
 		// processing.
 		if errors.IsNotFound(err) {
-			utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
+			runtime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
 			return nil
 		}
 
@@ -266,7 +264,7 @@ func (c *Controller) syncHandler(key string) error {
 		// We choose to absorb the error here as the worker would requeue the
 		// resource otherwise. Instead, the next time the resource is updated
 		// the resource will be queued again.
-		utilruntime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
+		runtime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
 		return nil
 	}
 
@@ -296,7 +294,7 @@ func (c *Controller) syncHandler(key string) error {
 	// number does not equal the current desired replicas on the Deployment, we
 	// should update the Deployment resource.
 	if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
-		klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
+		glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
 		deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo))
 	}
 
@@ -339,7 +337,7 @@ func (c *Controller) enqueueFoo(obj interface{}) {
 	var key string
 	var err error
 	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
-		utilruntime.HandleError(err)
+		runtime.HandleError(err)
 		return
 	}
 	c.workqueue.AddRateLimited(key)
@@ -356,17 +354,17 @@ func (c *Controller) handleObject(obj interface{}) {
 	if object, ok = obj.(metav1.Object); !ok {
 		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
+			runtime.HandleError(fmt.Errorf("error decoding object, invalid type"))
 			return
 		}
 		object, ok = tombstone.Obj.(metav1.Object)
 		if !ok {
-			utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
+			runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
 			return
 		}
-		klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
+		glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
 	}
-	klog.V(4).Infof("Processing object: %s", object.GetName())
+	glog.V(4).Infof("Processing object: %s", object.GetName())
 	if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
 		// If this object is not owned by a Foo, we should not do anything more
 		// with it.
@@ -376,7 +374,7 @@ func (c *Controller) handleObject(obj interface{}) {
 
 		foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
 		if err != nil {
-			klog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
+			glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
 			return
 		}
 
```
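`handleObject` above maps a changed Deployment back to the Foo that owns it. A runnable sketch of that mechanism using the same `metav1.GetControllerOf` helper; the object values here are made up for the example.

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	controller := true
	dep := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{
			Name: "example-foo-deployment",
			OwnerReferences: []metav1.OwnerReference{{
				APIVersion: "samplecontroller.k8s.io/v1alpha1",
				Kind:       "Foo",
				Name:       "example-foo",
				Controller: &controller,
			}},
		},
	}

	// GetControllerOf returns the OwnerReference with Controller=true, if any;
	// handleObject uses it to enqueue the owning Foo rather than the Deployment.
	if ownerRef := metav1.GetControllerOf(dep); ownerRef != nil && ownerRef.Kind == "Foo" {
		fmt.Printf("enqueue Foo %q\n", ownerRef.Name)
	}
}
```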
controller_test.go
```diff
@@ -34,8 +34,8 @@ import (
 	"k8s.io/client-go/tools/record"
 
 	samplecontroller "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
-	"k8s.io/sample-controller/pkg/generated/clientset/versioned/fake"
-	informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
+	"k8s.io/sample-controller/pkg/client/clientset/versioned/fake"
+	informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
 )
 
 var (
@@ -198,7 +198,7 @@ func checkAction(expected, actual core.Action, t *testing.T) {
 		expPatch := e.GetPatch()
 		patch := a.GetPatch()
 
-		if !reflect.DeepEqual(expPatch, patch) {
+		if !reflect.DeepEqual(expPatch, expPatch) {
 			t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
 				a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expPatch, patch))
 		}
```
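The test file builds on the generated fake clientset imported above (note the second hunk: comparing `expPatch` against `patch` is the correct form, since `reflect.DeepEqual(expPatch, expPatch)` is always true). A minimal sketch of using that fake in isolation, assuming the older `pkg/client` layout and the `Get(name, options)` signature visible elsewhere in this diff:

```go
package controller_test

import (
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	"k8s.io/sample-controller/pkg/client/clientset/versioned/fake"
)

func TestFakeClientTracksObjects(t *testing.T) {
	foo := &samplev1alpha1.Foo{
		ObjectMeta: metav1.ObjectMeta{Name: "example-foo", Namespace: "default"},
	}

	// NewSimpleClientset seeds an in-memory object tracker; no API server needed.
	client := fake.NewSimpleClientset(foo)

	got, err := client.SamplecontrollerV1alpha1().Foos("default").Get("example-foo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != foo.Name {
		t.Errorf("got %q, want %q", got.Name, foo.Name)
	}
}
```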
hack/update-codegen.sh
```diff
@@ -18,18 +18,18 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
-CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
+SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
+CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
 
 # generate the code with:
 # --output-base because this script should also be able to run inside the vendor dir of
 # k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
 # instead of the $GOPATH directly. For normal projects this can be dropped.
-"${CODEGEN_PKG}"/generate-groups.sh "deepcopy,client,informer,lister" \
-  k8s.io/sample-controller/pkg/generated k8s.io/sample-controller/pkg/apis \
+${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
+  k8s.io/sample-controller/pkg/client k8s.io/sample-controller/pkg/apis \
   samplecontroller:v1alpha1 \
-  --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
-  --go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt
+  --output-base "$(dirname ${BASH_SOURCE})/../../.." \
+  --go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
 
-# To use your own boilerplate text append:
-#   --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt
+# To use your own boilerplate text use:
+#   --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt
```
hack/verify-codegen.sh
```diff
@@ -18,7 +18,7 @@ set -o errexit
 set -o nounset
 set -o pipefail
 
-SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
+SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
 
 DIFFROOT="${SCRIPT_ROOT}/pkg"
 TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg"
```
20 main.go
```diff
@@ -20,15 +20,15 @@ import (
 	"flag"
 	"time"
 
+	"github.com/golang/glog"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/clientcmd"
-	"k8s.io/klog"
 	// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
 	// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 
-	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
-	informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
+	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
+	informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
 	"k8s.io/sample-controller/pkg/signals"
 )
@@ -45,17 +45,17 @@ func main() {
 
 	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
 	if err != nil {
-		klog.Fatalf("Error building kubeconfig: %s", err.Error())
+		glog.Fatalf("Error building kubeconfig: %s", err.Error())
 	}
 
 	kubeClient, err := kubernetes.NewForConfig(cfg)
 	if err != nil {
-		klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
+		glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
 	}
 
 	exampleClient, err := clientset.NewForConfig(cfg)
 	if err != nil {
-		klog.Fatalf("Error building example clientset: %s", err.Error())
+		glog.Fatalf("Error building example clientset: %s", err.Error())
 	}
 
 	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
@@ -65,13 +65,11 @@ func main() {
 		kubeInformerFactory.Apps().V1().Deployments(),
 		exampleInformerFactory.Samplecontroller().V1alpha1().Foos())
 
-	// notice that there is no need to run Start methods in a separate goroutine. (i.e. go kubeInformerFactory.Start(stopCh)
-	// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
-	kubeInformerFactory.Start(stopCh)
-	exampleInformerFactory.Start(stopCh)
+	go kubeInformerFactory.Start(stopCh)
+	go exampleInformerFactory.Start(stopCh)
 
 	if err = controller.Run(2, stopCh); err != nil {
-		klog.Fatalf("Error running controller: %s", err.Error())
+		glog.Fatalf("Error running controller: %s", err.Error())
 	}
 }
```
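The last `main.go` hunk captures a real semantic point: `SharedInformerFactory.Start` is non-blocking (it spawns a goroutine per registered informer), so wrapping it in `go ...` is unnecessary. A sketch of the intended startup sequence; the `startInformers` helper is hypothetical.

```go
package informerutil

import (
	"time"

	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// startInformers illustrates why main.go needs no `go factory.Start(...)`:
// Start launches its informers in the background and returns immediately,
// while WaitForCacheSync is the step that actually blocks.
func startInformers(kubeClient kubernetes.Interface, stopCh <-chan struct{}) bool {
	factory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
	deployments := factory.Apps().V1().Deployments()

	factory.Start(stopCh) // non-blocking

	return cache.WaitForCacheSync(stopCh, deployments.Informer().HasSynced)
}
```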
pkg/apis/samplecontroller/v1alpha1/doc.go
```diff
@@ -15,7 +15,7 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +groupName=samplecontroller.k8s.io
 
 // Package v1alpha1 is the v1alpha1 version of the API.
+// +groupName=samplecontroller.k8s.io
 package v1alpha1
```
pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go
```diff
@@ -90,8 +90,12 @@ func (in *FooSpec) DeepCopyInto(out *FooSpec) {
 	*out = *in
 	if in.Replicas != nil {
 		in, out := &in.Replicas, &out.Replicas
-		*out = new(int32)
-		**out = **in
+		if *in == nil {
+			*out = nil
+		} else {
+			*out = new(int32)
+			**out = **in
+		}
 	}
 	return
 }
```
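The `DeepCopyInto` hunk reflects a deepcopy-gen cleanup: inside `if in.Replicas != nil`, the inner `if *in == nil` branch can never fire (`in` is `&in.Replicas`, already known non-nil), so newer generators emit the two-line copy directly. A hand-rolled, runnable equivalent for a `*int32` field:

```go
package main

import "fmt"

// deepCopyInt32Ptr mirrors what the generated code does for FooSpec.Replicas:
// allocate a fresh int32 and copy the value, so the copy shares no memory
// with the original.
func deepCopyInt32Ptr(in *int32) *int32 {
	if in == nil {
		return nil
	}
	out := new(int32)
	*out = *in
	return out
}

func main() {
	n := int32(3)
	c := deepCopyInt32Ptr(&n)
	n = 5              // mutating the source must not affect the copy
	fmt.Println(*c)    // prints 3
}
```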
```diff
@@ -22,7 +22,7 @@ import (
 	discovery "k8s.io/client-go/discovery"
 	rest "k8s.io/client-go/rest"
 	flowcontrol "k8s.io/client-go/util/flowcontrol"
-	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
+	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
 )
 
 type Interface interface {
```
```diff
@@ -24,9 +24,9 @@ import (
 	"k8s.io/client-go/discovery"
 	fakediscovery "k8s.io/client-go/discovery/fake"
 	"k8s.io/client-go/testing"
-	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
-	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
-	fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
+	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
+	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
+	fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
 )
 
 // NewSimpleClientset returns a clientset that will respond with the provided objects.
```
```diff
@@ -23,15 +23,16 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
 )
 
 var scheme = runtime.NewScheme()
 var codecs = serializer.NewCodecFactory(scheme)
 var parameterCodec = runtime.NewParameterCodec(scheme)
-var localSchemeBuilder = runtime.SchemeBuilder{
-	samplecontrollerv1alpha1.AddToScheme,
-}
+
+func init() {
+	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
+	AddToScheme(scheme)
+}
 
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -44,13 +45,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 //   )
 //
 //   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.
-var AddToScheme = localSchemeBuilder.AddToScheme
-
-func init() {
-	v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
-	utilruntime.Must(AddToScheme(scheme))
-}
+func AddToScheme(scheme *runtime.Scheme) {
+	samplecontrollerv1alpha1.AddToScheme(scheme)
+}
```
```diff
@@ -23,15 +23,16 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
 )
 
 var Scheme = runtime.NewScheme()
 var Codecs = serializer.NewCodecFactory(Scheme)
 var ParameterCodec = runtime.NewParameterCodec(Scheme)
-var localSchemeBuilder = runtime.SchemeBuilder{
-	samplecontrollerv1alpha1.AddToScheme,
-}
+
+func init() {
+	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+	AddToScheme(Scheme)
+}
 
 // AddToScheme adds all types of this clientset into the given scheme. This allows composition
@@ -44,13 +45,10 @@ var localSchemeBuilder = runtime.SchemeBuilder{
 //   )
 //
 //   kclientset, _ := kubernetes.NewForConfig(c)
-//   _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//   aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
 //
 // After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
 // correctly.
-var AddToScheme = localSchemeBuilder.AddToScheme
-
-func init() {
-	v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
-	utilruntime.Must(AddToScheme(Scheme))
-}
+func AddToScheme(scheme *runtime.Scheme) {
+	samplecontrollerv1alpha1.AddToScheme(scheme)
+}
```
```diff
@@ -131,7 +131,7 @@ func (c *FakeFoos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
 // Patch applies the patch and returns the patched foo.
 func (c *FakeFoos) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
 	obj, err := c.Fake.
-		Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, pt, data, subresources...), &v1alpha1.Foo{})
+		Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, data, subresources...), &v1alpha1.Foo{})
 
 	if obj == nil {
 		return nil, err
```
```diff
@@ -21,7 +21,7 @@ package fake
 import (
 	rest "k8s.io/client-go/rest"
 	testing "k8s.io/client-go/testing"
-	v1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
+	v1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
 )
 
 type FakeSamplecontrollerV1alpha1 struct {
```
```diff
@@ -19,14 +19,12 @@ limitations under the License.
 package v1alpha1
 
 import (
-	"time"
-
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	types "k8s.io/apimachinery/pkg/types"
 	watch "k8s.io/apimachinery/pkg/watch"
 	rest "k8s.io/client-go/rest"
 	v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
-	scheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
+	scheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
 )
 
 // FoosGetter has a method to return a FooInterface.
@@ -78,16 +76,11 @@ func (c *foos) Get(name string, options v1.GetOptions) (result *v1alpha1.Foo, er
 
 // List takes label and field selectors, and returns the list of Foos that match those selectors.
 func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	result = &v1alpha1.FooList{}
 	err = c.client.Get().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Do().
 		Into(result)
 	return
@@ -95,16 +88,11 @@ func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
 
 // Watch returns a watch.Interface that watches the requested foos.
 func (c *foos) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	var timeout time.Duration
-	if opts.TimeoutSeconds != nil {
-		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
-	}
 	opts.Watch = true
 	return c.client.Get().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&opts, scheme.ParameterCodec).
-		Timeout(timeout).
 		Watch()
 }
@@ -162,15 +150,10 @@ func (c *foos) Delete(name string, options *v1.DeleteOptions) error {
 
 // DeleteCollection deletes a collection of objects.
 func (c *foos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	var timeout time.Duration
-	if listOptions.TimeoutSeconds != nil {
-		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
-	}
 	return c.client.Delete().
 		Namespace(c.ns).
 		Resource("foos").
 		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Timeout(timeout).
 		Body(options).
 		Do().
 		Error()
```
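The typed `foos` client above is what callers consume. A minimal sketch of end-to-end usage, assuming the older `pkg/client` layout and the pre-`context` `List(opts v1.ListOptions)` signature shown in this diff:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
)

func main() {
	// Build client config from the default kubeconfig location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List goes through the generated REST wrapper shown above:
	// GET /apis/samplecontroller.k8s.io/v1alpha1/namespaces/default/foos
	foos, err := client.SamplecontrollerV1alpha1().Foos("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, f := range foos.Items {
		fmt.Println(f.Name)
	}
}
```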
```diff
@@ -22,7 +22,7 @@ import (
 	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
 	rest "k8s.io/client-go/rest"
 	v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
-	"k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
+	"k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
 )
 
 type SamplecontrollerV1alpha1Interface interface {
```
```diff
@@ -27,9 +27,9 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	schema "k8s.io/apimachinery/pkg/runtime/schema"
 	cache "k8s.io/client-go/tools/cache"
-	versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
-	internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
-	samplecontroller "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller"
+	versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
+	internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
+	samplecontroller "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller"
 )
 
 // SharedInformerOption defines the functional option type for SharedInformerFactory.
```
```diff
@@ -24,10 +24,9 @@ import (
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	cache "k8s.io/client-go/tools/cache"
-	versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
+	versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
 )
 
 // NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
 type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
 
 // SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -36,5 +35,4 @@ type SharedInformerFactory interface {
 	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
 }
 
-// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
-type TweakListOptionsFunc func(*v1.ListOptions)
```
```diff
@@ -19,8 +19,8 @@ limitations under the License.
 package samplecontroller
 
 import (
-	internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
-	v1alpha1 "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
+	internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha1 "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1"
 )
 
 // Interface provides access to each of this group's versions.
```
```diff
@@ -25,10 +25,10 @@ import (
 	runtime "k8s.io/apimachinery/pkg/runtime"
 	watch "k8s.io/apimachinery/pkg/watch"
 	cache "k8s.io/client-go/tools/cache"
-	samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
-	versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
-	internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
-	v1alpha1 "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
+	samplecontroller_v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
+	versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
+	internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
+	v1alpha1 "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
 )
 
 // FooInformer provides access to a shared informer and lister for
@@ -70,7 +70,7 @@ func NewFilteredFooInformer(client versioned.Interface, namespace string, resync
 				return client.SamplecontrollerV1alpha1().Foos(namespace).Watch(options)
 			},
 		},
-		&samplecontrollerv1alpha1.Foo{},
+		&samplecontroller_v1alpha1.Foo{},
 		resyncPeriod,
 		indexers,
 	)
@@ -81,7 +81,7 @@ func (f *fooInformer) defaultInformer(client versioned.Interface, resyncPeriod t
 }
 
 func (f *fooInformer) Informer() cache.SharedIndexInformer {
-	return f.factory.InformerFor(&samplecontrollerv1alpha1.Foo{}, f.defaultInformer)
+	return f.factory.InformerFor(&samplecontroller_v1alpha1.Foo{}, f.defaultInformer)
 }
 
 func (f *fooInformer) Lister() v1alpha1.FooLister {
@@ -19,7 +19,7 @@ limitations under the License.
 package v1alpha1
 
 import (
-	internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
+	internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
 )
 
 // Interface provides access to all the informers in this group version.
```
16 vendor/github.com/evanphx/json-patch/.travis.yml (generated, vendored)
@@ -1,16 +0,0 @@ (entire file removed)
```yaml
language: go

go:
- 1.8
- 1.7

install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- go get github.com/jessevdk/go-flags

script:
- go get
- go test -cover ./...

notifications:
  email: false
```
25 vendor/github.com/evanphx/json-patch/LICENSE (generated, vendored)
@@ -1,25 +0,0 @@ (entire file removed)
```text
Copyright (c) 2014, Evan Phoenix
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
  may be used to endorse or promote products derived from this software
  without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
```
297 vendor/github.com/evanphx/json-patch/README.md (generated, vendored)
@@ -1,297 +0,0 @@ (entire file removed)
````markdown
# JSON-Patch
`jsonpatch` is a library which provides functionallity for both applying
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).

[GoDoc](http://godoc.org/github.com/evanphx/json-patch)
[Build Status](https://travis-ci.org/evanphx/json-patch)
[Go Report Card](https://goreportcard.com/report/github.com/evanphx/json-patch)

# Get It!

**Latest and greatest**:
```bash
go get -u github.com/evanphx/json-patch
```

**Stable Versions**:
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`

(previous versions below `v3` are unavailable)

# Use It!
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
* [Comparing JSON documents](#comparing-json-documents)
* [Combine merge patches](#combine-merge-patches)


# Configuration

* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
This defaults to `true` and enables the non-standard practice of allowing
negative indices to mean indices starting at the end of an array. This
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
false`.

* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit.

## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document.

It can describe the changes needed to convert from the original to the
modified JSON document.

Once you have a merge patch, you can apply it to other JSON documents using the
`jsonpatch.MergePatch(document, patch)` function.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// Let's create a merge patch from these two documents...
	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
	target := []byte(`{"name": "Jane", "age": 24}`)

	patch, err := jsonpatch.CreateMergePatch(original, target)
	if err != nil {
		panic(err)
	}

	// Now lets apply the patch against a different JSON document...

	alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
	modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)

	fmt.Printf("patch document: %s\n", patch)
	fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
}
```

When ran, you get the following output:

```bash
$ go run main.go
patch document: {"height":null,"name":"Jane"}
updated tina doc: {"age":28,"name":"Jane"}
```

## Create and apply a JSON Patch
You can create patch objects using `DecodePatch([]byte)`, which can then
be applied against JSON documents.

The following is an example of creating a patch from two operations, and
applying it against a JSON document.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
	patchJSON := []byte(`[
		{"op": "replace", "path": "/name", "value": "Jane"},
		{"op": "remove", "path": "/height"}
	]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	modified, err := patch.Apply(original)
	if err != nil {
		panic(err)
	}

	fmt.Printf("Original document: %s\n", original)
	fmt.Printf("Modified document: %s\n", modified)
}
```

When ran, you get the following output:

```bash
$ go run main.go
Original document: {"name": "John", "age": 24, "height": 3.21}
Modified document: {"age":24,"name":"Jane"}
```

## Comparing JSON documents
Due to potential whitespace and ordering differences, one cannot simply compare
JSON strings or byte-arrays directly.

As such, you can instead use `jsonpatch.Equal(document1, document2)` to
determine if two JSON documents are _structurally_ equal. This ignores
whitespace differences, and key-value ordering.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
	similar := []byte(`
		{
			"age": 24,
			"height": 3.21,
			"name": "John"
		}
	`)
	different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)

	if jsonpatch.Equal(original, similar) {
		fmt.Println(`"original" is structurally equal to "similar"`)
	}

	if !jsonpatch.Equal(original, different) {
		fmt.Println(`"original" is _not_ structurally equal to "similar"`)
	}
}
```

When ran, you get the following output:
```bash
$ go run main.go
"original" is structurally equal to "similar"
"original" is _not_ structurally equal to "similar"
```

## Combine merge patches
Given two JSON merge patch documents, it is possible to combine them into a
single merge patch which can describe both set of changes.

The resulting merge patch can be used such that applying it results in a
document structurally similar as merging each merge patch to the document
in succession.

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)

	nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
	ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)

	// Let's combine these merge patch documents...
	combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
	if err != nil {
		panic(err)
	}

	// Apply each patch individual against the original document
	withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
	if err != nil {
		panic(err)
	}

	withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
	if err != nil {
		panic(err)
	}

	// Apply the combined patch against the original document

	withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
	if err != nil {
		panic(err)
	}

	// Do both result in the same thing? They should!
	if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
		fmt.Println("Both JSON documents are structurally the same!")
	}

	fmt.Printf("combined merge patch: %s", combinedPatch)
}
```

When ran, you get the following output:
```bash
$ go run main.go
Both JSON documents are structurally the same!
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
```

# CLI for comparing JSON documents
You can install the commandline program `json-patch`.

This program can take multiple JSON patch documents as arguments,
and fed a JSON document from `stdin`. It will apply the patch(es) against
the document and output the modified doc.

**patch.1.json**
```json
[
    {"op": "replace", "path": "/name", "value": "Jane"},
    {"op": "remove", "path": "/height"}
]
```

**patch.2.json**
```json
[
    {"op": "add", "path": "/address", "value": "123 Main St"},
    {"op": "replace", "path": "/age", "value": "21"}
]
```

**document.json**
```json
{
    "name": "John",
    "age": 24,
    "height": 3.21
}
```

You can then run:

```bash
$ go install github.com/evanphx/json-patch/cmd/json-patch
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
{"address":"123 Main St","age":"21","name":"Jane"}
```

# Help It!
Contributions are welcomed! Leave [an issue](https://github.com/evanphx/json-patch/issues)
or [create a PR](https://github.com/evanphx/json-patch/compare).


Before creating a pull request, we'd ask that you make sure tests are passing
and that you have added new tests when applicable.

Contributors can run tests using:

```bash
go test -cover ./...
```

Builds for pull requests are tested automatically
using [TravisCI](https://travis-ci.org/evanphx/json-patch).
````
38 vendor/github.com/evanphx/json-patch/errors.go (generated, vendored)
@@ -1,38 +0,0 @@ (entire file removed)
```go
package jsonpatch

import "fmt"

// AccumulatedCopySizeError is an error type returned when the accumulated size
// increase caused by copy operations in a patch operation has exceeded the
// limit.
type AccumulatedCopySizeError struct {
	limit       int64
	accumulated int64
}

// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
	return &AccumulatedCopySizeError{limit: l, accumulated: a}
}

// Error implements the error interface.
func (a *AccumulatedCopySizeError) Error() string {
	return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
}

// ArraySizeError is an error type returned when the array size has exceeded
// the limit.
type ArraySizeError struct {
	limit int
	size  int
}

// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
	return &ArraySizeError{limit: l, size: s}
}

// Error implements the error interface.
func (a *ArraySizeError) Error() string {
	return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
}
```
383
vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
383
vendor/github.com/evanphx/json-patch/merge.go
generated
vendored
@@ -1,383 +0,0 @@
|
||||
package jsonpatch
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
|
||||
curDoc, err := cur.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
pruneNulls(patch)
|
||||
return patch
|
||||
}
|
||||
|
||||
patchDoc, err := patch.intoDoc()
|
||||
|
||||
if err != nil {
|
||||
return patch
|
||||
}
|
||||
|
||||
mergeDocs(curDoc, patchDoc, mergeMerge)
|
||||
|
||||
return cur
|
||||
}
|
||||
|
||||
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
|
||||
for k, v := range *patch {
|
||||
if v == nil {
|
||||
if mergeMerge {
|
||||
(*doc)[k] = nil
|
||||
} else {
|
||||
delete(*doc, k)
|
||||
}
|
||||
} else {
|
||||
cur, ok := (*doc)[k]
|
||||
|
||||
if !ok || cur == nil {
|
||||
pruneNulls(v)
|
||||
(*doc)[k] = v
|
||||
} else {
|
||||
(*doc)[k] = merge(cur, v, mergeMerge)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneNulls(n *lazyNode) {
|
||||
sub, err := n.intoDoc()
|
||||
|
||||
if err == nil {
|
||||
pruneDocNulls(sub)
|
||||
} else {
|
||||
ary, err := n.intoAry()
|
||||
|
||||
if err == nil {
|
||||
pruneAryNulls(ary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func pruneDocNulls(doc *partialDoc) *partialDoc {
|
||||
for k, v := range *doc {
|
||||
if v == nil {
|
||||
delete(*doc, k)
|
||||
} else {
|
||||
pruneNulls(v)
|
||||
}
|
||||
}
|
||||
|
||||
return doc
|
||||
}
|
||||
|
||||
func pruneAryNulls(ary *partialArray) *partialArray {
|
||||
newAry := []*lazyNode{}
|
||||
|
||||
for _, v := range *ary {
|
||||
if v != nil {
|
||||
pruneNulls(v)
|
||||
newAry = append(newAry, v)
|
||||
}
|
||||
}
|
||||
|
||||
*ary = newAry
|
||||
|
||||
return ary
|
||||
}
|
||||
|
||||
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
|
||||
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
|
||||
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
|
||||
|
||||
// MergeMergePatches merges two merge patches together, such that
|
||||
// applying this resulting merged merge patch to a document yields the same
|
||||
// as merging each merge patch to the document in succession.
|
||||
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
|
||||
return doMergePatch(patch1Data, patch2Data, true)
|
||||
}
|
||||
|
||||
// MergePatch merges the patchData into the docData.
|
||||
func MergePatch(docData, patchData []byte) ([]byte, error) {
|
||||
return doMergePatch(docData, patchData, false)
|
||||
}
|
||||
|
||||
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
|
||||
doc := &partialDoc{}
|
||||
|
||||
docErr := json.Unmarshal(docData, doc)
|
||||
|
||||
patch := &partialDoc{}
|
||||
|
||||
patchErr := json.Unmarshal(patchData, patch)
|
||||
|
||||
if _, ok := docErr.(*json.SyntaxError); ok {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
if _, ok := patchErr.(*json.SyntaxError); ok {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr == nil && *doc == nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
if patchErr == nil && *patch == nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
if docErr != nil || patchErr != nil {
|
||||
// Not an error, just not a doc, so we turn straight into the patch
|
||||
if patchErr == nil {
|
||||
if mergeMerge {
|
||||
doc = patch
|
||||
} else {
|
||||
doc = pruneDocNulls(patch)
|
||||
}
|
||||
} else {
|
||||
patchAry := &partialArray{}
|
||||
patchErr = json.Unmarshal(patchData, patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
pruneAryNulls(patchAry)
|
||||
|
||||
out, patchErr := json.Marshal(patchAry)
|
||||
|
||||
if patchErr != nil {
|
||||
return nil, errBadJSONPatch
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
|
||||
} else {
|
||||
mergeDocs(doc, patch, mergeMerge)
|
||||
}
|
||||
|
||||
return json.Marshal(doc)
|
||||
}
|
||||
|
||||
// resemblesJSONArray indicates whether the byte-slice "appears" to be
|
||||
// a JSON array or not.
|
||||
// False-positives are possible, as this function does not check the internal
|
||||
// structure of the array. It only checks that the outer syntax is present and
|
||||
// correct.
|
||||
func resemblesJSONArray(input []byte) bool {
|
||||
input = bytes.TrimSpace(input)
|
||||
|
||||
hasPrefix := bytes.HasPrefix(input, []byte("["))
|
||||
hasSuffix := bytes.HasSuffix(input, []byte("]"))
|
||||
|
||||
return hasPrefix && hasSuffix
|
||||
}
|
||||
|
||||
// CreateMergePatch will return a merge patch document capable of converting
|
||||
// the original document(s) to the modified document(s).
|
||||
// The parameters can be bytes of either two JSON Documents, or two arrays of
|
||||
// JSON documents.
|
||||
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
|
||||
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalResemblesArray := resemblesJSONArray(originalJSON)
|
||||
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
|
||||
|
||||
// Do both byte-slices seem like JSON arrays?
|
||||
if originalResemblesArray && modifiedResemblesArray {
|
||||
return createArrayMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// Are both byte-slices are not arrays? Then they are likely JSON objects...
|
||||
if !originalResemblesArray && !modifiedResemblesArray {
|
||||
return createObjectMergePatch(originalJSON, modifiedJSON)
|
||||
}
|
||||
|
||||
// None of the above? Then return an error because of mismatched types.
|
||||
return nil, errBadMergeTypes
|
||||
}
|
||||
|
||||
// createObjectMergePatch will return a merge-patch document capable of
|
||||
// converting the original document to the modified document.
|
||||
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
|
||||
originalDoc := map[string]interface{}{}
|
||||
modifiedDoc := map[string]interface{}{}
|
||||
|
||||
err := json.Unmarshal(originalJSON, &originalDoc)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, errBadJSONDoc
|
||||
}
|
||||
|
||||
dest, err := getDiff(originalDoc, modifiedDoc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return json.Marshal(dest)
|
||||
}
|
||||
|
||||
// createArrayMergePatch will return an array of merge-patch documents capable
|
||||
// of converting the original document to the modified document for each
|
||||
// pair of JSON documents provided in the arrays.
// Arrays of mismatched sizes will result in an error.
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
	originalDocs := []json.RawMessage{}
	modifiedDocs := []json.RawMessage{}

	err := json.Unmarshal(originalJSON, &originalDocs)
	if err != nil {
		return nil, errBadJSONDoc
	}

	err = json.Unmarshal(modifiedJSON, &modifiedDocs)
	if err != nil {
		return nil, errBadJSONDoc
	}

	total := len(originalDocs)
	if len(modifiedDocs) != total {
		return nil, errBadJSONDoc
	}

	result := []json.RawMessage{}
	for i := 0; i < len(originalDocs); i++ {
		original := originalDocs[i]
		modified := modifiedDocs[i]

		patch, err := createObjectMergePatch(original, modified)
		if err != nil {
			return nil, err
		}

		result = append(result, json.RawMessage(patch))
	}

	return json.Marshal(result)
}

// Returns true if the array matches (must be json types).
// As is idiomatic for go, an empty array is not the same as a nil array.
func matchesArray(a, b []interface{}) bool {
	if len(a) != len(b) {
		return false
	}
	if (a == nil && b != nil) || (a != nil && b == nil) {
		return false
	}
	for i := range a {
		if !matchesValue(a[i], b[i]) {
			return false
		}
	}
	return true
}

// Returns true if the values matches (must be json types)
// The types of the values must match, otherwise it will always return false
// If two map[string]interface{} are given, all elements must match.
func matchesValue(av, bv interface{}) bool {
	if reflect.TypeOf(av) != reflect.TypeOf(bv) {
		return false
	}
	switch at := av.(type) {
	case string:
		bt := bv.(string)
		if bt == at {
			return true
		}
	case float64:
		bt := bv.(float64)
		if bt == at {
			return true
		}
	case bool:
		bt := bv.(bool)
		if bt == at {
			return true
		}
	case nil:
		// Both nil, fine.
		return true
	case map[string]interface{}:
		bt := bv.(map[string]interface{})
		for key := range at {
			if !matchesValue(at[key], bt[key]) {
				return false
			}
		}
		for key := range bt {
			if !matchesValue(at[key], bt[key]) {
				return false
			}
		}
		return true
	case []interface{}:
		bt := bv.([]interface{})
		return matchesArray(at, bt)
	}
	return false
}

// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
	into := map[string]interface{}{}
	for key, bv := range b {
		av, ok := a[key]
		// value was added
		if !ok {
			into[key] = bv
			continue
		}
		// If types have changed, replace completely
		if reflect.TypeOf(av) != reflect.TypeOf(bv) {
			into[key] = bv
			continue
		}
		// Types are the same, compare values
		switch at := av.(type) {
		case map[string]interface{}:
			bt := bv.(map[string]interface{})
			dst := make(map[string]interface{}, len(bt))
			dst, err := getDiff(at, bt)
			if err != nil {
				return nil, err
			}
			if len(dst) > 0 {
				into[key] = dst
			}
		case string, float64, bool:
			if !matchesValue(av, bv) {
				into[key] = bv
			}
		case []interface{}:
			bt := bv.([]interface{})
			if !matchesArray(at, bt) {
				into[key] = bv
			}
		case nil:
			switch bv.(type) {
			case nil:
				// Both nil, fine.
			default:
				into[key] = bv
			}
		default:
			panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
		}
	}
	// Now add all deleted values as nil
	for key := range a {
		_, found := b[key]
		if !found {
			into[key] = nil
		}
	}
	return into, nil
}
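The helpers above are wired up by the package's exported merge-patch entry point. As a rough usage sketch (assuming the upstream `jsonpatch.CreateMergePatch` API, which is not shown in this hunk):

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"name": "foo", "replicas": 1, "paused": true}`)
	modified := []byte(`{"name": "foo", "replicas": 2}`)

	// Diffs two JSON documents into an RFC 7386 merge patch; deleted keys
	// show up as explicit nulls, exactly as getDiff above produces them.
	patch, err := jsonpatch.CreateMergePatch(original, modified)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch)) // {"paused":null,"replicas":2}
}
```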
696 vendor/github.com/evanphx/json-patch/patch.go generated vendored
@@ -1,696 +0,0 @@
package jsonpatch

import (
	"bytes"
	"encoding/json"
	"fmt"
	"strconv"
	"strings"
)

const (
	eRaw = iota
	eDoc
	eAry
)

var (
	// SupportNegativeIndices decides whether to support non-standard practice of
	// allowing negative indices to mean indices starting at the end of an array.
	// Default to true.
	SupportNegativeIndices bool = true
	// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
	// "copy" operations in a patch.
	AccumulatedCopySizeLimit int64 = 0
)

type lazyNode struct {
	raw   *json.RawMessage
	doc   partialDoc
	ary   partialArray
	which int
}

type operation map[string]*json.RawMessage

// Patch is an ordered collection of operations.
type Patch []operation

type partialDoc map[string]*lazyNode
type partialArray []*lazyNode

type container interface {
	get(key string) (*lazyNode, error)
	set(key string, val *lazyNode) error
	add(key string, val *lazyNode) error
	remove(key string) error
}

func newLazyNode(raw *json.RawMessage) *lazyNode {
	return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
}

func (n *lazyNode) MarshalJSON() ([]byte, error) {
	switch n.which {
	case eRaw:
		return json.Marshal(n.raw)
	case eDoc:
		return json.Marshal(n.doc)
	case eAry:
		return json.Marshal(n.ary)
	default:
		return nil, fmt.Errorf("Unknown type")
	}
}

func (n *lazyNode) UnmarshalJSON(data []byte) error {
	dest := make(json.RawMessage, len(data))
	copy(dest, data)
	n.raw = &dest
	n.which = eRaw
	return nil
}

func deepCopy(src *lazyNode) (*lazyNode, int, error) {
	if src == nil {
		return nil, 0, nil
	}
	a, err := src.MarshalJSON()
	if err != nil {
		return nil, 0, err
	}
	sz := len(a)
	ra := make(json.RawMessage, sz)
	copy(ra, a)
	return newLazyNode(&ra), sz, nil
}

func (n *lazyNode) intoDoc() (*partialDoc, error) {
	if n.which == eDoc {
		return &n.doc, nil
	}

	if n.raw == nil {
		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
	}

	err := json.Unmarshal(*n.raw, &n.doc)

	if err != nil {
		return nil, err
	}

	n.which = eDoc
	return &n.doc, nil
}

func (n *lazyNode) intoAry() (*partialArray, error) {
	if n.which == eAry {
		return &n.ary, nil
	}

	if n.raw == nil {
		return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
	}

	err := json.Unmarshal(*n.raw, &n.ary)

	if err != nil {
		return nil, err
	}

	n.which = eAry
	return &n.ary, nil
}

func (n *lazyNode) compact() []byte {
	buf := &bytes.Buffer{}

	if n.raw == nil {
		return nil
	}

	err := json.Compact(buf, *n.raw)

	if err != nil {
		return *n.raw
	}

	return buf.Bytes()
}

func (n *lazyNode) tryDoc() bool {
	if n.raw == nil {
		return false
	}

	err := json.Unmarshal(*n.raw, &n.doc)

	if err != nil {
		return false
	}

	n.which = eDoc
	return true
}

func (n *lazyNode) tryAry() bool {
	if n.raw == nil {
		return false
	}

	err := json.Unmarshal(*n.raw, &n.ary)

	if err != nil {
		return false
	}

	n.which = eAry
	return true
}

func (n *lazyNode) equal(o *lazyNode) bool {
	if n.which == eRaw {
		if !n.tryDoc() && !n.tryAry() {
			if o.which != eRaw {
				return false
			}

			return bytes.Equal(n.compact(), o.compact())
		}
	}

	if n.which == eDoc {
		if o.which == eRaw {
			if !o.tryDoc() {
				return false
			}
		}

		if o.which != eDoc {
			return false
		}

		for k, v := range n.doc {
			ov, ok := o.doc[k]

			if !ok {
				return false
			}

			if v == nil && ov == nil {
				continue
			}

			if !v.equal(ov) {
				return false
			}
		}

		return true
	}

	if o.which != eAry && !o.tryAry() {
		return false
	}

	if len(n.ary) != len(o.ary) {
		return false
	}

	for idx, val := range n.ary {
		if !val.equal(o.ary[idx]) {
			return false
		}
	}

	return true
}

func (o operation) kind() string {
	if obj, ok := o["op"]; ok && obj != nil {
		var op string

		err := json.Unmarshal(*obj, &op)

		if err != nil {
			return "unknown"
		}

		return op
	}

	return "unknown"
}

func (o operation) path() string {
	if obj, ok := o["path"]; ok && obj != nil {
		var op string

		err := json.Unmarshal(*obj, &op)

		if err != nil {
			return "unknown"
		}

		return op
	}

	return "unknown"
}

func (o operation) from() string {
	if obj, ok := o["from"]; ok && obj != nil {
		var op string

		err := json.Unmarshal(*obj, &op)

		if err != nil {
			return "unknown"
		}

		return op
	}

	return "unknown"
}

func (o operation) value() *lazyNode {
	if obj, ok := o["value"]; ok {
		return newLazyNode(obj)
	}

	return nil
}

func isArray(buf []byte) bool {
Loop:
	for _, c := range buf {
		switch c {
		case ' ':
		case '\n':
		case '\t':
			continue
		case '[':
			return true
		default:
			break Loop
		}
	}

	return false
}

func findObject(pd *container, path string) (container, string) {
	doc := *pd

	split := strings.Split(path, "/")

	if len(split) < 2 {
		return nil, ""
	}

	parts := split[1 : len(split)-1]

	key := split[len(split)-1]

	var err error

	for _, part := range parts {

		next, ok := doc.get(decodePatchKey(part))

		if next == nil || ok != nil {
			return nil, ""
		}

		if isArray(*next.raw) {
			doc, err = next.intoAry()

			if err != nil {
				return nil, ""
			}
		} else {
			doc, err = next.intoDoc()

			if err != nil {
				return nil, ""
			}
		}
	}

	return doc, decodePatchKey(key)
}

func (d *partialDoc) set(key string, val *lazyNode) error {
	(*d)[key] = val
	return nil
}

func (d *partialDoc) add(key string, val *lazyNode) error {
	(*d)[key] = val
	return nil
}

func (d *partialDoc) get(key string) (*lazyNode, error) {
	return (*d)[key], nil
}

func (d *partialDoc) remove(key string) error {
	_, ok := (*d)[key]
	if !ok {
		return fmt.Errorf("Unable to remove nonexistent key: %s", key)
	}

	delete(*d, key)
	return nil
}

// set should only be used to implement the "replace" operation, so "key" must
// be an already existing index in "d".
func (d *partialArray) set(key string, val *lazyNode) error {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}
	(*d)[idx] = val
	return nil
}

func (d *partialArray) add(key string, val *lazyNode) error {
	if key == "-" {
		*d = append(*d, val)
		return nil
	}

	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}

	sz := len(*d) + 1

	ary := make([]*lazyNode, sz)

	cur := *d

	if idx >= len(ary) {
		return fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	if SupportNegativeIndices {
		if idx < -len(ary) {
			return fmt.Errorf("Unable to access invalid index: %d", idx)
		}

		if idx < 0 {
			idx += len(ary)
		}
	}

	copy(ary[0:idx], cur[0:idx])
	ary[idx] = val
	copy(ary[idx+1:], cur[idx:])

	*d = ary
	return nil
}

func (d *partialArray) get(key string) (*lazyNode, error) {
	idx, err := strconv.Atoi(key)

	if err != nil {
		return nil, err
	}

	if idx >= len(*d) {
		return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	return (*d)[idx], nil
}

func (d *partialArray) remove(key string) error {
	idx, err := strconv.Atoi(key)
	if err != nil {
		return err
	}

	cur := *d

	if idx >= len(cur) {
		return fmt.Errorf("Unable to access invalid index: %d", idx)
	}

	if SupportNegativeIndices {
		if idx < -len(cur) {
			return fmt.Errorf("Unable to access invalid index: %d", idx)
		}

		if idx < 0 {
			idx += len(cur)
		}
	}

	ary := make([]*lazyNode, len(cur)-1)

	copy(ary[0:idx], cur[0:idx])
	copy(ary[idx:], cur[idx+1:])

	*d = ary
	return nil

}

func (p Patch) add(doc *container, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
	}

	return con.add(key, op.value())
}

func (p Patch) remove(doc *container, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
	}

	return con.remove(key)
}

func (p Patch) replace(doc *container, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
	}

	_, ok := con.get(key)
	if ok != nil {
		return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
	}

	return con.set(key, op.value())
}

func (p Patch) move(doc *container, op operation) error {
	from := op.from()

	con, key := findObject(doc, from)

	if con == nil {
		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
	}

	val, err := con.get(key)
	if err != nil {
		return err
	}

	err = con.remove(key)
	if err != nil {
		return err
	}

	path := op.path()

	con, key = findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
	}

	return con.add(key, val)
}

func (p Patch) test(doc *container, op operation) error {
	path := op.path()

	con, key := findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
	}

	val, err := con.get(key)

	if err != nil {
		return err
	}

	if val == nil {
		if op.value().raw == nil {
			return nil
		}
		return fmt.Errorf("Testing value %s failed", path)
	} else if op.value() == nil {
		return fmt.Errorf("Testing value %s failed", path)
	}

	if val.equal(op.value()) {
		return nil
	}

	return fmt.Errorf("Testing value %s failed", path)
}

func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
	from := op.from()

	con, key := findObject(doc, from)

	if con == nil {
		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
	}

	val, err := con.get(key)
	if err != nil {
		return err
	}

	path := op.path()

	con, key = findObject(doc, path)

	if con == nil {
		return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
	}

	valCopy, sz, err := deepCopy(val)
	if err != nil {
		return err
	}
	(*accumulatedCopySize) += int64(sz)
	if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
		return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
	}

	return con.add(key, valCopy)
}

// Equal indicates if 2 JSON documents have the same structural equality.
func Equal(a, b []byte) bool {
	ra := make(json.RawMessage, len(a))
	copy(ra, a)
	la := newLazyNode(&ra)

	rb := make(json.RawMessage, len(b))
	copy(rb, b)
	lb := newLazyNode(&rb)

	return la.equal(lb)
}

// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
	var p Patch

	err := json.Unmarshal(buf, &p)

	if err != nil {
		return nil, err
	}

	return p, nil
}

// Apply mutates a JSON document according to the patch, and returns the new
// document.
func (p Patch) Apply(doc []byte) ([]byte, error) {
	return p.ApplyIndent(doc, "")
}

// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
	var pd container
	if doc[0] == '[' {
		pd = &partialArray{}
	} else {
		pd = &partialDoc{}
	}

	err := json.Unmarshal(doc, pd)

	if err != nil {
		return nil, err
	}

	err = nil

	var accumulatedCopySize int64

	for _, op := range p {
		switch op.kind() {
		case "add":
			err = p.add(&pd, op)
		case "remove":
			err = p.remove(&pd, op)
		case "replace":
			err = p.replace(&pd, op)
		case "move":
			err = p.move(&pd, op)
		case "test":
			err = p.test(&pd, op)
		case "copy":
			err = p.copy(&pd, op, &accumulatedCopySize)
		default:
			err = fmt.Errorf("Unexpected kind: %s", op.kind())
		}

		if err != nil {
			return nil, err
		}
	}

	if indent != "" {
		return json.MarshalIndent(pd, "", indent)
	}

	return json.Marshal(pd)
}

// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence.  This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.

var (
	rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
)

func decodePatchKey(k string) string {
	return rfc6901Decoder.Replace(k)
}
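DecodePatch and Apply, defined above, are the public surface for RFC 6902 patches. A minimal end-to-end use, exercising the `~1` escaping that decodePatchKey handles:

```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	doc := []byte(`{"labels": {"app.kubernetes.io/name": "foo"}}`)
	// "~1" escapes "/" inside a reference token, per RFC 6901
	// (see decodePatchKey above).
	raw := []byte(`[{"op": "replace", "path": "/labels/app.kubernetes.io~1name", "value": "bar"}]`)

	patch, err := jsonpatch.DecodePatch(raw)
	if err != nil {
		panic(err)
	}
	out, err := patch.Apply(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // {"labels":{"app.kubernetes.io/name":"bar"}}
}
```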
0 vendor/sigs.k8s.io/yaml/.gitignore → vendor/github.com/ghodss/yaml/.gitignore generated vendored
7 vendor/github.com/ghodss/yaml/.travis.yml generated vendored Normal file
@@ -0,0 +1,7 @@
language: go
go:
  - 1.3
  - 1.4
script:
  - go test
  - go build
0 vendor/sigs.k8s.io/yaml/LICENSE → vendor/github.com/ghodss/yaml/LICENSE generated vendored
17 vendor/sigs.k8s.io/yaml/README.md → vendor/github.com/ghodss/yaml/README.md generated vendored
@@ -4,13 +4,13 @@

## Introduction

A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.

In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).

## Compatibility

This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).

## Caveats

@@ -44,8 +44,6 @@ import "github.com/ghodss/yaml"
Usage is very similar to the JSON library:

```go
package main

import (
	"fmt"

@@ -53,8 +51,8 @@ import (
)

type Person struct {
	Name string `json:"name"` // Affects YAML field names too.
	Age  int    `json:"age"`
	Name string `json:"name"` // Affects YAML field names too.
	Age  int    `json:"name"`
}

func main() {
@@ -67,13 +65,13 @@ func main() {
	}
	fmt.Println(string(y))
	/* Output:
	age: 30
	name: John
	age: 30
	*/

	// Unmarshal the YAML back into a Person struct.
	var p2 Person
	err = yaml.Unmarshal(y, &p2)
	err := yaml.Unmarshal(y, &p2)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
@@ -88,14 +86,11 @@ func main() {
`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j := []byte(`{"name": "John", "age": 30}`)
	y, err := yaml.JSONToYAML(j)
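The second README example is cut off by the hunk boundary above. For reference, a complete round trip through the two helpers would look roughly like this (output comments are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j := []byte(`{"name": "John", "age": 30}`)
	y, err := yaml.JSONToYAML(j)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Print(string(y)) // age: 30\nname: John

	j2, err := yaml.YAMLToJSON(y)
	if err != nil {
		fmt.Printf("err: %v\n", err)
		return
	}
	fmt.Println(string(j2)) // {"age":30,"name":"John"}
}
```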
7 vendor/sigs.k8s.io/yaml/fields.go → vendor/github.com/ghodss/yaml/fields.go generated vendored
@@ -1,7 +1,6 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package yaml

import (
@@ -46,11 +45,7 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te
			break
		}
		if v.IsNil() {
			if v.CanSet() {
				v.Set(reflect.New(v.Type().Elem()))
			} else {
				v = reflect.New(v.Type().Elem())
			}
			v.Set(reflect.New(v.Type().Elem()))
		}
		if v.Type().NumMethod() > 0 {
			if u, ok := v.Interface().(json.Unmarshaler); ok {
74 vendor/sigs.k8s.io/yaml/yaml.go → vendor/github.com/ghodss/yaml/yaml.go generated vendored
@@ -4,58 +4,37 @@ import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"reflect"
	"strconv"

	"gopkg.in/yaml.v2"
)

// Marshal marshals the object into JSON then converts JSON to YAML and returns the
// Marshals the object into JSON then converts JSON to YAML and returns the
// YAML.
func Marshal(o interface{}) ([]byte, error) {
	j, err := json.Marshal(o)
	if err != nil {
		return nil, fmt.Errorf("error marshaling into JSON: %v", err)
		return nil, fmt.Errorf("error marshaling into JSON: ", err)
	}

	y, err := JSONToYAML(j)
	if err != nil {
		return nil, fmt.Errorf("error converting JSON to YAML: %v", err)
		return nil, fmt.Errorf("error converting JSON to YAML: ", err)
	}

	return y, nil
}

// JSONOpt is a decoding option for decoding from JSON format.
type JSONOpt func(*json.Decoder) *json.Decoder

// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object,
// optionally configuring the behavior of the JSON unmarshal.
func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error {
	return yamlUnmarshal(y, o, false, opts...)
}

// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal
// into an object, optionally configuring the behavior of the JSON unmarshal.
func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error {
	return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...)
}

// yamlUnmarshal unmarshals the given YAML byte stream into the given interface,
// optionally performing the unmarshalling strictly
func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error {
// Converts YAML to JSON then uses JSON to unmarshal into an object.
func Unmarshal(y []byte, o interface{}) error {
	vo := reflect.ValueOf(o)
	unmarshalFn := yaml.Unmarshal
	if strict {
		unmarshalFn = yaml.UnmarshalStrict
	}
	j, err := yamlToJSON(y, &vo, unmarshalFn)
	j, err := yamlToJSON(y, &vo)
	if err != nil {
		return fmt.Errorf("error converting YAML to JSON: %v", err)
	}

	err = jsonUnmarshal(bytes.NewReader(j), o, opts...)
	err = json.Unmarshal(j, o)
	if err != nil {
		return fmt.Errorf("error unmarshaling JSON: %v", err)
	}
@@ -63,28 +42,13 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error
	return nil
}

// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the
// object, optionally applying decoder options prior to decoding.  We are not
// using json.Unmarshal directly as we want the chance to pass in non-default
// options.
func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error {
	d := json.NewDecoder(r)
	for _, opt := range opts {
		d = opt(d)
	}
	if err := d.Decode(&o); err != nil {
		return fmt.Errorf("while decoding JSON: %v", err)
	}
	return nil
}

// JSONToYAML Converts JSON to YAML.
// Convert JSON to YAML.
func JSONToYAML(j []byte) ([]byte, error) {
	// Convert the JSON to an object.
	var jsonObj interface{}
	// We are using yaml.Unmarshal here (instead of json.Unmarshal) because the
	// Go JSON library doesn't try to pick the right number type (int, float,
	// etc.) when unmarshalling to interface{}, it just picks float64
	// etc.) when unmarshling to interface{}, it just picks float64
	// universally. go-yaml does go through the effort of picking the right
	// number type, so we can preserve number type throughout this process.
	err := yaml.Unmarshal(j, &jsonObj)
@@ -96,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) {
	return yaml.Marshal(jsonObj)
}

// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML,
// passing JSON through this method should be a no-op.
// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// * In YAML you can have binary and null keys in your maps. These are invalid
@@ -106,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) {
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
//
// For strict decoding of YAML, use YAMLToJSONStrict.
func YAMLToJSON(y []byte) ([]byte, error) {
	return yamlToJSON(y, nil, yaml.Unmarshal)
	return yamlToJSON(y, nil)
}

// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding,
// returning an error on any duplicate field names.
func YAMLToJSONStrict(y []byte) ([]byte, error) {
	return yamlToJSON(y, nil, yaml.UnmarshalStrict)
}

func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) {
func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) {
	// Convert the YAML to an object.
	var yamlObj interface{}
	err := yamlUnmarshal(y, &yamlObj)
	err := yaml.Unmarshal(y, &yamlObj)
	if err != nil {
		return nil, err
	}
@@ -316,4 +272,6 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in
	}
	return yamlObj, nil
}

	return nil, nil
}
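Both the old and the new version of yaml.go keep the same basic Marshal/Unmarshal contract: YAML is round-tripped through JSON, so `json:` struct tags drive field names. A minimal sketch of that contract (type and field names here are illustrative):

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Config struct {
	// The json tags determine the YAML field names, since this library
	// converts through JSON (see the package comments above).
	Name     string `json:"name"`
	Replicas int    `json:"replicas"`
}

func main() {
	in := []byte("name: example-foo\nreplicas: 1\n")

	var c Config
	if err := yaml.Unmarshal(in, &c); err != nil {
		panic(err)
	}
	c.Replicas = 2

	out, err := yaml.Marshal(&c)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // name: example-foo\nreplicas: 2
}
```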
1 vendor/github.com/gogo/protobuf/AUTHORS generated vendored
@@ -10,6 +10,5 @@

# Please keep the list sorted.

Sendgrid, Inc
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>

4 vendor/github.com/gogo/protobuf/CONTRIBUTORS generated vendored
@@ -1,5 +1,4 @@
Anton Povarov <anton.povarov@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
@@ -11,12 +10,9 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com>

4 vendor/github.com/gogo/protobuf/proto/encode.go generated vendored
@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
	// use signed number to get arithmetic right shift.
	return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

func sizeZigzag64(x uint64) int {
	return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
	return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}

// EncodeZigzag32 writes a zigzag-encoded 32-bit integer

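The change above reverts to an explicit uint64 conversion around the zigzag expression. The encoding itself maps signed integers onto unsigned ones so that values of small magnitude stay small on the wire. A self-contained sketch of the mapping:

```go
package main

import "fmt"

// zigzag64 maps signed integers to unsigned ones so that values with small
// absolute magnitude get small varint encodings: 0→0, -1→1, 1→2, -2→3, ...
func zigzag64(v int64) uint64 {
	return uint64((v << 1) ^ (v >> 63)) // arithmetic shift smears the sign bit
}

// unzigzag64 inverts the mapping.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 63, -64} {
		fmt.Println(v, "->", zigzag64(v), "->", unzigzag64(zigzag64(v)))
	}
}
```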
1 vendor/github.com/gogo/protobuf/proto/lib.go generated vendored
@@ -73,6 +73,7 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:

  - Non-repeated fields of non-message type are values instead of pointers.
  - Getters are only generated for message and oneof fields.
  - Enum types do not get an Enum method.

The simplest way to describe this is to see an example.

3 vendor/github.com/gogo/protobuf/proto/properties.go generated vendored
@@ -193,7 +193,6 @@ type Properties struct {
	Default     string // default value
	HasDefault  bool   // whether an explicit default was provided
	CustomType  string
	CastType    string
	StdTime     bool
	StdDuration bool

@@ -342,8 +341,6 @@ func (p *Properties) Parse(s string) {
		p.OrigName = strings.Split(f, "=")[1]
	case strings.HasPrefix(f, "customtype="):
		p.CustomType = strings.Split(f, "=")[1]
	case strings.HasPrefix(f, "casttype="):
		p.CastType = strings.Split(f, "=")[1]
	case f == "stdtime":
		p.StdTime = true
	case f == "stdduration":

23 vendor/github.com/gogo/protobuf/proto/text.go generated vendored
@@ -522,17 +522,6 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
			}
			return nil
		}
	} else if len(props.CastType) > 0 {
		if _, ok := v.Interface().(interface {
			String() string
		}); ok {
			switch v.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
				reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				_, err := fmt.Fprintf(w, "%d", v.Interface())
				return err
			}
		}
	} else if props.StdTime {
		t, ok := v.Interface().(time.Time)
		if !ok {
@@ -542,9 +531,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
		if err != nil {
			return err
		}
		propsCopy := *props // Make a copy so that this is goroutine-safe
		propsCopy.StdTime = false
		err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
		props.StdTime = false
		err = tm.writeAny(w, reflect.ValueOf(tproto), props)
		props.StdTime = true
		return err
	} else if props.StdDuration {
		d, ok := v.Interface().(time.Duration)
@@ -552,9 +541,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
			return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
		}
		dproto := durationProto(d)
		propsCopy := *props // Make a copy so that this is goroutine-safe
		propsCopy.StdDuration = false
		err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
		props.StdDuration = false
		err := tm.writeAny(w, reflect.ValueOf(dproto), props)
		props.StdDuration = true
		return err
	}
}

2 vendor/github.com/gogo/protobuf/proto/text_parser.go generated vendored
@@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(x)
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:

9 vendor/k8s.io/klog/README.md → vendor/github.com/golang/glog/README generated vendored
@@ -1,10 +1,3 @@
klog
====

klog is a permanant fork of https://github.com/golang/glog. original README from glog is below

----

glog
====

@@ -12,7 +5,7 @@ Leveled execution logs for Go.

This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
	https://github.com/google/glog
	http://code.google.com/p/google-glog

By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
@@ -14,7 +14,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup.
// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as
// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags.
//
@@ -68,7 +68,7 @@
//	-vmodule=gopher*=3
//		sets the V level to 3 in all Go files whose names begin "gopher".
//
package klog
package glog

import (
	"bufio"
@@ -396,6 +396,13 @@ type flushSyncWriter interface {
}

func init() {
	flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
	flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
	flag.Var(&logging.verbosity, "v", "log level for V logs")
	flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
	flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
	flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")

	// Default stderrThreshold is ERROR.
	logging.stderrThreshold = errorLog

@@ -403,22 +410,6 @@ func init() {
	go logging.flushDaemon()
}

// InitFlags is for explicitly initializing the flags
func InitFlags(flagset *flag.FlagSet) {
	if flagset == nil {
		flagset = flag.CommandLine
	}
	flagset.StringVar(&logging.logDir, "log_dir", "", "If non-empty, write log files in this directory")
	flagset.StringVar(&logging.logFile, "log_file", "", "If non-empty, use this log file")
	flagset.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files")
	flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files")
	flagset.Var(&logging.verbosity, "v", "log level for V logs")
	flagset.BoolVar(&logging.skipHeaders, "skip_headers", false, "If true, avoid header prefixes in the log messages")
	flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
	flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging")
	flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace")
}

// Flush flushes all pending log I/O.
func Flush() {
	logging.lockAndFlushAll()
@@ -462,17 +453,6 @@ type loggingT struct {
	// safely using atomic.LoadInt32.
	vmodule   moduleSpec // The state of the -vmodule flag.
	verbosity Level      // V logging level, the value of the -v flag/

	// If non-empty, overrides the choice of directory in which to write logs.
	// See createLogDirs for the full list of possible destinations.
	logDir string

	// If non-empty, specifies the path of the file to write logs. mutually exclusive
	// with the log-dir option.
	logFile string

	// If true, do not add the prefix headers, useful when used with SetOutput
	skipHeaders bool
}

// buffer holds a byte Buffer for reuse. The zero value is ready for use.
@@ -576,9 +556,6 @@ func (l *loggingT) formatHeader(s severity, file string, line int) *buffer {
		s = infoLog // for safety.
	}
	buf := l.getBuffer()
	if l.skipHeaders {
		return buf
	}

	// Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand.
	// It's worth about 3X. Fprintf is hard.
@@ -690,45 +667,6 @@ func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToSt
	l.output(s, buf, file, line, alsoToStderr)
}

// redirectBuffer is used to set an alternate destination for the logs
type redirectBuffer struct {
	w io.Writer
}

func (rb *redirectBuffer) Sync() error {
	return nil
}

func (rb *redirectBuffer) Flush() error {
	return nil
}

func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) {
	return rb.w.Write(bytes)
}

// SetOutput sets the output destination for all severities
func SetOutput(w io.Writer) {
	for s := fatalLog; s >= infoLog; s-- {
		rb := &redirectBuffer{
			w: w,
		}
		logging.file[s] = rb
	}
}

// SetOutputBySeverity sets the output destination for specific severity
func SetOutputBySeverity(name string, w io.Writer) {
	sev, ok := severityByName(name)
	if !ok {
		panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name))
	}
	rb := &redirectBuffer{
		w: w,
	}
	logging.file[sev] = rb
}

// output writes the data to the log files and releases the buffer.
func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) {
	l.mu.Lock()
@@ -938,7 +876,7 @@ const flushInterval = 30 * time.Second

// flushDaemon periodically flushes the log file buffers.
func (l *loggingT) flushDaemon() {
	for range time.NewTicker(flushInterval).C {
	for _ = range time.NewTicker(flushInterval).C {
		l.lockAndFlushAll()
	}
}
@@ -16,10 +16,11 @@

// File I/O for logs.

package klog
package glog

import (
	"errors"
	"flag"
	"fmt"
	"os"
	"os/user"
@@ -35,9 +36,13 @@ var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string

// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")

func createLogDirs() {
	if logging.logDir != "" {
		logDirs = append(logDirs, logging.logDir)
	if *logDir != "" {
		logDirs = append(logDirs, *logDir)
	}
	logDirs = append(logDirs, os.TempDir())
}
@@ -98,13 +103,6 @@ var onceLogDirs sync.Once
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
	if logging.logFile != "" {
		f, err := os.Create(logging.logFile)
		if err == nil {
			return f, logging.logFile, nil
		}
		return nil, "", fmt.Errorf("log: unable to create log: %v", err)
	}
	onceLogDirs.Do(createLogDirs)
	if len(logDirs) == 0 {
		return nil, "", errors.New("log: no log dirs")
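This hunk reverts klog's explicit flag plumbing back to glog's package-level init registration. From a caller's perspective, typical glog use then looks roughly like this (a sketch; the flags involved are the ones registered in init above):

```go
package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	// glog registers its flags (-logtostderr, -v, -vmodule, ...) in init(),
	// so flag.Parse must run before any logging happens.
	flag.Parse()
	defer glog.Flush()

	glog.Info("starting up")
	if glog.V(2) {
		glog.Info("verbose detail, only emitted with -v=2 or higher")
	}
}
```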
1 vendor/github.com/google/btree/.travis.yml generated vendored Normal file
@@ -0,0 +1 @@
language: go
0 vendor/k8s.io/utils/LICENSE → vendor/github.com/google/btree/LICENSE generated vendored
12 vendor/github.com/google/btree/README.md generated vendored Normal file
@@ -0,0 +1,12 @@
# BTree implementation for Go

![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)

This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.

The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.

See http://godoc.org/github.com/google/btree for documentation.
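For orientation before the full listing below, a rough sketch of how this package is typically consumed; `ReplaceOrInsert`, `Get`, and `Ascend` are assumed from the upstream library and are not all visible in the truncated listing that follows (only `New` and the `Item` interface appear there):

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

// Int wraps int to satisfy the Item interface defined in btree.go below.
type Int int

func (a Int) Less(b btree.Item) bool { return a < b.(Int) }

func main() {
	t := btree.New(2) // a 2-3-4 tree, per the New documentation
	for _, v := range []Int{5, 2, 8, 1} {
		t.ReplaceOrInsert(v) // assumed exported method from the upstream package
	}
	fmt.Println(t.Get(Int(2))) // 2
	t.Ascend(func(i btree.Item) bool {
		fmt.Println(i) // 1, 2, 5, 8 in order
		return true
	})
}
```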
649 vendor/github.com/google/btree/btree.go generated vendored Normal file
@@ -0,0 +1,649 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implmentation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children.  For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
//   * Due to the overhead of storing values as interfaces (each
//     value needs to be stored as the value itself, then 2 words for the
//     interface pointing to that value and its type), resulting in higher
//     memory use.
//   * Since interfaces can point to values anywhere in memory, values are
//     most likely not stored in contiguous blocks, resulting in a higher
//     number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible.  Unlike gollrb, though, we currently don't
// support storing multiple equivalent values or backwards iteration.
package btree

import (
	"fmt"
	"io"
	"sort"
	"strings"
)

// Item represents a single object in the tree.
type Item interface {
	// Less tests whether the current item is less than the given argument.
	//
	// This must provide a strict weak ordering.
	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
	// hold one of either a or b in the tree).
	Less(than Item) bool
}

const (
	DefaultFreeListSize = 32
)

// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
	freelist []*node
}

// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
	return &FreeList{freelist: make([]*node, 0, size)}
}

func (f *FreeList) newNode() (n *node) {
	index := len(f.freelist) - 1
	if index < 0 {
		return new(node)
	}
	f.freelist, n = f.freelist[:index], f.freelist[index]
	return
}

func (f *FreeList) freeNode(n *node) {
	if len(f.freelist) < cap(f.freelist) {
		f.freelist = append(f.freelist, n)
	}
}

// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree.  When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool

// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}

// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
	if degree <= 1 {
		panic("bad degree")
	}
	return &BTree{
		degree:   degree,
		freelist: f,
	}
}

// items stores items in a node.
type items []Item

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = item
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
	item := (*s)[index]
	(*s)[index] = nil
	copy((*s)[index:], (*s)[index+1:])
	*s = (*s)[:len(*s)-1]
	return item
}

// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// find returns the index where the given item should be inserted into this
// list.  'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
	i := sort.Search(len(s), func(i int) bool {
		return item.Less(s[i])
	})
	if i > 0 && !s[i-1].Less(item) {
		return i - 1, true
	}
	return i, false
}

// children stores child nodes in a node.
type children []*node

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = n
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
	n := (*s)[index]
	(*s)[index] = nil
	copy((*s)[index:], (*s)[index+1:])
	*s = (*s)[:len(*s)-1]
	return n
}

// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
//   * len(children) == 0, len(items) unconstrained
//   * len(children) == len(items) + 1
type node struct {
	items    items
	children children
	t        *BTree
}

// split splits the given node at the given index.  The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
	item := n.items[i]
	next := n.t.newNode()
	next.items = append(next.items, n.items[i+1:]...)
	n.items = n.items[:i]
	if len(n.children) > 0 {
		next.children = append(next.children, n.children[i+1:]...)
		n.children = n.children[:i+1]
	}
	return item, next
}

// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
	if len(n.children[i].items) < maxItems {
		return false
	}
	first := n.children[i]
	item, second := first.split(maxItems / 2)
	n.items.insertAt(i, item)
	n.children.insertAt(i+1, second)
	return true
}

// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items.  Should an equivalent item be
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		n.items.insertAt(i, item)
		return nil
	}
	if n.maybeSplitChild(i, maxItems) {
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.children[i].insert(item, maxItems)
}

// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
	i, found := n.items.find(key)
	if found {
		return n.items[i]
	} else if len(n.children) > 0 {
		return n.children[i].get(key)
	}
	return nil
}

// min returns the first item in the subtree.
func min(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[0]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[0]
}

// max returns the last item in the subtree.
func max(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[len(n.children)-1]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[len(n.items)-1]
}

// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)

// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			return n.items.pop()
		}
		i = len(n.items)
	case removeMin:
		if len(n.children) == 0 {
			return n.items.removeAt(0)
		}
		i = 0
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	child := n.children[i]
	if len(child.items) <= minItems {
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call.  Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}

// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	child := n.children[i]
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child
		stealFrom := n.children[i-1]
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child
		stealFrom := n.children[i+1]
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		if i >= len(n.items) {
			i--
			child = n.children[i]
		}
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.t.freeNode(mergeChild)
	}
	return n.remove(item, minItems, typ)
}

// iterate provides a simple method for iterating over elements in the tree.
// It could probably use some work to be extra-efficient (it calls from() a
// little more than it should), but it works pretty well for now.
//
// It requires that 'from' and 'to' both return true for values we should hit
// with the iterator.  It should also be the case that 'from' returns true for
// values less than or equal to values 'to' returns true for, and 'to'
// returns true for values greater than or equal to those that 'from'
// does.
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
	for i, item := range n.items {
		if !from(item) {
			continue
		}
		if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
			return false
		}
		if !to(item) {
			return false
		}
		if !iter(item) {
			return false
		}
	}
	if len(n.children) > 0 {
		return n.children[len(n.children)-1].iterate(from, to, iter)
	}
	return true
}

// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat("  ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}

// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree   int
	length   int
	root     *node
	freelist *FreeList
}

// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
	return t.degree*2 - 1
}

// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
	return t.degree - 1
}

func (t *BTree) newNode() (n *node) {
	n = t.freelist.newNode()
	n.t = t
	return
}

func (t *BTree) freeNode(n *node) {
	for i := range n.items {
		n.items[i] = nil // clear to allow GC
	}
	n.items = n.items[:0]
	for i := range n.children {
		n.children[i] = nil // clear to allow GC
}
|
||||
n.children = n.children[:0]
|
||||
n.t = nil // clear to allow GC
|
||||
t.freelist.freeNode(n)
|
||||
}
|
||||
|
||||
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
|
||||
// already equals the given one, it is removed from the tree and returned.
|
||||
// Otherwise, nil is returned.
|
||||
//
|
||||
// nil cannot be added to the tree (will panic).
|
||||
func (t *BTree) ReplaceOrInsert(item Item) Item {
|
||||
if item == nil {
|
||||
panic("nil item being added to BTree")
|
||||
}
|
||||
if t.root == nil {
|
||||
t.root = t.newNode()
|
||||
t.root.items = append(t.root.items, item)
|
||||
t.length++
|
||||
return nil
|
||||
} else if len(t.root.items) >= t.maxItems() {
|
||||
item2, second := t.root.split(t.maxItems() / 2)
|
||||
oldroot := t.root
|
||||
t.root = t.newNode()
|
||||
t.root.items = append(t.root.items, item2)
|
||||
t.root.children = append(t.root.children, oldroot, second)
|
||||
}
|
||||
out := t.root.insert(item, t.maxItems())
|
||||
if out == nil {
|
||||
t.length++
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// Delete removes an item equal to the passed in item from the tree, returning
|
||||
// it. If no such item exists, returns nil.
|
||||
func (t *BTree) Delete(item Item) Item {
|
||||
return t.deleteItem(item, removeItem)
|
||||
}
|
||||
|
||||
// DeleteMin removes the smallest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMin() Item {
|
||||
return t.deleteItem(nil, removeMin)
|
||||
}
|
||||
|
||||
// DeleteMax removes the largest item in the tree and returns it.
|
||||
// If no such item exists, returns nil.
|
||||
func (t *BTree) DeleteMax() Item {
|
||||
return t.deleteItem(nil, removeMax)
|
||||
}
|
||||
|
||||
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
|
||||
if t.root == nil || len(t.root.items) == 0 {
|
||||
return nil
|
||||
}
|
||||
out := t.root.remove(item, t.minItems(), typ)
|
||||
if len(t.root.items) == 0 && len(t.root.children) > 0 {
|
||||
oldroot := t.root
|
||||
t.root = t.root.children[0]
|
||||
t.freeNode(oldroot)
|
||||
}
|
||||
if out != nil {
|
||||
t.length--
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// AscendRange calls the iterator for every value in the tree within the range
|
||||
// [greaterOrEqual, lessThan), until iterator returns false.
|
||||
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return !a.Less(greaterOrEqual) },
|
||||
func(a Item) bool { return a.Less(lessThan) },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// AscendLessThan calls the iterator for every value in the tree within the range
|
||||
// [first, pivot), until iterator returns false.
|
||||
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return true },
|
||||
func(a Item) bool { return a.Less(pivot) },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// AscendGreaterOrEqual calls the iterator for every value in the tree within
|
||||
// the range [pivot, last], until iterator returns false.
|
||||
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return !a.Less(pivot) },
|
||||
func(a Item) bool { return true },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// Ascend calls the iterator for every value in the tree within the range
|
||||
// [first, last], until iterator returns false.
|
||||
func (t *BTree) Ascend(iterator ItemIterator) {
|
||||
if t.root == nil {
|
||||
return
|
||||
}
|
||||
t.root.iterate(
|
||||
func(a Item) bool { return true },
|
||||
func(a Item) bool { return true },
|
||||
iterator)
|
||||
}
|
||||
|
||||
// Get looks for the key item in the tree, returning it. It returns nil if
|
||||
// unable to find that item.
|
||||
func (t *BTree) Get(key Item) Item {
|
||||
if t.root == nil {
|
||||
return nil
|
||||
}
|
||||
return t.root.get(key)
|
||||
}
|
||||
|
||||
// Min returns the smallest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Min() Item {
|
||||
return min(t.root)
|
||||
}
|
||||
|
||||
// Max returns the largest item in the tree, or nil if the tree is empty.
|
||||
func (t *BTree) Max() Item {
|
||||
return max(t.root)
|
||||
}
|
||||
|
||||
// Has returns true if the given key is in the tree.
|
||||
func (t *BTree) Has(key Item) bool {
|
||||
return t.Get(key) != nil
|
||||
}
|
||||
|
||||
// Len returns the number of items currently in the tree.
|
||||
func (t *BTree) Len() int {
|
||||
return t.length
|
||||
}
|
||||
|
||||
// Int implements the Item interface for integers.
|
||||
type Int int
|
||||
|
||||
// Less returns true if int(a) < int(b).
|
||||
func (a Int) Less(b Item) bool {
|
||||
return a < b.(Int)
|
||||
}
|
||||
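The btree.go listing above is easier to follow with its public surface in view. A minimal sketch of how this vendored API is used, based only on what the listing defines (the `New` constructor also appears in the benchmark file below):

```go
package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // degree 2 for illustration; the benchmark below defaults to 8
	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i)) // returns nil when nothing was replaced
	}
	fmt.Println(tr.Len(), tr.Min(), tr.Max()) // 10 0 9

	// Iterate over [3, 7): visits 3 4 5 6, stopping early if the iterator returns false.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})

	fmt.Println(tr.Delete(btree.Int(5))) // 5; deleting a missing item returns nil
}
```

Note how `ReplaceOrInsert` and `Delete` return the displaced item (or nil), which is what `deleteItem` above relies on to keep `length` accurate.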
76 vendor/github.com/google/btree/btree_mem.go (generated, vendored, new file)
@@ -0,0 +1,76 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build ignore

// This binary compares memory usage between btree and gollrb.
package main

import (
	"flag"
	"fmt"
	"math/rand"
	"runtime"
	"time"

	"github.com/google/btree"
	"github.com/petar/GoLLRB/llrb"
)

var (
	size   = flag.Int("size", 1000000, "size of the tree to build")
	degree = flag.Int("degree", 8, "degree of btree")
	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)

func main() {
	flag.Parse()
	vals := rand.Perm(*size)
	var t, v interface{}
	v = vals
	var stats runtime.MemStats
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- BEFORE ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	start := time.Now()
	if *gollrb {
		tr := llrb.New()
		for _, v := range vals {
			tr.ReplaceOrInsert(llrb.Int(v))
		}
		t = tr // keep it around
	} else {
		tr := btree.New(*degree)
		for _, v := range vals {
			tr.ReplaceOrInsert(btree.Int(v))
		}
		t = tr // keep it around
	}
	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
	fmt.Println("-------- AFTER ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- AFTER GC ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	if t == v {
		fmt.Println("to make sure vals and tree aren't GC'd")
	}
}
96 vendor/github.com/google/gofuzz/fuzz.go (generated, vendored)
@@ -34,27 +34,21 @@ type Fuzzer struct {
	nilChance   float64
	minElements int
	maxElements int
	maxDepth    int
}

// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
	return NewWithSeed(time.Now().UnixNano())
}

func NewWithSeed(seed int64) *Fuzzer {
	f := &Fuzzer{
		defaultFuzzFuncs: fuzzFuncMap{
			reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
		},

		fuzzFuncs: fuzzFuncMap{},
		r:         rand.New(rand.NewSource(seed)),
		r:         rand.New(rand.NewSource(time.Now().UnixNano())),
		nilChance: .2,
		minElements: 1,
		maxElements: 10,
		maxDepth:    100,
	}
	return f
}
@@ -142,14 +136,6 @@ func (f *Fuzzer) genShouldFill() bool {
	return f.r.Float64() > f.nilChance
}

// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
	f.maxDepth = d
	return f
}

// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
@@ -158,19 +144,17 @@ func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
// Not safe for cyclic or tree-like structs!
//
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
	v := reflect.ValueOf(obj)
	if v.Kind() != reflect.Ptr {
		panic("needed ptr!")
	}
	v = v.Elem()
	f.fuzzWithContext(v, 0)
	f.doFuzz(v, 0)
}

// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
@@ -186,7 +170,7 @@ func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
		panic("needed ptr!")
	}
	v = v.Elem()
	f.fuzzWithContext(v, flagNoCustomFuzz)
	f.doFuzz(v, flagNoCustomFuzz)
}

const (
@@ -194,87 +178,69 @@ const (
	flagNoCustomFuzz uint64 = 1 << iota
)

func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
	fc := &fuzzerContext{fuzzer: f}
	fc.doFuzz(v, flags)
}

// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
	fuzzer   *Fuzzer
	curDepth int
}

func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
	if fc.curDepth >= fc.fuzzer.maxDepth {
		return
	}
	fc.curDepth++
	defer func() { fc.curDepth-- }()

func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
	if !v.CanSet() {
		return
	}

	if flags&flagNoCustomFuzz == 0 {
		// Check for both pointer and non-pointer custom functions.
		if v.CanAddr() && fc.tryCustom(v.Addr()) {
		if v.CanAddr() && f.tryCustom(v.Addr()) {
			return
		}
		if fc.tryCustom(v) {
		if f.tryCustom(v) {
			return
		}
	}

	if fn, ok := fillFuncMap[v.Kind()]; ok {
		fn(v, fc.fuzzer.r)
		fn(v, f.r)
		return
	}
	switch v.Kind() {
	case reflect.Map:
		if fc.fuzzer.genShouldFill() {
		if f.genShouldFill() {
			v.Set(reflect.MakeMap(v.Type()))
			n := fc.fuzzer.genElementCount()
			n := f.genElementCount()
			for i := 0; i < n; i++ {
				key := reflect.New(v.Type().Key()).Elem()
				fc.doFuzz(key, 0)
				f.doFuzz(key, 0)
				val := reflect.New(v.Type().Elem()).Elem()
				fc.doFuzz(val, 0)
				f.doFuzz(val, 0)
				v.SetMapIndex(key, val)
			}
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Ptr:
		if fc.fuzzer.genShouldFill() {
		if f.genShouldFill() {
			v.Set(reflect.New(v.Type().Elem()))
			fc.doFuzz(v.Elem(), 0)
			f.doFuzz(v.Elem(), 0)
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Slice:
		if fc.fuzzer.genShouldFill() {
			n := fc.fuzzer.genElementCount()
		if f.genShouldFill() {
			n := f.genElementCount()
			v.Set(reflect.MakeSlice(v.Type(), n, n))
			for i := 0; i < n; i++ {
				fc.doFuzz(v.Index(i), 0)
				f.doFuzz(v.Index(i), 0)
			}
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Array:
		if fc.fuzzer.genShouldFill() {
		if f.genShouldFill() {
			n := v.Len()
			for i := 0; i < n; i++ {
				fc.doFuzz(v.Index(i), 0)
				f.doFuzz(v.Index(i), 0)
			}
			return
		}
		v.Set(reflect.Zero(v.Type()))
	case reflect.Struct:
		for i := 0; i < v.NumField(); i++ {
			fc.doFuzz(v.Field(i), 0)
			f.doFuzz(v.Field(i), 0)
		}
	case reflect.Chan:
		fallthrough
@@ -289,20 +255,20 @@ func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {

// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
func (f *Fuzzer) tryCustom(v reflect.Value) bool {
	// First: see if we have a fuzz function for it.
	doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
	doCustom, ok := f.fuzzFuncs[v.Type()]
	if !ok {
		// Second: see if it can fuzz itself.
		if v.CanInterface() {
			intf := v.Interface()
			if fuzzable, ok := intf.(Interface); ok {
				fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
				fuzzable.Fuzz(Continue{f: f, Rand: f.r})
				return true
			}
		}
		// Finally: see if there is a default fuzz function.
		doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
		doCustom, ok = f.defaultFuzzFuncs[v.Type()]
		if !ok {
			return false
		}
@@ -328,8 +294,8 @@ func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
	}

	doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
		fc:   fc,
		Rand: fc.fuzzer.r,
		f:    f,
		Rand: f.r,
	})})
	return true
}
@@ -344,7 +310,7 @@ type Interface interface {
// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
	fc *fuzzerContext
	f *Fuzzer

	// For convenience, Continue implements rand.Rand via embedding.
	// Use this for generating any randomness if you want your fuzzing
@@ -359,7 +325,7 @@ func (c Continue) Fuzz(obj interface{}) {
		panic("needed ptr!")
	}
	v = v.Elem()
	c.fc.doFuzz(v, 0)
	c.f.doFuzz(v, 0)
}

// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
@@ -372,7 +338,7 @@ func (c Continue) FuzzNoCustom(obj interface{}) {
		panic("needed ptr!")
	}
	v = v.Elem()
	c.fc.doFuzz(v, flagNoCustomFuzz)
	c.f.doFuzz(v, flagNoCustomFuzz)
}

// RandString makes a random string up to 20 characters long. The returned string
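The hunks above toggle gofuzz between a depth-limited, seedable implementation (`NewWithSeed`, `MaxDepth`, `fuzzerContext`) and the simpler single-struct one. Either way, the caller-facing pattern stays the same; a minimal sketch, where `NilChance` and `NumElements` are the tuning methods named in the doc comment above (their exact signatures are assumptions here):

```go
package main

import (
	"fmt"

	fuzz "github.com/google/gofuzz"
)

type Config struct {
	Name    string
	Retries int
	Labels  map[string]string
}

func main() {
	// Assumed signatures: NilChance takes a probability for nilable fields,
	// NumElements takes a min/max pair for maps and slices.
	f := fuzz.New().NilChance(0).NumElements(1, 3)

	var c Config
	f.Fuzz(&c) // fills exported fields with random values; panics on non-pointer input
	fmt.Printf("%+v\n", c)
}
```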
18 vendor/github.com/gregjones/httpcache/.travis.yml (generated, vendored, new file)
@@ -0,0 +1,18 @@
sudo: false
language: go
go:
  - 1.6.x
  - 1.7.x
  - 1.8.x
  - master
matrix:
  allow_failures:
    - go: master
  fast_finish: true
install:
  - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
  - go get -t -v ./...
  - diff -u <(echo -n) <(gofmt -d .)
  - go tool vet .
  - go test -v -race ./...
7 vendor/github.com/gregjones/httpcache/LICENSE.txt (generated, vendored, new file)
@@ -0,0 +1,7 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 vendor/github.com/gregjones/httpcache/README.md (generated, vendored, new file)
@@ -0,0 +1,24 @@
httpcache
=========

[Build Status](https://travis-ci.org/gregjones/httpcache) [GoDoc](https://godoc.org/github.com/gregjones/httpcache)

Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.

It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).

Cache Backends
--------------

- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.

License
-------

- [MIT License](LICENSE.txt)
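As a quick orientation before the source below: the in-memory transport is the zero-setup way to use the package. A minimal sketch, using only names defined in httpcache.go below (the URL is arbitrary, and whether the second request is actually served from cache depends on the response's caching headers):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	t := httpcache.NewMemoryCacheTransport() // MemoryCache + MarkCachedResponses=true
	client := t.Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/") // arbitrary URL
		if err != nil {
			panic(err)
		}
		// The body must be read to EOF before the response is written to the
		// cache (see cachingReadCloser in httpcache.go below).
		body, _ := ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// X-From-Cache is "1" when the response came from the cache, "" otherwise.
		fmt.Println(len(body), resp.Header.Get(httpcache.XFromCache))
	}
}
```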
61 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go (generated, vendored, new file)
@@ -0,0 +1,61 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"github.com/peterbourgon/diskv"
	"io"
)

// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
	d *diskv.Diskv
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	key = keyToFilename(key)
	resp, err := c.d.Read(key)
	if err != nil {
		return []byte{}, false
	}
	return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	key = keyToFilename(key)
	c.d.Erase(key)
}

func keyToFilename(key string) string {
	h := md5.New()
	io.WriteString(h, key)
	return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}

// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
	return &Cache{d}
}
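A sketch of wiring this disk-backed cache into the Transport defined in httpcache.go below; the cache directory is an arbitrary choice for illustration:

```go
package main

import (
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// New creates a diskv store under the given path with the 100MB
	// in-memory layer configured above.
	cache := diskcache.New("/tmp/httpcache") // arbitrary path
	transport := httpcache.NewTransport(cache)

	client := &http.Client{Transport: transport}
	resp, err := client.Get("https://example.com/") // arbitrary URL
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
}
```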
553 vendor/github.com/gregjones/httpcache/httpcache.go (generated, vendored, new file)
@@ -0,0 +1,553 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache

import (
	"bufio"
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"strings"
	"sync"
	"time"
)

const (
	stale = iota
	fresh
	transparent
	// XFromCache is the header added to responses that are returned from the cache
	XFromCache = "X-From-Cache"
)

// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
	// Get returns the []byte representation of a cached response and a bool
	// set to true if the value isn't empty
	Get(key string) (responseBytes []byte, ok bool)
	// Set stores the []byte representation of a response against a key
	Set(key string, responseBytes []byte)
	// Delete removes the value associated with the key
	Delete(key string)
}

// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
	return req.URL.String()
}

// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
	cachedVal, ok := c.Get(cacheKey(req))
	if !ok {
		return
	}

	b := bytes.NewBuffer(cachedVal)
	return http.ReadResponse(bufio.NewReader(b), req)
}

// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
	c.mu.RLock()
	resp, ok = c.items[key]
	c.mu.RUnlock()
	return resp, ok
}

// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
	c.mu.Lock()
	c.items[key] = resp
	c.mu.Unlock()
}

// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
	c.mu.Lock()
	delete(c.items, key)
	c.mu.Unlock()
}

// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
	c := &MemoryCache{items: map[string][]byte{}}
	return c
}

// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	Cache     Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
}

// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
	return &Transport{Cache: c, MarkCachedResponses: true}
}

// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
	return &http.Client{Transport: t}
}

// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
		header = http.CanonicalHeaderKey(header)
		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
			return false
		}
	}
	return true
}

// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	cacheKey := cacheKey(req)
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					req = req2
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK

			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
			cachedResp.StatusCode = http.StatusOK
			return cachedResp, nil
		} else {
			if err != nil || resp.StatusCode != http.StatusOK {
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		switch req.Method {
		case "GET":
			// Delay caching until EOF is reached.
			resp.Body = &cachingReadCloser{
				R: resp.Body,
				OnEOF: func(r io.Reader) {
					resp := *resp
					resp.Body = ioutil.NopCloser(r)
					respBytes, err := httputil.DumpResponse(&resp, true)
					if err == nil {
						t.Cache.Set(cacheKey, respBytes)
					}
				},
			}
		default:
			respBytes, err := httputil.DumpResponse(resp, true)
			if err == nil {
				t.Cache.Set(cacheKey, respBytes)
			}
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	return resp, nil
}

// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")

// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
	dateHeader := respHeaders.Get("date")
	if dateHeader == "" {
		err = ErrNoDateHeader
		return
	}

	return time.Parse(time.RFC1123, dateHeader)
}

type realClock struct{}

func (c *realClock) since(d time.Time) time.Duration {
	return time.Since(d)
}

type timer interface {
	since(d time.Time) time.Duration
}

var clock timer = &realClock{}

// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, smax-age isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	date, err := Date(respHeaders)
	if err != nil {
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		// the client wants a response that will still be fresh for at least the specified number of seconds.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}

// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)

	var err error
	lifetime := time.Duration(-1)

	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}
	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}

	if lifetime >= 0 {
		date, err := Date(respHeaders)
		if err != nil {
			return false
		}
		currentAge := clock.since(date)
		if lifetime > currentAge {
			return true
		}
	}

	return false
}

func getEndToEndHeaders(respHeaders http.Header) []string {
	// These headers are always hop-by-hop
	hopByHopHeaders := map[string]struct{}{
		"Connection":          struct{}{},
		"Keep-Alive":          struct{}{},
		"Proxy-Authenticate":  struct{}{},
		"Proxy-Authorization": struct{}{},
		"Te":                  struct{}{},
		"Trailers":            struct{}{},
		"Transfer-Encoding":   struct{}{},
		"Upgrade":             struct{}{},
	}

	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
		// any header listed in connection, if present, is also considered hop-by-hop
		if strings.Trim(extra, " ") != "" {
			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
		}
	}
	endToEndHeaders := []string{}
	for respHeader, _ := range respHeaders {
		if _, ok := hopByHopHeaders[respHeader]; !ok {
			endToEndHeaders = append(endToEndHeaders, respHeader)
		}
	}
	return endToEndHeaders
}

func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
	if _, ok := respCacheControl["no-store"]; ok {
		return false
	}
	if _, ok := reqCacheControl["no-store"]; ok {
		return false
	}
	return true
}

func newGatewayTimeoutResponse(req *http.Request) *http.Response {
	var braw bytes.Buffer
	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
	if err != nil {
		panic(err)
	}
	return resp
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header)
	for k, s := range r.Header {
		r2.Header[k] = s
	}
	return r2
}

type cacheControl map[string]string

func parseCacheControl(headers http.Header) cacheControl {
	cc := cacheControl{}
	ccHeader := headers.Get("Cache-Control")
	for _, part := range strings.Split(ccHeader, ",") {
		part = strings.Trim(part, " ")
		if part == "" {
			continue
		}
		if strings.ContainsRune(part, '=') {
			keyval := strings.Split(part, "=")
			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
		} else {
			cc[part] = ""
		}
	}
	return cc
}

// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
	var vals []string
	for _, val := range headers[http.CanonicalHeaderKey(name)] {
		fields := strings.Split(val, ",")
		for i, f := range fields {
			fields[i] = strings.TrimSpace(f)
		}
		vals = append(vals, fields...)
	}
	return vals
}

// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
	// Underlying ReadCloser.
	R io.ReadCloser
	// OnEOF is called with a copy of the content of R when EOF is reached.
	OnEOF func(io.Reader)

	buf bytes.Buffer // buf stores a copy of the content of R.
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	r.buf.Write(p[:n])
	if err == io.EOF {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
	return n, err
}

func (r *cachingReadCloser) Close() error {
	return r.R.Close()
}

// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
	c := NewMemoryCache()
	t := NewTransport(c)
	return t
}
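The Cache interface at the top of this file is the whole backend contract, which makes it easy to layer in instrumentation. A sketch (not part of the package) of a wrapper that counts hits and misses around any existing backend:

```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache"
)

// countingCache wraps another httpcache.Cache and tallies Get outcomes.
// The counters are not goroutine-safe; add a mutex for concurrent use.
type countingCache struct {
	inner        httpcache.Cache
	hits, misses int
}

func (c *countingCache) Get(key string) ([]byte, bool) {
	data, ok := c.inner.Get(key)
	if ok {
		c.hits++
	} else {
		c.misses++
	}
	return data, ok
}

func (c *countingCache) Set(key string, responseBytes []byte) { c.inner.Set(key, responseBytes) }

func (c *countingCache) Delete(key string) { c.inner.Delete(key) }

func main() {
	cc := &countingCache{inner: httpcache.NewMemoryCache()}
	client := httpcache.NewTransport(cc).Client() // any Cache implementation plugs in here

	_ = client
	fmt.Println(cc.hits, cc.misses) // both 0 until requests are made
}
```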
33 vendor/github.com/imdario/mergo/.gitignore (generated, vendored)
@@ -1,33 +0,0 @@
#### joe made this: http://goel.io/joe

#### go ####
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib

# Test binary, build with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

#### vim ####
# Swap
[._]*.s[a-v][a-z]
[._]*.sw[a-p]
[._]s[a-v][a-z]
[._]sw[a-p]

# Session
Session.vim

# Temporary
.netrwhist
*~
# Auto-generated tag files
tags
7 vendor/github.com/imdario/mergo/.travis.yml (generated, vendored)
@@ -1,7 +1,2 @@
language: go
install:
- go get -t
- go get golang.org/x/tools/cmd/cover
- go get github.com/mattn/goveralls
script:
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
install: go get -t
46 vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md (generated, vendored)
@@ -1,46 +0,0 @@
# Contributor Covenant Code of Conduct

## Our Pledge

In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.

## Our Standards

Examples of behavior that contributes to creating a positive environment include:

* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members

Examples of unacceptable behavior by participants include:

* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting

## Our Responsibilities

Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.

Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.

## Scope

This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.

Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
194 vendor/github.com/imdario/mergo/README.md (generated, vendored)
@@ -2,72 +2,14 @@
|
||||
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
|
||||
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region Marche.
|
||||
|
||||

|
||||
|
||||
## Status
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
It is ready for production use. It works fine although it may use more of testing. Here some projects in the wild using Mergo:
|
||||
|
||||
[![GoDoc][3]][4]
|
||||
[![GoCard][5]][6]
|
||||
[![Build Status][1]][2]
|
||||
[![Coverage Status][7]][8]
|
||||
[![Sourcegraph][9]][10]
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
|
||||
[4]: https://godoc.org/github.com/imdario/mergo
|
||||
[5]: https://goreportcard.com/badge/imdario/mergo
|
||||
[6]: https://goreportcard.com/report/github.com/imdario/mergo
|
||||
[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
|
||||
[8]: https://coveralls.io/github/imdario/mergo?branch=master
|
||||
[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
|
||||
[10]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
|
||||
### Latest release
|
||||
|
||||
[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4).
|
||||
|
||||
### Important note
|
||||
|
||||
Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code.
|
||||
|
||||
If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0).
|
||||
|
||||
### Donations
|
||||
|
||||
If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes:
|
||||
|
||||
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
|
||||
[](https://beerpay.io/imdario/mergo)
|
||||
[](https://beerpay.io/imdario/mergo)
|
||||
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
|
||||
|
||||
### Mergo in the wild
|
||||
|
||||
- [moby/moby](https://github.com/moby/moby)
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
|
||||
- [vmware/dispatch](https://github.com/vmware/dispatch)
|
||||
- [Shopify/themekit](https://github.com/Shopify/themekit)
|
||||
- [imdario/zas](https://github.com/imdario/zas)
|
||||
- [matcornic/hermes](https://github.com/matcornic/hermes)
|
||||
- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
|
||||
- [kataras/iris](https://github.com/kataras/iris)
|
||||
- [michaelsauter/crane](https://github.com/michaelsauter/crane)
|
||||
- [go-task/task](https://github.com/go-task/task)
|
||||
- [sensu/uchiwa](https://github.com/sensu/uchiwa)
|
||||
- [ory/hydra](https://github.com/ory/hydra)
|
||||
- [sisatech/vcli](https://github.com/sisatech/vcli)
|
||||
- [dairycart/dairycart](https://github.com/dairycart/dairycart)
|
||||
- [projectcalico/felix](https://github.com/projectcalico/felix)
|
||||
- [resin-os/balena](https://github.com/resin-os/balena)
|
||||
- [go-kivik/kivik](https://github.com/go-kivik/kivik)
|
||||
- [Telefonica/govice](https://github.com/Telefonica/govice)
|
||||
- [supergiant/supergiant](supergiant/supergiant)
|
||||
- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
|
||||
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
|
||||
- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
|
||||
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
|
||||
- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
|
||||
- [russross/canvasassignments](https://github.com/russross/canvasassignments)
|
||||
@@ -75,17 +17,12 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month
|
||||
- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
|
||||
- [divshot/gitling](https://github.com/divshot/gitling)
|
||||
- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
|
||||
- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
|
||||
- [elwinar/rambler](https://github.com/elwinar/rambler)
|
||||
- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
|
||||
- [jfbus/impressionist](https://github.com/jfbus/impressionist)
|
||||
- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
|
||||
- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
|
||||
- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
|
||||
- [thoas/picfit](https://github.com/thoas/picfit)
|
||||
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
|
||||
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
|
||||
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
|
||||
|
||||
[![Build Status][1]][2]
|
||||
[](https://godoc.org/github.com/imdario/mergo)
|
||||
|
||||
[1]: https://travis-ci.org/imdario/mergo.png
|
||||
[2]: https://travis-ci.org/imdario/mergo
|
||||
|
||||
## Installation
|
||||
|
@@ -98,116 +35,25 @@ If Mergo is useful to you, consider buying me a coffee, a beer or making a month

## Usage

You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. It won't merge empty struct values either, as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value). Maps are also merged recursively, except for structs inside maps (because they are not addressable via Go reflection).

You can only merge same-type structs with exported fields initialized as the zero value of their type, and same-type maps. Mergo won't merge unexported (private) fields but will recursively merge any exported one. Maps are also merged recursively, except for structs inside maps (because they are not addressable via Go reflection).

```go
if err := mergo.Merge(&dst, src); err != nil {
    // ...
}
```

    if err := mergo.Merge(&dst, src); err != nil {
        // ...
    }

Also, you can merge overwriting values using the `WithOverride` option.

Additionally, you can map a map[string]interface{} to a struct (and the other way around, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.

```go
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
    // ...
}
```

    if err := mergo.Map(&dst, srcMap); err != nil {
        // ...
    }

Additionally, you can map a `map[string]interface{}` to a struct (and the other way around, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.

```go
if err := mergo.Map(&dst, srcMap); err != nil {
    // ...
}
```

Warning: if you map a struct to a map, it won't be done recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will just be assigned as values.

Warning: if you map a struct to a map, it won't be done recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}. They will just be assigned as values.

More information and examples in the [godoc documentation](http://godoc.org/github.com/imdario/mergo).

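To make that struct-to-map warning concrete, here is a minimal, hedged sketch (the `Inner`/`Outer` types are illustrative, not from mergo's docs) showing that a nested struct is assigned as a value rather than expanded into a nested map:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Inner and Outer are hypothetical types for illustration only.
type Inner struct{ X int }

type Outer struct {
	Name  string
	Inner Inner
}

func main() {
	dst := map[string]interface{}{}
	src := Outer{Name: "demo", Inner: Inner{X: 7}}
	// Keys are the lower-cased field names; Inner is stored as a
	// main.Inner value, not converted to a map[string]interface{}.
	if err := mergo.Map(&dst, src); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%#v\n", dst["inner"])
}
```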
### Nice example

```go
package main

import (
	"fmt"
	"github.com/imdario/mergo"
)

type Foo struct {
	A string
	B int64
}

func main() {
	src := Foo{
		A: "one",
		B: 2,
	}
	dest := Foo{
		A: "two",
	}
	mergo.Merge(&dest, src)
	fmt.Println(dest)
	// Will print
	// {two 2}
}
```

Note: if tests are failing due to a missing package, please execute:

    go get gopkg.in/yaml.v2

### Transformers

Transformers allow you to merge specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it has no single zero value of its own, but `IsZero` can return true because all of its fields hold zero values. How can we merge a non-zero `time.Time`?

```go
package main

import (
	"fmt"
	"github.com/imdario/mergo"
	"reflect"
	"time"
)

type timeTransfomer struct {
}

func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
	if typ == reflect.TypeOf(time.Time{}) {
		return func(dst, src reflect.Value) error {
			if dst.CanSet() {
				isZero := dst.MethodByName("IsZero")
				result := isZero.Call([]reflect.Value{})
				if result[0].Bool() {
					dst.Set(src)
				}
			}
			return nil
		}
	}
	return nil
}

type Snapshot struct {
	Time time.Time
	// ...
}

func main() {
	src := Snapshot{time.Now()}
	dest := Snapshot{}
	mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{}))
	fmt.Println(dest)
	// Will print
	// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
}
```

    go get gopkg.in/yaml.v1

## Contact me

54 vendor/github.com/imdario/mergo/map.go generated vendored

@@ -31,8 +31,7 @@ func isExported(field reflect.StructField) bool {
// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
	overwrite := config.Overwrite
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
	if dst.CanAddr() {
		addr := dst.UnsafeAddr()
		h := 17 * addr
@@ -58,17 +57,10 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
			}
			fieldName := field.Name
			fieldName = changeInitialCase(fieldName, unicode.ToLower)
			if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) {
			if v, ok := dstMap[fieldName]; !ok || isEmptyValue(reflect.ValueOf(v)) {
				dstMap[fieldName] = src.Field(i).Interface()
			}
		}
	case reflect.Ptr:
		if dst.IsNil() {
			v := reflect.New(dst.Type().Elem())
			dst.Set(v)
		}
		dst = dst.Elem()
		fallthrough
	case reflect.Struct:
		srcMap := src.Interface().(map[string]interface{})
		for key := range srcMap {
@@ -93,24 +85,21 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
				srcKind = reflect.Ptr
			}
		}

		if !srcElement.IsValid() {
			continue
		}
		if srcKind == dstKind {
			if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
				return
			}
		} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
			if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
				return
			}
		} else if srcKind == reflect.Map {
			if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
			if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
				return
			}
		} else {
			return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
			if srcKind == reflect.Map {
				if err = deepMap(dstElement, srcElement, visited, depth+1); err != nil {
					return
				}
			} else {
				return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
			}
		}
	}
}
@@ -128,35 +117,18 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf
// doesn't apply if dst is a map.
// This is separated method from Merge because it is cleaner and it keeps sane
// semantics: merging equal types, mapping different (restricted) types.
func Map(dst, src interface{}, opts ...func(*Config)) error {
	return _map(dst, src, opts...)
}

// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: Use Map(…) with WithOverride
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
	return _map(dst, src, append(opts, WithOverride)...)
}

func _map(dst, src interface{}, opts ...func(*Config)) error {
func Map(dst, src interface{}) error {
	var (
		vDst, vSrc reflect.Value
		err        error
	)
	config := &Config{}

	for _, opt := range opts {
		opt(config)
	}

	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
		return err
	}
	// To be friction-less, we redirect equal-type arguments
	// to deepMerge. Only because arguments can be anything.
	if vSrc.Kind() == vDst.Kind() {
		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
		return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
	}
	switch vSrc.Kind() {
	case reflect.Struct:
@@ -170,5 +142,5 @@ func _map(dst, src interface{}, opts ...func(*Config)) error {
	default:
		return ErrNotSupported
	}
	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
	return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0)
}

190 vendor/github.com/imdario/mergo/merge.go generated vendored

@@ -12,34 +12,10 @@ import (
	"reflect"
)

func hasExportedField(dst reflect.Value) (exported bool) {
	for i, n := 0, dst.NumField(); i < n; i++ {
		field := dst.Type().Field(i)
		if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
			exported = exported || hasExportedField(dst.Field(i))
		} else {
			exported = exported || len(field.PkgPath) == 0
		}
	}
	return
}

type Config struct {
	Overwrite    bool
	AppendSlice  bool
	Transformers Transformers
}

type Transformers interface {
	Transformer(reflect.Type) func(dst, src reflect.Value) error
}

// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
	overwrite := config.Overwrite

func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) {
	if !src.IsValid() {
		return
	}
@@ -56,190 +32,68 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co
		// Remember, remember...
		visited[h] = &visit{addr, typ, seen}
	}

	if config.Transformers != nil && !isEmptyValue(dst) {
		if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
			err = fn(dst, src)
			return
		}
	}

	switch dst.Kind() {
	case reflect.Struct:
		if hasExportedField(dst) {
			for i, n := 0, dst.NumField(); i < n; i++ {
				if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
					return
				}
			}
		} else {
			if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
				dst.Set(src)
		for i, n := 0, dst.NumField(); i < n; i++ {
			if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1); err != nil {
				return
			}
		}
	case reflect.Map:
		if dst.IsNil() && !src.IsNil() {
			dst.Set(reflect.MakeMap(dst.Type()))
		}
		for _, key := range src.MapKeys() {
			srcElement := src.MapIndex(key)
			if !srcElement.IsValid() {
				continue
			}
			dstElement := dst.MapIndex(key)
			switch srcElement.Kind() {
			case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
				if srcElement.IsNil() {
					continue
				}
				switch reflect.TypeOf(srcElement.Interface()).Kind() {
				case reflect.Struct:
					fallthrough
				default:
					if !srcElement.CanInterface() {
						continue
					}
					switch reflect.TypeOf(srcElement.Interface()).Kind() {
					case reflect.Struct:
						fallthrough
					case reflect.Ptr:
						fallthrough
					case reflect.Map:
						srcMapElm := srcElement
						dstMapElm := dstElement
						if srcMapElm.CanInterface() {
							srcMapElm = reflect.ValueOf(srcMapElm.Interface())
							if dstMapElm.IsValid() {
								dstMapElm = reflect.ValueOf(dstMapElm.Interface())
							}
						}
						if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
							return
						}
					case reflect.Slice:
						srcSlice := reflect.ValueOf(srcElement.Interface())

						var dstSlice reflect.Value
						if !dstElement.IsValid() || dstElement.IsNil() {
							dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
						} else {
							dstSlice = reflect.ValueOf(dstElement.Interface())
						}

						if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
							dstSlice = srcSlice
						} else if config.AppendSlice {
							dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
						}
						dst.SetMapIndex(key, dstSlice)
					case reflect.Map:
						if err = deepMerge(dstElement, srcElement, visited, depth+1); err != nil {
							return
						}
					}
					if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map {
						continue
					}

					if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) {
						if dst.IsNil() {
							dst.Set(reflect.MakeMap(dst.Type()))
						}
						if !dstElement.IsValid() {
							dst.SetMapIndex(key, srcElement)
						}
					}
	case reflect.Slice:
		if !dst.CanSet() {
			break
		}
		if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice {
			dst.Set(src)
		} else if config.AppendSlice {
			dst.Set(reflect.AppendSlice(dst, src))
		}
	case reflect.Ptr:
		fallthrough
	case reflect.Interface:
		if src.IsNil() {
			break
		}
		if src.Kind() != reflect.Interface {
			if dst.IsNil() || overwrite {
				if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
					dst.Set(src)
				}
			} else if src.Kind() == reflect.Ptr {
				if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
					return
				}
			} else if dst.Elem().Type() == src.Type() {
				if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
					return
				}
			} else {
				return ErrDifferentArgumentsTypes
			}
			break
		}
		if dst.IsNil() || overwrite {
			if dst.CanSet() && (overwrite || isEmptyValue(dst)) {
		} else if dst.IsNil() {
			if dst.CanSet() && isEmptyValue(dst) {
				dst.Set(src)
			}
		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
		} else if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1); err != nil {
			return
		}
	default:
		if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) {
		if dst.CanSet() && !isEmptyValue(src) {
			dst.Set(src)
		}
	}
	return
}

// Merge will fill any empty for value type attributes on the dst struct using corresponding
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
// and dst must be a pointer to struct.
// It won't merge unexported (private) fields and will do recursively any exported field.
func Merge(dst, src interface{}, opts ...func(*Config)) error {
	return merge(dst, src, opts...)
}

// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
	return merge(dst, src, append(opts, WithOverride)...)
}

// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
func WithTransformers(transformers Transformers) func(*Config) {
	return func(config *Config) {
		config.Transformers = transformers
	}
}

// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
func WithOverride(config *Config) {
	config.Overwrite = true
}

// WithAppendSlice will make merge append slices instead of overwriting it
func WithAppendSlice(config *Config) {
	config.AppendSlice = true
}

func merge(dst, src interface{}, opts ...func(*Config)) error {
// Merge sets fields' values in dst from src if they have a zero
// value of their type.
// dst and src must be valid same-type structs and dst must be
// a pointer to struct.
// It won't merge unexported (private) fields and will do recursively
// any exported field.
func Merge(dst, src interface{}) error {
	var (
		vDst, vSrc reflect.Value
		err        error
	)

	config := &Config{}

	for _, opt := range opts {
		opt(config)
	}

	if vDst, vSrc, err = resolveValues(dst, src); err != nil {
		return err
	}
	if vDst.Type() != vSrc.Type() {
		return ErrDifferentArgumentsTypes
	}
	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
	return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0)
}

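For context on the hunks above: the removed side of this diff is the newer, option-based mergo API. A hedged sketch of how the removed helpers (`WithOverride`, `WithAppendSlice`) were used; the `Conf` type is illustrative only:

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

// Conf is a hypothetical type, used only to illustrate the options.
type Conf struct {
	Host string
	Tags []string
}

func main() {
	dst := Conf{Host: "localhost", Tags: []string{"a"}}
	src := Conf{Host: "example.com", Tags: []string{"b"}}
	// WithOverride replaces non-empty dst fields with non-empty src
	// fields; WithAppendSlice appends slices instead of replacing them.
	if err := mergo.Merge(&dst, src, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(dst.Host, dst.Tags) // example.com [a b]
}
```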
9 vendor/github.com/imdario/mergo/mergo.go generated vendored

@@ -32,7 +32,7 @@ type visit struct {
	next *visit
}

// From src/pkg/encoding/json/encode.go.
// From src/pkg/encoding/json.
func isEmptyValue(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
@@ -46,14 +46,7 @@ func isEmptyValue(v reflect.Value) bool {
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Interface, reflect.Ptr:
		if v.IsNil() {
			return true
		}
		return isEmptyValue(v.Elem())
	case reflect.Func:
		return v.IsNil()
	case reflect.Invalid:
		return true
	}
	return false
}

12 vendor/github.com/json-iterator/go/Gopkg.lock generated vendored

@@ -1,6 +1,12 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  name = "github.com/json-iterator/go"
  packages = ["."]
  revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
  version = "1.1.3"

[[projects]]
  name = "github.com/modern-go/concurrent"
  packages = ["."]
@@ -10,12 +16,12 @@
[[projects]]
  name = "github.com/modern-go/reflect2"
  packages = ["."]
  revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
  version = "1.0.1"
  revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
  version = "1.0.0"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
  inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793"
  solver-name = "gps-cdcl"
  solver-version = 1

2 vendor/github.com/json-iterator/go/Gopkg.toml generated vendored

@@ -23,4 +23,4 @@ ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com

[[constraint]]
  name = "github.com/modern-go/reflect2"
  version = "1.0.1"
  version = "1.0.0"

1 vendor/github.com/modern-go/reflect2/.travis.yml generated vendored

@@ -6,7 +6,6 @@ go:

before_install:
  - go get -t -v ./...
  - go get -t -v github.com/modern-go/reflect2-tests/...

script:
  - ./test.sh

2 vendor/github.com/modern-go/reflect2/Gopkg.toml generated vendored

@@ -24,7 +24,7 @@
# go-tests = true
# unused-packages = true

ignored = []
ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"]

[[constraint]]
  name = "github.com/modern-go/concurrent"

3 vendor/github.com/modern-go/reflect2/reflect2.go generated vendored

@@ -150,9 +150,6 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
}

func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
	if type1 == nil {
		return nil
	}
	cacheKey := uintptr(unpackEFace(type1).data)
	typeObj, found := cfg.cache.Load(cacheKey)
	if found {

2 vendor/github.com/modern-go/reflect2/test.sh generated vendored

@@ -3,7 +3,7 @@
set -e
echo "" > coverage.txt

for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
for d in $(go list ./... | grep -v vendor); do
	go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
	if [ -f profile.out ]; then
		cat profile.out >> coverage.txt

16 vendor/github.com/modern-go/reflect2/type_map.go generated vendored

@@ -4,7 +4,6 @@ import (
	"reflect"
	"runtime"
	"strings"
	"sync"
	"unsafe"
)

@@ -16,17 +15,10 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)

// initOnce guards initialization of types and packages
var initOnce sync.Once

var types map[string]reflect.Type
var packages map[string]map[string]reflect.Type

// discoverTypes initializes types and packages
func discoverTypes() {
	types = make(map[string]reflect.Type)
	packages = make(map[string]map[string]reflect.Type)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}

func init() {
	ver := runtime.Version()
	if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
		loadGo15Types()
@@ -98,13 +90,11 @@ type emptyInterface struct {

// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
	initOnce.Do(discoverTypes)
	return Type2(types[typeName])
}

// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
	initOnce.Do(discoverTypes)
	pkgTypes := packages[pkgPath]
	if pkgTypes == nil {
		return nil

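A short, hedged sketch of the `TypeByName` API touched in the hunk above (the lookup only finds types that are actually linked into the binary, hence the otherwise-unused reference):

```go
package main

import (
	"fmt"
	"time"

	"github.com/modern-go/reflect2"
)

// Reference the type so the linker keeps it discoverable via typelinks.
var _ = time.Second

func main() {
	// Look up a type by its fully qualified name, like Class.forName in Java.
	t := reflect2.TypeByName("time.Duration")
	if t == nil {
		fmt.Println("type not found in this binary")
		return
	}
	fmt.Println(t.Type1()) // the underlying reflect.Type
}
```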
19 vendor/github.com/peterbourgon/diskv/LICENSE generated vendored Normal file

@@ -0,0 +1,19 @@
Copyright (c) 2011-2012 Peter Bourgon

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
141 vendor/github.com/peterbourgon/diskv/README.md generated vendored Normal file

@@ -0,0 +1,141 @@
# What is diskv?

Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.

[![Build Status][1]][2]

[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest


# Installing

Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,

```bash
$ go get github.com/peterbourgon/diskv
```

[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install


# Usage

```go
package main

import (
	"fmt"
	"github.com/peterbourgon/diskv"
)

func main() {
	// Simplest transform function: put all the data files into the base dir.
	flatTransform := func(s string) []string { return []string{} }

	// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    flatTransform,
		CacheSizeMax: 1024 * 1024,
	})

	// Write three bytes to the key "alpha".
	key := "alpha"
	d.Write(key, []byte{'1', '2', '3'})

	// Read the value back out of the store.
	value, _ := d.Read(key)
	fmt.Printf("%v\n", value)

	// Erase the key+value from the store (and the disk).
	d.Erase(key)
}
```

More complex examples can be found in the "examples" subdirectory.


# Theory

## Basic idea

At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,

```go
func SimpleTransform(key string) []string {
	return []string{}
}
```

will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.

[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1

**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then

```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val)    // Error: attempted write to <base>/abc/abc, but it's a directory
```

This will be addressed in an upcoming version of diskv.

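As a concrete illustration of the 3-character splitting scheme mentioned in the note above, a minimal sketch (the function name is illustrative, not part of diskv):

```go
package main

import "fmt"

// naiveBlockTransform splits a key into 3-character path segments, the
// scheme from the note above. It exhibits exactly the caveat described:
// "abc" resolves to the directory already created for "abcabc".
func naiveBlockTransform(key string) []string {
	const blockSize = 3
	var path []string
	for i := 0; i < len(key); i += blockSize {
		end := i + blockSize
		if end > len(key) {
			end = len(key)
		}
		path = append(path, key[i:end])
	}
	return path
}

func main() {
	fmt.Println(naiveBlockTransform("abcabc")) // [abc abc]
	fmt.Println(naiveBlockTransform("abcd"))   // [abc d]
}
```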
Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.

## Adding a cache

An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.

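A minimal sketch of the mutex-guarded map pattern described here (the general shape, not diskv's actual internals):

```go
package main

import (
	"fmt"
	"sync"
)

// byteCache is an RWMutex-guarded map: many concurrent readers,
// exclusive writers.
type byteCache struct {
	mu   sync.RWMutex
	data map[string][]byte
}

func (c *byteCache) get(key string) ([]byte, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	v, ok := c.data[key]
	return v, ok
}

func (c *byteCache) set(key string, val []byte) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.data[key] = val
}

func main() {
	c := &byteCache{data: map[string][]byte{}}
	c.set("alpha", []byte{'1', '2', '3'})
	v, _ := c.get("alpha")
	fmt.Println(v)
}
```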
## Adding order

diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.

[7]: https://github.com/google/btree

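A hedged sketch of wiring in the provided BTree-backed index via the Options fields that appear in this vendored code:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:  "ordered-data",
		Index:     &diskv.BTreeIndex{},
		IndexLess: func(a, b string) bool { return a < b },
	})

	for _, k := range []string{"b", "a", "c"} {
		d.Write(k, []byte(k))
	}

	// First two keys, in Less order, starting from the beginning.
	fmt.Println(d.Index.Keys("", 2)) // [a b]
}
```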
## Adding compression

Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.

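For instance, a brief sketch using one of the stdlib-backed helpers defined in compression.go below:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Values are gzip-compressed on disk (and in the cache); reads are
	// decompressed transparently.
	d := diskv.New(diskv.Options{
		BasePath:    "compressed-data",
		Compression: diskv.NewGzipCompression(),
	})

	d.Write("greeting", []byte("hello, world"))
	val, _ := d.Read("greeting")
	fmt.Println(string(val)) // hello, world
}
```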
## Streaming

diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.

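A brief sketch of the streaming API (signatures as they appear in diskv.go below):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "stream-data"})

	// Write from any io.Reader; the final argument requests an fsync
	// once the write completes.
	if err := d.WriteStream("big", strings.NewReader("lots of data"), true); err != nil {
		fmt.Println(err)
		return
	}

	// direct=true bypasses (and lazily evicts) the read cache.
	rc, err := d.ReadStream("big", true)
	if err != nil {
		fmt.Println(err)
		return
	}
	defer rc.Close()
}
```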
# Future plans

* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of
64 vendor/github.com/peterbourgon/diskv/compression.go generated vendored Normal file

@@ -0,0 +1,64 @@
package diskv

import (
	"compress/flate"
	"compress/gzip"
	"compress/zlib"
	"io"
)

// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
	Writer(dst io.Writer) (io.WriteCloser, error)
	Reader(src io.Reader) (io.ReadCloser, error)
}

// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
	return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
	return &genericCompression{
		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
	}
}

// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
	return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
	return NewZlibCompressionLevelDict(level, nil)
}

// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
	return &genericCompression{
		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
	}
}

type genericCompression struct {
	wf func(w io.Writer) (io.WriteCloser, error)
	rf func(r io.Reader) (io.ReadCloser, error)
}

func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
	return g.wf(dst)
}

func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
	return g.rf(src)
}

624 vendor/github.com/peterbourgon/diskv/diskv.go generated vendored Normal file

@@ -0,0 +1,624 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.

package diskv

import (
	"bytes"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"
	"sync"
	"syscall"
)

const (
	defaultBasePath             = "diskv"
	defaultFilePerm os.FileMode = 0666
	defaultPathPerm os.FileMode = 0777
)

var (
	defaultTransform   = func(s string) []string { return []string{} }
	errCanceled        = errors.New("canceled")
	errEmptyKey        = errors.New("empty key")
	errBadKey          = errors.New("bad key")
	errImportDirectory = errors.New("can't import a directory")
)

// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string

// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
	BasePath     string
	Transform    TransformFunction
	CacheSizeMax uint64 // bytes
	PathPerm     os.FileMode
	FilePerm     os.FileMode
	// If TempDir is set, it will enable filesystem atomic writes by
	// writing temporary files to that location before being moved
	// to BasePath.
	// Note that TempDir MUST be on the same device/partition as
	// BasePath.
	TempDir string

	Index     Index
	IndexLess LessFunction

	Compression Compression
}

// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
	Options
	mu        sync.RWMutex
	cache     map[string][]byte
	cacheSize uint64
}

// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
	if o.BasePath == "" {
		o.BasePath = defaultBasePath
	}
	if o.Transform == nil {
		o.Transform = defaultTransform
	}
	if o.PathPerm == 0 {
		o.PathPerm = defaultPathPerm
	}
	if o.FilePerm == 0 {
		o.FilePerm = defaultFilePerm
	}

	d := &Diskv{
		Options:   o,
		cache:     map[string][]byte{},
		cacheSize: 0,
	}

	if d.Index != nil && d.IndexLess != nil {
		d.Index.Initialize(d.IndexLess, d.Keys(nil))
	}

	return d
}

// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
	return d.WriteStream(key, bytes.NewBuffer(val), false)
}

// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
	if len(key) <= 0 {
		return errEmptyKey
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	return d.writeStreamWithLock(key, r, sync)
}

// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
	if d.TempDir != "" {
		if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
			return nil, fmt.Errorf("temp mkdir: %s", err)
		}
		f, err := ioutil.TempFile(d.TempDir, "")
		if err != nil {
			return nil, fmt.Errorf("temp file: %s", err)
		}

		if err := f.Chmod(d.FilePerm); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return nil, fmt.Errorf("chmod: %s", err)
		}
		return f, nil
	}

	mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
	f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
	if err != nil {
		return nil, fmt.Errorf("open file: %s", err)
	}
	return f, nil
}

// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
	if err := d.ensurePathWithLock(key); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	f, err := d.createKeyFileWithLock(key)
	if err != nil {
		return fmt.Errorf("create key file: %s", err)
	}

	wc := io.WriteCloser(&nopWriteCloser{f})
	if d.Compression != nil {
		wc, err = d.Compression.Writer(f)
		if err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("compression writer: %s", err)
		}
	}

	if _, err := io.Copy(wc, r); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("i/o copy: %s", err)
	}

	if err := wc.Close(); err != nil {
		f.Close()           // error deliberately ignored
		os.Remove(f.Name()) // error deliberately ignored
		return fmt.Errorf("compression close: %s", err)
	}

	if sync {
		if err := f.Sync(); err != nil {
			f.Close()           // error deliberately ignored
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("file sync: %s", err)
		}
	}

	if err := f.Close(); err != nil {
		return fmt.Errorf("file close: %s", err)
	}

	if f.Name() != d.completeFilename(key) {
		if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
			os.Remove(f.Name()) // error deliberately ignored
			return fmt.Errorf("rename: %s", err)
		}
	}

	if d.Index != nil {
		d.Index.Insert(key)
	}

	d.bustCacheWithLock(key) // cache only on read

	return nil
}

// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
	if dstKey == "" {
		return errEmptyKey
	}

	if fi, err := os.Stat(srcFilename); err != nil {
		return err
	} else if fi.IsDir() {
		return errImportDirectory
	}

	d.mu.Lock()
	defer d.mu.Unlock()

	if err := d.ensurePathWithLock(dstKey); err != nil {
		return fmt.Errorf("ensure path: %s", err)
	}

	if move {
		if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
			d.bustCacheWithLock(dstKey)
			return nil
		} else if err != syscall.EXDEV {
			// If it failed due to being on a different device, fall back to copying
			return err
		}
	}

	f, err := os.Open(srcFilename)
	if err != nil {
		return err
	}
	defer f.Close()
	err = d.writeStreamWithLock(dstKey, f, false)
	if err == nil && move {
		err = os.Remove(srcFilename)
	}
	return err
}

// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
	rc, err := d.ReadStream(key, false)
	if err != nil {
		return []byte{}, err
	}
	defer rc.Close()
	return ioutil.ReadAll(rc)
}

// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
	d.mu.RLock()
	defer d.mu.RUnlock()

	if val, ok := d.cache[key]; ok {
		if !direct {
			buf := bytes.NewBuffer(val)
			if d.Compression != nil {
				return d.Compression.Reader(buf)
			}
			return ioutil.NopCloser(buf), nil
		}

		go func() {
			d.mu.Lock()
			defer d.mu.Unlock()
			d.uncacheWithLock(key, uint64(len(val)))
		}()
	}

	return d.readWithRLock(key)
}

// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
	filename := d.completeFilename(key)

	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, os.ErrNotExist
	}

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	var r io.Reader
	if d.CacheSizeMax > 0 {
		r = newSiphon(f, d, key)
	} else {
		r = &closingReader{f}
	}

	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			return nil, err
		}
	}

	return rc, nil
}

// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
	rc io.ReadCloser
}

func (cr closingReader) Read(p []byte) (int, error) {
	n, err := cr.rc.Read(p)
	if err == io.EOF {
		if closeErr := cr.rc.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
	}
	return n, err
}

// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File
	d   *Diskv
	key string
	buf *bytes.Buffer
}

// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
	return &siphon{
		f:   f,
		d:   d,
		key: key,
		buf: &bytes.Buffer{},
	}
}

// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)

	if err == nil {
		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
	}

	if err == io.EOF {
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
		return n, err
	}

	return n, err
}

// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.bustCacheWithLock(key)

	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}

	// erase from disk
	filename := d.completeFilename(key)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}

	// clean up and return
	d.pruneDirsWithLock(key)
	return nil
}

// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.cache = make(map[string][]byte)
	d.cacheSize = 0
	if d.TempDir != "" {
		os.RemoveAll(d.TempDir) // errors ignored
	}
	return os.RemoveAll(d.BasePath)
}

// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	if _, ok := d.cache[key]; ok {
		return true
	}

	filename := d.completeFilename(key)
	s, err := os.Stat(filename)
	if err != nil {
		return false
	}
	if s.IsDir() {
		return false
	}

	return true
}

// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	return d.KeysPrefix("", cancel)
}

// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
	var prepath string
	if prefix == "" {
		prepath = d.BasePath
	} else {
		prepath = d.pathFor(prefix)
	}
	c := make(chan string)
	go func() {
		filepath.Walk(prepath, walker(c, prefix, cancel))
		close(c)
	}()
	return c
}

// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
			return nil // "pass"
		}

		select {
		case c <- info.Name():
		case <-cancel:
			return errCanceled
		}

		return nil
	}
}

// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}

// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
	return os.MkdirAll(d.pathFor(key), d.PathPerm)
}

// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
	return filepath.Join(d.pathFor(key), key)
}

// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}

	// be very strict about memory guarantees
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}

	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}

// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}

func (d *Diskv) bustCacheWithLock(key string) {
	if val, ok := d.cache[key]; ok {
		d.uncacheWithLock(key, uint64(len(val)))
	}
}

func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}

// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.Transform(key)
	for i := range pathlist {
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}

		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // has subdirs -- do not prune
		}
		if err = os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}

// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}

	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

	for key, val := range d.cache {
		if safe() {
			break
		}

		d.uncacheWithLock(key, uint64(len(val)))
	}

	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}

	return nil
}

// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
	io.Writer
}

func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error                { return nil }

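A short sketch of the cancelable key iteration defined by Keys/KeysPrefix above:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{BasePath: "my-data-dir"})

	cancel := make(chan struct{})
	defer close(cancel) // closing cancel stops the underlying walk early

	// Only keys beginning with "user-"; order is undefined.
	for key := range d.KeysPrefix("user-", cancel) {
		fmt.Println(key)
	}
}
```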
115 vendor/github.com/peterbourgon/diskv/index.go generated vendored Normal file

@@ -0,0 +1,115 @@
package diskv

import (
	"sync"

	"github.com/google/btree"
)

// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
	Initialize(less LessFunction, keys <-chan string)
	Insert(key string)
	Delete(key string)
	Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool

// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
	s string
	l LessFunction
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
	return s.l(s.s, i.(btreeString).s)
}

// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
	sync.RWMutex
	LessFunction
	*btree.BTree
}

// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
	i.Lock()
	defer i.Unlock()
	i.LessFunction = less
	i.BTree = rebuild(less, keys)
}

// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}

// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}

// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
	i.RLock()
	defer i.RUnlock()

	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}

	if i.BTree.Len() <= 0 {
		return []string{}
	}

	btreeFrom := btreeString{s: from, l: i.LessFunction}
	skipFirst := true
	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
		// no such key, so fabricate an always-smallest item
		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
		skipFirst = false
	}

	keys := []string{}
	iterator := func(i btree.Item) bool {
		keys = append(keys, i.(btreeString).s)
		return len(keys) < n
	}
	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)

	if skipFirst && len(keys) > 0 {
		keys = keys[1:]
	}

	return keys
}

// rebuildIndex does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
	tree := btree.New(2)
	for key := range keys {
		tree.ReplaceOrInsert(btreeString{s: key, l: less})
	}
	return tree
}

||||
4
vendor/golang.org/x/crypto/ssh/terminal/util.go
generated
vendored
@@ -108,7 +108,9 @@ func ReadPassword(fd int) ([]byte, error) {
		return nil, err
	}

	defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
	defer func() {
		unix.IoctlSetTermios(fd, ioctlWriteTermios, termios)
	}()

	return readPasswordLine(passwordReader(fd))
}
36
vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go
generated
vendored
@@ -14,7 +14,7 @@ import (

// State contains the state of a terminal.
type State struct {
	termios unix.Termios
	state *unix.Termios
}

// IsTerminal returns true if the given file descriptor is a terminal.

@@ -75,43 +75,47 @@ func ReadPassword(fd int) ([]byte, error) {
// restored.
// see http://cr.illumos.org/~webrev/andy_js/1060/
func MakeRaw(fd int) (*State, error) {
	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		return nil, err
	}
	oldTermios := *oldTermiosPtr

	oldState := State{termios: *termios}
	newTermios := oldTermios
	newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON
	newTermios.Oflag &^= syscall.OPOST
	newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN
	newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB
	newTermios.Cflag |= syscall.CS8
	newTermios.Cc[unix.VMIN] = 1
	newTermios.Cc[unix.VTIME] = 0

	termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
	termios.Oflag &^= unix.OPOST
	termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
	termios.Cflag &^= unix.CSIZE | unix.PARENB
	termios.Cflag |= unix.CS8
	termios.Cc[unix.VMIN] = 1
	termios.Cc[unix.VTIME] = 0

	if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil {
	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil {
		return nil, err
	}

	return &oldState, nil
	return &State{
		state: oldTermiosPtr,
	}, nil
}

// Restore restores the terminal connected to the given file descriptor to a
// previous state.
func Restore(fd int, oldState *State) error {
	return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios)
	return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state)
}

// GetState returns the current state of a terminal which may be useful to
// restore the terminal after a signal.
func GetState(fd int) (*State, error) {
	termios, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		return nil, err
	}

	return &State{termios: *termios}, nil
	return &State{
		state: oldTermiosPtr,
	}, nil
}

// GetSize returns the dimensions of the given terminal.
4
vendor/golang.org/x/crypto/ssh/terminal/util_windows.go
generated
vendored
@@ -89,7 +89,9 @@ func ReadPassword(fd int) ([]byte, error) {
		return nil, err
	}

	defer windows.SetConsoleMode(windows.Handle(fd), old)
	defer func() {
		windows.SetConsoleMode(windows.Handle(fd), old)
	}()

	var h windows.Handle
	p, _ := windows.GetCurrentProcess()
13
vendor/golang.org/x/oauth2/.travis.yml
generated
vendored
@@ -1,13 +0,0 @@
language: go

go:
  - tip

install:
  - export GOPATH="$HOME/gopath"
  - mkdir -p "$GOPATH/src/golang.org/x"
  - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
  - go get -v -t -d golang.org/x/oauth2/...

script:
  - go test -v golang.org/x/oauth2/...
31
vendor/golang.org/x/oauth2/CONTRIBUTING.md
generated
vendored
@@ -1,31 +0,0 @@
# Contributing to Go

Go is an open source project.

It is the work of hundreds of contributors. We appreciate your help!

## Filing issues

When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions:

1. What version of Go are you using (`go version`)?
2. What operating system and processor architecture are you using?
3. What did you do?
4. What did you expect to see?
5. What did you see instead?

General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker.
The gophers there will answer or ask you to file an issue if you've tripped over a bug.

## Contributing code

Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html)
before sending patches.

**We do not accept GitHub pull requests**
(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review).

Unless otherwise noted, the Go source files are distributed under
the BSD-style license found in the LICENSE file.
74
vendor/golang.org/x/oauth2/README.md
generated
vendored
@@ -1,74 +0,0 @@
# OAuth2 for Go

[Build Status](https://travis-ci.org/golang/oauth2)
[GoDoc](https://godoc.org/golang.org/x/oauth2)

The oauth2 package contains a client implementation for the OAuth 2.0 spec.

## Installation

~~~~
go get golang.org/x/oauth2
~~~~

See godoc for further documentation and examples.

* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)

## App Engine

In change 96e89be (March 2015) we removed the `oauth2.Context2` type in favor
of the [`context.Context`](https://golang.org/x/net/context#Context) type from
the `golang.org/x/net/context` package.

This means it's no longer possible to use the "Classic App Engine"
`appengine.Context` type with the `oauth2` package. (You're using
Classic App Engine if you import the package `"appengine"`.)

To work around this, you may use the new `"google.golang.org/appengine"`
package. This package has almost the same API as the `"appengine"` package,
but it can be fetched with `go get` and used on "Managed VMs" as well as
Classic App Engine.

See the [new `appengine` package's readme](https://github.com/golang/appengine#updating-a-go-app-engine-app)
for information on updating your app.

If you don't want to update your entire app to use the new App Engine packages,
you may use both sets of packages in parallel, using only the new packages
with the `oauth2` package.

	import (
		"golang.org/x/net/context"
		"golang.org/x/oauth2"
		"golang.org/x/oauth2/google"
		newappengine "google.golang.org/appengine"
		newurlfetch "google.golang.org/appengine/urlfetch"

		"appengine"
	)

	func handler(w http.ResponseWriter, r *http.Request) {
		var c appengine.Context = appengine.NewContext(r)
		c.Infof("Logging a message with the old package")

		var ctx context.Context = newappengine.NewContext(r)
		client := &http.Client{
			Transport: &oauth2.Transport{
				Source: google.AppEngineTokenSource(ctx, "scope"),
				Base:   &newurlfetch.Transport{Context: ctx},
			},
		}
		client.Get("...")
	}

## Contributing

We appreciate your help!

To contribute, please read the contribution guidelines:
	https://golang.org/doc/contribute.html

Note that the Go project does not use GitHub pull requests but
uses Gerrit for code reviews. See the contribution guide for details.
25
vendor/golang.org/x/oauth2/client_appengine.go
generated
vendored
@@ -1,25 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build appengine

// App Engine hooks.

package oauth2

import (
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/internal"
	"google.golang.org/appengine/urlfetch"
)

func init() {
	internal.RegisterContextClientFunc(contextClientAppEngine)
}

func contextClientAppEngine(ctx context.Context) (*http.Client, error) {
	return urlfetch.Client(ctx), nil
}
76
vendor/golang.org/x/oauth2/internal/oauth2.go
generated
vendored
@@ -1,76 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal

import (
	"bufio"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"errors"
	"fmt"
	"io"
	"strings"
)

// ParseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the private key
// from the PEM container before conversion. It only supports PEM
// containers with no passphrase.
func ParseKey(key []byte) (*rsa.PrivateKey, error) {
	block, _ := pem.Decode(key)
	if block != nil {
		key = block.Bytes
	}
	parsedKey, err := x509.ParsePKCS8PrivateKey(key)
	if err != nil {
		parsedKey, err = x509.ParsePKCS1PrivateKey(key)
		if err != nil {
			return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err)
		}
	}
	parsed, ok := parsedKey.(*rsa.PrivateKey)
	if !ok {
		return nil, errors.New("private key is invalid")
	}
	return parsed, nil
}

func ParseINI(ini io.Reader) (map[string]map[string]string, error) {
	result := map[string]map[string]string{
		"": {}, // root section
	}
	scanner := bufio.NewScanner(ini)
	currentSection := ""
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if strings.HasPrefix(line, ";") {
			// comment.
			continue
		}
		if strings.HasPrefix(line, "[") && strings.HasSuffix(line, "]") {
			currentSection = strings.TrimSpace(line[1 : len(line)-1])
			result[currentSection] = map[string]string{}
			continue
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) == 2 && parts[0] != "" {
			result[currentSection][strings.TrimSpace(parts[0])] = strings.TrimSpace(parts[1])
		}
	}
	if err := scanner.Err(); err != nil {
		return nil, fmt.Errorf("error scanning ini: %v", err)
	}
	return result, nil
}

func CondVal(v string) []string {
	if v == "" {
		return nil
	}
	return []string{v}
}
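ParseINI above is small enough that a sketch of its input/output shape may help. This is a test-style example, not part of the diff: the INI contents are invented, and since ParseINI lives in the internal package, the call assumes it runs inside that package.

```go
package internal

import (
	"fmt"
	"strings"
)

// ExampleParseINI shows the section -> key -> value map ParseINI returns.
// (Illustrative only; the credential file contents are invented.)
func ExampleParseINI() {
	src := strings.NewReader(`; root-section defaults
client_id = abc

[web]
redirect_uri = https://example.com/cb
`)
	sections, err := ParseINI(src)
	if err != nil {
		panic(err)
	}
	fmt.Println(sections[""]["client_id"])       // abc
	fmt.Println(sections["web"]["redirect_uri"]) // https://example.com/cb
}
```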
247
vendor/golang.org/x/oauth2/internal/token.go
generated
vendored
@@ -1,247 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"mime"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"golang.org/x/net/context"
)

// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// This type is a mirror of oauth2.Token and exists to break
// an otherwise-circular dependency. Other internal packages
// should convert this Token into an oauth2.Token before use.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time

	// Raw optionally contains extra metadata from the server
	// when updating a token.
	Raw interface{}
}

// tokenJSON is the struct representing the HTTP response from OAuth2
// providers returning a token in JSON form.
type tokenJSON struct {
	AccessToken  string         `json:"access_token"`
	TokenType    string         `json:"token_type"`
	RefreshToken string         `json:"refresh_token"`
	ExpiresIn    expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number
	Expires      expirationTime `json:"expires"`    // broken Facebook spelling of expires_in
}

func (e *tokenJSON) expiry() (t time.Time) {
	if v := e.ExpiresIn; v != 0 {
		return time.Now().Add(time.Duration(v) * time.Second)
	}
	if v := e.Expires; v != 0 {
		return time.Now().Add(time.Duration(v) * time.Second)
	}
	return
}

type expirationTime int32

func (e *expirationTime) UnmarshalJSON(b []byte) error {
	var n json.Number
	err := json.Unmarshal(b, &n)
	if err != nil {
		return err
	}
	i, err := n.Int64()
	if err != nil {
		return err
	}
	*e = expirationTime(i)
	return nil
}

var brokenAuthHeaderProviders = []string{
	"https://accounts.google.com/",
	"https://api.codeswholesale.com/oauth/token",
	"https://api.dropbox.com/",
	"https://api.dropboxapi.com/",
	"https://api.instagram.com/",
	"https://api.netatmo.net/",
	"https://api.odnoklassniki.ru/",
	"https://api.pushbullet.com/",
	"https://api.soundcloud.com/",
	"https://api.twitch.tv/",
	"https://app.box.com/",
	"https://connect.stripe.com/",
	"https://graph.facebook.com", // see https://github.com/golang/oauth2/issues/214
	"https://login.microsoftonline.com/",
	"https://login.salesforce.com/",
	"https://oauth.sandbox.trainingpeaks.com/",
	"https://oauth.trainingpeaks.com/",
	"https://oauth.vk.com/",
	"https://openapi.baidu.com/",
	"https://slack.com/",
	"https://test-sandbox.auth.corp.google.com",
	"https://test.salesforce.com/",
	"https://user.gini.net/",
	"https://www.douban.com/",
	"https://www.googleapis.com/",
	"https://www.linkedin.com/",
	"https://www.strava.com/oauth/",
	"https://www.wunderlist.com/oauth/",
	"https://api.patreon.com/",
	"https://sandbox.codeswholesale.com/oauth/token",
}

// brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints.
var brokenAuthHeaderDomains = []string{
	".force.com",
	".okta.com",
	".oktapreview.com",
}

func RegisterBrokenAuthHeaderProvider(tokenURL string) {
	brokenAuthHeaderProviders = append(brokenAuthHeaderProviders, tokenURL)
}

// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
// implements the OAuth2 spec correctly.
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
// In summary:
// - Reddit only accepts client secret in the Authorization header
// - Dropbox accepts either it in URL param or Auth header, but not both.
// - Google only accepts URL param (not spec compliant?), not Auth header
// - Stripe only accepts client secret in Auth header with Bearer method, not Basic
func providerAuthHeaderWorks(tokenURL string) bool {
	for _, s := range brokenAuthHeaderProviders {
		if strings.HasPrefix(tokenURL, s) {
			// Some sites fail to implement the OAuth2 spec fully.
			return false
		}
	}

	if u, err := url.Parse(tokenURL); err == nil {
		for _, s := range brokenAuthHeaderDomains {
			if strings.HasSuffix(u.Host, s) {
				return false
			}
		}
	}

	// Assume the provider implements the spec properly
	// otherwise. We can add more exceptions as they're
	// discovered. We will _not_ be adding configurable hooks
	// to this package to let users select server bugs.
	return true
}

func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values) (*Token, error) {
	hc, err := ContextClient(ctx)
	if err != nil {
		return nil, err
	}
	bustedAuth := !providerAuthHeaderWorks(tokenURL)
	if bustedAuth {
		if clientID != "" {
			v.Set("client_id", clientID)
		}
		if clientSecret != "" {
			v.Set("client_secret", clientSecret)
		}
	}
	req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode()))
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
	if !bustedAuth {
		req.SetBasicAuth(clientID, clientSecret)
	}
	r, err := hc.Do(req)
	if err != nil {
		return nil, err
	}
	defer r.Body.Close()
	body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
	if err != nil {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
	}
	if code := r.StatusCode; code < 200 || code > 299 {
		return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
	}

	var token *Token
	content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
	switch content {
	case "application/x-www-form-urlencoded", "text/plain":
		vals, err := url.ParseQuery(string(body))
		if err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  vals.Get("access_token"),
			TokenType:    vals.Get("token_type"),
			RefreshToken: vals.Get("refresh_token"),
			Raw:          vals,
		}
		e := vals.Get("expires_in")
		if e == "" {
			// TODO(jbd): Facebook's OAuth2 implementation is broken and
			// returns expires_in field in expires. Remove the fallback to expires,
			// when Facebook fixes their implementation.
			e = vals.Get("expires")
		}
		expires, _ := strconv.Atoi(e)
		if expires != 0 {
			token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
		}
	default:
		var tj tokenJSON
		if err = json.Unmarshal(body, &tj); err != nil {
			return nil, err
		}
		token = &Token{
			AccessToken:  tj.AccessToken,
			TokenType:    tj.TokenType,
			RefreshToken: tj.RefreshToken,
			Expiry:       tj.expiry(),
			Raw:          make(map[string]interface{}),
		}
		json.Unmarshal(body, &token.Raw) // no error checks for optional fields
	}
	// Don't overwrite `RefreshToken` with an empty value
	// if this was a token refreshing request.
	if token.RefreshToken == "" {
		token.RefreshToken = v.Get("refresh_token")
	}
	return token, nil
}
69
vendor/golang.org/x/oauth2/internal/transport.go
generated
vendored
@@ -1,69 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains support packages for oauth2 package.
package internal

import (
	"net/http"

	"golang.org/x/net/context"
)

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient ContextKey

// ContextKey is just an empty struct. It exists so HTTPClient can be
// an immutable public variable with a unique type. It's immutable
// because nobody else can create a ContextKey, being unexported.
type ContextKey struct{}

// ContextClientFunc is a func which tries to return an *http.Client
// given a Context value. If it returns an error, the search stops
// with that error. If it returns (nil, nil), the search continues
// down the list of registered funcs.
type ContextClientFunc func(context.Context) (*http.Client, error)

var contextClientFuncs []ContextClientFunc

func RegisterContextClientFunc(fn ContextClientFunc) {
	contextClientFuncs = append(contextClientFuncs, fn)
}

func ContextClient(ctx context.Context) (*http.Client, error) {
	if ctx != nil {
		if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok {
			return hc, nil
		}
	}
	for _, fn := range contextClientFuncs {
		c, err := fn(ctx)
		if err != nil {
			return nil, err
		}
		if c != nil {
			return c, nil
		}
	}
	return http.DefaultClient, nil
}

func ContextTransport(ctx context.Context) http.RoundTripper {
	hc, err := ContextClient(ctx)
	// This is a rare error case (somebody using nil on App Engine).
	if err != nil {
		return ErrorTransport{err}
	}
	return hc.Transport
}

// ErrorTransport returns the specified error on RoundTrip.
// This RoundTripper should be used in rare error cases where
// error handling can be postponed to response handling time.
type ErrorTransport struct{ Err error }

func (t ErrorTransport) RoundTrip(*http.Request) (*http.Response, error) {
	return nil, t.Err
}
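The ContextKey/ContextClient machinery above is what lets callers substitute their own HTTP client per request context. A minimal sketch using the public oauth2.HTTPClient alias (defined in oauth2.go below); the timeout value and URL are arbitrary placeholders.

```go
package main

import (
	"net/http"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

func main() {
	// A client stored under the HTTPClient key takes priority over
	// http.DefaultClient inside internal.ContextClient.
	custom := &http.Client{Timeout: 5 * time.Second}
	ctx := context.WithValue(context.Background(), oauth2.HTTPClient, custom)

	// With src == nil, NewClient just returns the context's client,
	// so every call derived from ctx uses the custom timeout.
	resp, err := oauth2.NewClient(ctx, nil).Get("https://example.com/")
	if err == nil {
		resp.Body.Close()
	}
}
```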
340
vendor/golang.org/x/oauth2/oauth2.go
generated
vendored
@@ -1,340 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package oauth2 provides support for making
// OAuth2 authorized and authenticated HTTP requests.
// It can additionally grant authorization with Bearer JWT.
package oauth2

import (
	"bytes"
	"errors"
	"net/http"
	"net/url"
	"strings"
	"sync"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/internal"
)

// NoContext is the default context you should supply if not using
// your own context.Context (see https://golang.org/x/net/context).
//
// Deprecated: Use context.Background() or context.TODO() instead.
var NoContext = context.TODO()

// RegisterBrokenAuthHeaderProvider registers an OAuth2 server
// identified by the tokenURL prefix as an OAuth2 implementation
// which doesn't support the HTTP Basic authentication
// scheme to authenticate with the authorization server.
// Once a server is registered, credentials (client_id and client_secret)
// will be passed as query parameters rather than being present
// in the Authorization header.
// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
func RegisterBrokenAuthHeaderProvider(tokenURL string) {
	internal.RegisterBrokenAuthHeaderProvider(tokenURL)
}

// Config describes a typical 3-legged OAuth2 flow, with both the
// client application information and the server's endpoint URLs.
// For the client credentials 2-legged OAuth2 flow, see the clientcredentials
// package (https://golang.org/x/oauth2/clientcredentials).
type Config struct {
	// ClientID is the application's ID.
	ClientID string

	// ClientSecret is the application's secret.
	ClientSecret string

	// Endpoint contains the resource server's token endpoint
	// URLs. These are constants specific to each server and are
	// often available via site-specific packages, such as
	// google.Endpoint or github.Endpoint.
	Endpoint Endpoint

	// RedirectURL is the URL to redirect users going through
	// the OAuth flow, after the resource owner's URLs.
	RedirectURL string

	// Scopes specifies optional requested permissions.
	Scopes []string
}

// A TokenSource is anything that can return a token.
type TokenSource interface {
	// Token returns a token or an error.
	// Token must be safe for concurrent use by multiple goroutines.
	// The returned Token must not be modified.
	Token() (*Token, error)
}

// Endpoint contains the OAuth 2.0 provider's authorization and token
// endpoint URLs.
type Endpoint struct {
	AuthURL  string
	TokenURL string
}

var (
	// AccessTypeOnline and AccessTypeOffline are options passed
	// to the Options.AuthCodeURL method. They modify the
	// "access_type" field that gets sent in the URL returned by
	// AuthCodeURL.
	//
	// Online is the default if neither is specified. If your
	// application needs to refresh access tokens when the user
	// is not present at the browser, then use offline. This will
	// result in your application obtaining a refresh token the
	// first time your application exchanges an authorization
	// code for a user.
	AccessTypeOnline  AuthCodeOption = SetAuthURLParam("access_type", "online")
	AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline")

	// ApprovalForce forces the users to view the consent dialog
	// and confirm the permissions request at the URL returned
	// from AuthCodeURL, even if they've already done so.
	ApprovalForce AuthCodeOption = SetAuthURLParam("approval_prompt", "force")
)

// An AuthCodeOption is passed to Config.AuthCodeURL.
type AuthCodeOption interface {
	setValue(url.Values)
}

type setParam struct{ k, v string }

func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }

// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters
// to a provider's authorization endpoint.
func SetAuthURLParam(key, value string) AuthCodeOption {
	return setParam{key, value}
}

// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
// that asks for permissions for the required scopes explicitly.
//
// State is a token to protect the user from CSRF attacks. You must
// always provide a non-zero string and validate that it matches the
// state query parameter on your redirect callback.
// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
//
// Opts may include AccessTypeOnline or AccessTypeOffline, as well
// as ApprovalForce.
func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
	var buf bytes.Buffer
	buf.WriteString(c.Endpoint.AuthURL)
	v := url.Values{
		"response_type": {"code"},
		"client_id":     {c.ClientID},
		"redirect_uri":  internal.CondVal(c.RedirectURL),
		"scope":         internal.CondVal(strings.Join(c.Scopes, " ")),
		"state":         internal.CondVal(state),
	}
	for _, opt := range opts {
		opt.setValue(v)
	}
	if strings.Contains(c.Endpoint.AuthURL, "?") {
		buf.WriteByte('&')
	} else {
		buf.WriteByte('?')
	}
	buf.WriteString(v.Encode())
	return buf.String()
}

// PasswordCredentialsToken converts a resource owner username and password
// pair into a token.
//
// Per the RFC, this grant type should only be used "when there is a high
// degree of trust between the resource owner and the client (e.g., the client
// is part of the device operating system or a highly privileged application),
// and when other authorization grant types are not available."
// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info.
//
// The HTTP client to use is derived from the context.
// If nil, http.DefaultClient is used.
func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) {
	return retrieveToken(ctx, c, url.Values{
		"grant_type": {"password"},
		"username":   {username},
		"password":   {password},
		"scope":      internal.CondVal(strings.Join(c.Scopes, " ")),
	})
}

// Exchange converts an authorization code into a token.
//
// It is used after a resource provider redirects the user back
// to the Redirect URI (the URL obtained from AuthCodeURL).
//
// The HTTP client to use is derived from the context.
// If a client is not provided via the context, http.DefaultClient is used.
//
// The code will be in the *http.Request.FormValue("code"). Before
// calling Exchange, be sure to validate FormValue("state").
func (c *Config) Exchange(ctx context.Context, code string) (*Token, error) {
	return retrieveToken(ctx, c, url.Values{
		"grant_type":   {"authorization_code"},
		"code":         {code},
		"redirect_uri": internal.CondVal(c.RedirectURL),
	})
}

// Client returns an HTTP client using the provided token.
// The token will auto-refresh as necessary. The underlying
// HTTP transport will be obtained using the provided context.
// The returned client and its Transport should not be modified.
func (c *Config) Client(ctx context.Context, t *Token) *http.Client {
	return NewClient(ctx, c.TokenSource(ctx, t))
}

// TokenSource returns a TokenSource that returns t until t expires,
// automatically refreshing it as necessary using the provided context.
//
// Most users will use Config.Client instead.
func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource {
	tkr := &tokenRefresher{
		ctx:  ctx,
		conf: c,
	}
	if t != nil {
		tkr.refreshToken = t.RefreshToken
	}
	return &reuseTokenSource{
		t:   t,
		new: tkr,
	}
}

// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
// HTTP requests to renew a token using a RefreshToken.
type tokenRefresher struct {
	ctx          context.Context // used to get HTTP requests
	conf         *Config
	refreshToken string
}

// WARNING: Token is not safe for concurrent access, as it
// updates the tokenRefresher's refreshToken field.
// Within this package, it is used by reuseTokenSource which
// synchronizes calls to this method with its own mutex.
func (tf *tokenRefresher) Token() (*Token, error) {
	if tf.refreshToken == "" {
		return nil, errors.New("oauth2: token expired and refresh token is not set")
	}

	tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{
		"grant_type":    {"refresh_token"},
		"refresh_token": {tf.refreshToken},
	})

	if err != nil {
		return nil, err
	}
	if tf.refreshToken != tk.RefreshToken {
		tf.refreshToken = tk.RefreshToken
	}
	return tk, err
}

// reuseTokenSource is a TokenSource that holds a single token in memory
// and validates its expiry before each call to retrieve it with
// Token. If it's expired, it will be auto-refreshed using the
// new TokenSource.
type reuseTokenSource struct {
	new TokenSource // called when t is expired.

	mu sync.Mutex // guards t
	t  *Token
}

// Token returns the current token if it's still valid, else will
// refresh the current token (using r.Context for HTTP client
// information) and return the new one.
func (s *reuseTokenSource) Token() (*Token, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.t.Valid() {
		return s.t, nil
	}
	t, err := s.new.Token()
	if err != nil {
		return nil, err
	}
	s.t = t
	return t, nil
}

// StaticTokenSource returns a TokenSource that always returns the same token.
// Because the provided token t is never refreshed, StaticTokenSource is only
// useful for tokens that never expire.
func StaticTokenSource(t *Token) TokenSource {
	return staticTokenSource{t}
}

// staticTokenSource is a TokenSource that always returns the same Token.
type staticTokenSource struct {
	t *Token
}

func (s staticTokenSource) Token() (*Token, error) {
	return s.t, nil
}

// HTTPClient is the context key to use with golang.org/x/net/context's
// WithValue function to associate an *http.Client value with a context.
var HTTPClient internal.ContextKey

// NewClient creates an *http.Client from a Context and TokenSource.
// The returned client is not valid beyond the lifetime of the context.
//
// As a special case, if src is nil, a non-OAuth2 client is returned
// using the provided context. This exists to support related OAuth2
// packages.
func NewClient(ctx context.Context, src TokenSource) *http.Client {
	if src == nil {
		c, err := internal.ContextClient(ctx)
		if err != nil {
			return &http.Client{Transport: internal.ErrorTransport{Err: err}}
		}
		return c
	}
	return &http.Client{
		Transport: &Transport{
			Base:   internal.ContextTransport(ctx),
			Source: ReuseTokenSource(nil, src),
		},
	}
}

// ReuseTokenSource returns a TokenSource which repeatedly returns the
// same token as long as it's valid, starting with t.
// When its cached token is invalid, a new token is obtained from src.
//
// ReuseTokenSource is typically used to reuse tokens from a cache
// (such as a file on disk) between runs of a program, rather than
// obtaining new tokens unnecessarily.
//
// The initial token t may be nil, in which case the TokenSource is
// wrapped in a caching version if it isn't one already. This also
// means it's always safe to wrap ReuseTokenSource around any other
// TokenSource without adverse effects.
func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
	// Don't wrap a reuseTokenSource in itself. That would work,
	// but cause an unnecessary number of mutex operations.
	// Just build the equivalent one.
	if rt, ok := src.(*reuseTokenSource); ok {
		if t == nil {
			// Just use it directly.
			return rt
		}
		src = rt.new
	}
	return &reuseTokenSource{
		t:   t,
		new: src,
	}
}
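As a usage note, the three-legged flow the file above describes reduces to AuthCodeURL → Exchange → Client. A minimal sketch under that reading, matching the API version shown (context from golang.org/x/net/context); the provider URLs, credentials, scope, and routes are placeholders, and state validation is elided to a comment:

```go
package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
)

var conf = &oauth2.Config{
	ClientID:     "CLIENT_ID",     // placeholder
	ClientSecret: "CLIENT_SECRET", // placeholder
	RedirectURL:  "https://example.com/callback",
	Scopes:       []string{"scope1"},
	Endpoint: oauth2.Endpoint{
		AuthURL:  "https://provider.example.com/o/oauth2/auth",
		TokenURL: "https://provider.example.com/o/oauth2/token",
	},
}

// Step 1: redirect the user to the consent page; "state" guards against CSRF.
func login(w http.ResponseWriter, r *http.Request) {
	http.Redirect(w, r, conf.AuthCodeURL("state", oauth2.AccessTypeOffline), http.StatusFound)
}

// Step 2: on the redirect back, swap the code for a token and build an
// auto-refreshing client from it.
func callback(w http.ResponseWriter, r *http.Request) {
	ctx := context.Background()
	// (validate r.FormValue("state") against the value sent in login)
	tok, err := conf.Exchange(ctx, r.FormValue("code"))
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	client := conf.Client(ctx, tok) // *http.Client that refreshes tok as needed
	resp, err := client.Get("https://provider.example.com/api/resource")
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	defer resp.Body.Close()
	fmt.Fprintln(w, resp.Status)
}

func main() {
	http.HandleFunc("/login", login)
	http.HandleFunc("/callback", callback)
	http.ListenAndServe(":8080", nil)
}
```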
158
vendor/golang.org/x/oauth2/token.go
generated
vendored
@@ -1,158 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package oauth2

import (
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"time"

	"golang.org/x/net/context"
	"golang.org/x/oauth2/internal"
)

// expiryDelta determines how much earlier a token should be considered
// expired than its actual expiration time. It is used to avoid late
// expirations due to client-server time mismatches.
const expiryDelta = 10 * time.Second

// Token represents the credentials used to authorize
// the requests to access protected resources on the OAuth 2.0
// provider's backend.
//
// Most users of this package should not access fields of Token
// directly. They're exported mostly for use by related packages
// implementing derivative OAuth2 flows.
type Token struct {
	// AccessToken is the token that authorizes and authenticates
	// the requests.
	AccessToken string `json:"access_token"`

	// TokenType is the type of token.
	// The Type method returns either this or "Bearer", the default.
	TokenType string `json:"token_type,omitempty"`

	// RefreshToken is a token that's used by the application
	// (as opposed to the user) to refresh the access token
	// if it expires.
	RefreshToken string `json:"refresh_token,omitempty"`

	// Expiry is the optional expiration time of the access token.
	//
	// If zero, TokenSource implementations will reuse the same
	// token forever and RefreshToken or equivalent
	// mechanisms for that TokenSource will not be used.
	Expiry time.Time `json:"expiry,omitempty"`

	// raw optionally contains extra metadata from the server
	// when updating a token.
	raw interface{}
}

// Type returns t.TokenType if non-empty, else "Bearer".
func (t *Token) Type() string {
	if strings.EqualFold(t.TokenType, "bearer") {
		return "Bearer"
	}
	if strings.EqualFold(t.TokenType, "mac") {
		return "MAC"
	}
	if strings.EqualFold(t.TokenType, "basic") {
		return "Basic"
	}
	if t.TokenType != "" {
		return t.TokenType
	}
	return "Bearer"
}

// SetAuthHeader sets the Authorization header to r using the access
// token in t.
//
// This method is unnecessary when using Transport or an HTTP Client
// returned by this package.
func (t *Token) SetAuthHeader(r *http.Request) {
	r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
}

// WithExtra returns a new Token that's a clone of t, but using the
// provided raw extra map. This is only intended for use by packages
// implementing derivative OAuth2 flows.
func (t *Token) WithExtra(extra interface{}) *Token {
	t2 := new(Token)
	*t2 = *t
	t2.raw = extra
	return t2
}

// Extra returns an extra field.
// Extra fields are key-value pairs returned by the server as a
// part of the token retrieval response.
func (t *Token) Extra(key string) interface{} {
	if raw, ok := t.raw.(map[string]interface{}); ok {
		return raw[key]
	}

	vals, ok := t.raw.(url.Values)
	if !ok {
		return nil
	}

	v := vals.Get(key)
	switch s := strings.TrimSpace(v); strings.Count(s, ".") {
	case 0: // Contains no "."; try to parse as int
		if i, err := strconv.ParseInt(s, 10, 64); err == nil {
			return i
		}
	case 1: // Contains a single "."; try to parse as float
		if f, err := strconv.ParseFloat(s, 64); err == nil {
			return f
		}
	}

	return v
}

// expired reports whether the token is expired.
// t must be non-nil.
func (t *Token) expired() bool {
	if t.Expiry.IsZero() {
		return false
	}
	return t.Expiry.Add(-expiryDelta).Before(time.Now())
}

// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
func (t *Token) Valid() bool {
	return t != nil && t.AccessToken != "" && !t.expired()
}

// tokenFromInternal maps an *internal.Token struct into
// a *Token struct.
func tokenFromInternal(t *internal.Token) *Token {
	if t == nil {
		return nil
	}
	return &Token{
		AccessToken:  t.AccessToken,
		TokenType:    t.TokenType,
		RefreshToken: t.RefreshToken,
		Expiry:       t.Expiry,
		raw:          t.Raw,
	}
}

// retrieveToken takes a *Config and uses that to retrieve an *internal.Token.
// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along
// with an error.
func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) {
	tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v)
	if err != nil {
		return nil, err
	}
	return tokenFromInternal(tk), nil
}
132
vendor/golang.org/x/oauth2/transport.go
generated
vendored
@@ -1,132 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package oauth2

import (
	"errors"
	"io"
	"net/http"
	"sync"
)

// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
// wrapping a base RoundTripper and adding an Authorization header
// with a token from the supplied Source.
//
// Transport is a low-level mechanism. Most code will use the
// higher-level Config.Client method instead.
type Transport struct {
	// Source supplies the token to add to outgoing requests'
	// Authorization headers.
	Source TokenSource

	// Base is the base RoundTripper used to make HTTP requests.
	// If nil, http.DefaultTransport is used.
	Base http.RoundTripper

	mu     sync.Mutex                      // guards modReq
	modReq map[*http.Request]*http.Request // original -> modified
}

// RoundTrip authorizes and authenticates the request with an
// access token. If no token exists or the token is expired,
// it tries to refresh/fetch a new token.
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.Source == nil {
		return nil, errors.New("oauth2: Transport's Source is nil")
	}
	token, err := t.Source.Token()
	if err != nil {
		return nil, err
	}

	req2 := cloneRequest(req) // per RoundTripper contract
	token.SetAuthHeader(req2)
	t.setModReq(req, req2)
	res, err := t.base().RoundTrip(req2)
	if err != nil {
		t.setModReq(req, nil)
		return nil, err
	}
	res.Body = &onEOFReader{
		rc: res.Body,
		fn: func() { t.setModReq(req, nil) },
	}
	return res, nil
}

// CancelRequest cancels an in-flight request by closing its connection.
func (t *Transport) CancelRequest(req *http.Request) {
	type canceler interface {
		CancelRequest(*http.Request)
	}
	if cr, ok := t.base().(canceler); ok {
		t.mu.Lock()
		modReq := t.modReq[req]
		delete(t.modReq, req)
		t.mu.Unlock()
		cr.CancelRequest(modReq)
	}
}

func (t *Transport) base() http.RoundTripper {
	if t.Base != nil {
		return t.Base
	}
	return http.DefaultTransport
}

func (t *Transport) setModReq(orig, mod *http.Request) {
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.modReq == nil {
		t.modReq = make(map[*http.Request]*http.Request)
	}
	if mod == nil {
		delete(t.modReq, orig)
	} else {
		t.modReq[orig] = mod
	}
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header, len(r.Header))
	for k, s := range r.Header {
		r2.Header[k] = append([]string(nil), s...)
	}
	return r2
}

type onEOFReader struct {
	rc io.ReadCloser
	fn func()
}

func (r *onEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

func (r *onEOFReader) Close() error {
	err := r.rc.Close()
	r.runFunc()
	return err
}

func (r *onEOFReader) runFunc() {
	if fn := r.fn; fn != nil {
		fn()
		r.fn = nil
	}
}
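Transport is rarely constructed by hand; when it is, it's typically paired with a TokenSource from oauth2.go above. A minimal sketch with a static token — the token value and URL are placeholders:

```go
package main

import (
	"net/http"

	"golang.org/x/oauth2"
)

func main() {
	// StaticTokenSource never refreshes, so this suits long-lived API tokens.
	src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "API_TOKEN"}) // placeholder

	client := &http.Client{
		Transport: &oauth2.Transport{
			Source: src, // adds "Authorization: Bearer API_TOKEN" per request
			// Base defaults to http.DefaultTransport when nil.
		},
	}
	resp, err := client.Get("https://api.example.com/v1/me")
	if err == nil {
		resp.Body.Close()
	}
}
```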
Some files were not shown because too many files have changed in this diff.