Merge pull request #70889 from dims/update-cadvisor-and-other-repos-for-klog-take-2

Update cadvisor and other repos for klog

Kubernetes-commit: a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10
Kubernetes Publisher 2018-11-10 07:08:39 -08:00
commit ff6be62b4c
331 changed files with 989 additions and 72731 deletions
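Most of the code changes below follow one mechanical pattern: the github.com/golang/glog import is swapped for k8s.io/klog, and each glog.* call becomes the corresponding klog.* call, since klog is a fork of glog that keeps essentially the same logging API. A minimal standalone sketch of the before/after shape (hypothetical file, not taken from this diff):

// Before:
//   import "github.com/golang/glog"
//   glog.V(4).Infof("Processing object: %s", name)
//
// After: same call sites, only the import path and package name change.
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog registers its logging flags (-v, -logtostderr, ...) through
	// InitFlags; passing nil attaches them to the default flag.CommandLine.
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()

	klog.V(4).Infof("Processing object: %s", "example-object")
	klog.Info("glog-to-klog migration sketch complete")
}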

Godeps/Godeps.json (generated) — 498 changed lines; file diff suppressed because it is too large.

@ -20,7 +20,6 @@ import (
"fmt"
"time"
"github.com/golang/glog"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -37,6 +36,7 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
@ -96,9 +96,9 @@ func NewController(
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
glog.V(4).Info("Creating event broadcaster")
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
@ -113,7 +113,7 @@ func NewController(
recorder: recorder,
}
glog.Info("Setting up event handlers")
klog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueFoo,
@ -154,23 +154,23 @@ func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Foo controller")
klog.Info("Starting Foo controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.Info("Starting workers")
klog.Info("Starting workers")
// Launch two workers to process Foo resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
klog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
klog.Info("Shutting down workers")
return nil
}
@ -226,7 +226,7 @@ func (c *Controller) processNextWorkItem() bool {
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
glog.Infof("Successfully synced '%s'", key)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
@ -297,7 +297,7 @@ func (c *Controller) syncHandler(key string) error {
// number does not equal the current desired replicas on the Deployment, we
// should update the Deployment resource.
if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo))
}
@ -365,9 +365,9 @@ func (c *Controller) handleObject(obj interface{}) {
runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
}
glog.V(4).Infof("Processing object: %s", object.GetName())
klog.V(4).Infof("Processing object: %s", object.GetName())
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
// If this object is not owned by a Foo, we should not do anything more
// with it.
@ -377,7 +377,7 @@ func (c *Controller) handleObject(obj interface{}) {
foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
if err != nil {
glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
klog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
return
}

main.go — 10 changed lines

@ -20,10 +20,10 @@ import (
"flag"
"time"
"github.com/golang/glog"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
@ -45,17 +45,17 @@ func main() {
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %s", err.Error())
klog.Fatalf("Error building kubeconfig: %s", err.Error())
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
exampleClient, err := clientset.NewForConfig(cfg)
if err != nil {
glog.Fatalf("Error building example clientset: %s", err.Error())
klog.Fatalf("Error building example clientset: %s", err.Error())
}
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
@ -71,7 +71,7 @@ func main() {
exampleInformerFactory.Start(stopCh)
if err = controller.Run(2, stopCh); err != nil {
glog.Fatalf("Error running controller: %s", err.Error())
klog.Fatalf("Error running controller: %s", err.Error())
}
}


@ -20,7 +20,7 @@ import (
"fmt"
"reflect"
"github.com/golang/glog"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1"
@ -607,7 +607,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
var ret []metav1.OwnerReference
s := a.ownerReferences
if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
glog.Errorf("expect %v to be a pointer to slice", s)
klog.Errorf("expect %v to be a pointer to slice", s)
return ret
}
s = s.Elem()
@ -615,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1)
for i := 0; i < s.Len(); i++ {
if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
glog.Errorf("extractFromOwnerReference failed: %v", err)
klog.Errorf("extractFromOwnerReference failed: %v", err)
return ret
}
}
@ -625,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference {
func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) {
s := a.ownerReferences
if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
glog.Errorf("expect %v to be a pointer to slice", s)
klog.Errorf("expect %v to be a pointer to slice", s)
}
s = s.Elem()
newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
for i := 0; i < len(references); i++ {
if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
glog.Errorf("setOwnerReference failed: %v", err)
klog.Errorf("setOwnerReference failed: %v", err)
return
}
}


@ -23,10 +23,10 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/klog"
)
// Requirements is AND of all requirements.
@ -211,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool {
}
lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64)
if err != nil {
glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err)
return false
}
// There should be only one strValue in r.strValues, and it can be converted to an integer.
if len(r.strValues) != 1 {
glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r)
return false
}
@ -225,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool {
for i := range r.strValues {
rValue, err = strconv.ParseInt(r.strValues[i], 10, 64)
if err != nil {
glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r)
return false
}
}


@ -33,7 +33,7 @@ import (
"k8s.io/apimachinery/pkg/util/json"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"github.com/golang/glog"
"k8s.io/klog"
)
// UnstructuredConverter is an interface for converting between interface{}
@ -133,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i
newObj := reflect.New(t.Elem()).Interface()
newErr := fromUnstructuredViaJSON(u, newObj)
if (err != nil) != (newErr != nil) {
glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err)
}
if err == nil && !c.comparison.DeepEqual(obj, newObj) {
glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj)
}
}
return err
@ -424,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte
newUnstr := map[string]interface{}{}
newErr := toUnstructuredViaJSON(obj, &newUnstr)
if (err != nil) != (newErr != nil) {
glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr)
}
if err == nil && !c.comparison.DeepEqual(u, newUnstr) {
glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr)
}
}
if err != nil {


@ -25,8 +25,8 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"github.com/google/gofuzz"
"k8s.io/klog"
)
// IntOrString is a type that can hold an int32 or a string. When used in
@ -58,7 +58,7 @@ const (
// TODO: convert to (val int32)
func FromInt(val int) IntOrString {
if val > math.MaxInt32 || val < math.MinInt32 {
glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack())
}
return IntOrString{Type: Int, IntVal: int32(val)}
}


@ -31,8 +31,8 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"golang.org/x/net/http2"
"k8s.io/klog"
)
// JoinPreservingTrailingSlash does a path.Join of the specified elements,
@ -107,10 +107,10 @@ func SetTransportDefaults(t *http.Transport) *http.Transport {
t = SetOldTransportDefaults(t)
// Allow clients to disable http2 if needed.
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 {
glog.Infof("HTTP2 has been explicitly disabled")
klog.Infof("HTTP2 has been explicitly disabled")
} else {
if err := http2.ConfigureTransport(t); err != nil {
glog.Warningf("Transport failed http2 configuration: %v", err)
klog.Warningf("Transport failed http2 configuration: %v", err)
}
}
return t
@ -368,7 +368,7 @@ redirectLoop:
resp, err := http.ReadResponse(respReader, nil)
if err != nil {
// Unable to read the backend response; let the client handle it.
glog.Warningf("Error reading backend response: %v", err)
klog.Warningf("Error reading backend response: %v", err)
break redirectLoop
}


@ -26,7 +26,7 @@ import (
"strings"
"github.com/golang/glog"
"k8s.io/klog"
)
type AddressFamily uint
@ -193,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool {
return false
}
if intf.Flags&net.FlagUp != 0 {
glog.V(4).Infof("Interface %v is up", intf.Name)
klog.V(4).Infof("Interface %v is up", intf.Name)
return true
}
return false
@ -208,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool {
func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) {
if len(addrs) > 0 {
for i := range addrs {
glog.V(4).Infof("Checking addr %s.", addrs[i].String())
klog.V(4).Infof("Checking addr %s.", addrs[i].String())
ip, _, err := net.ParseCIDR(addrs[i].String())
if err != nil {
return nil, err
}
if memberOf(ip, family) {
if ip.IsGlobalUnicast() {
glog.V(4).Infof("IP found %v", ip)
klog.V(4).Infof("IP found %v", ip)
return ip, nil
} else {
glog.V(4).Infof("Non-global unicast address found %v", ip)
klog.V(4).Infof("Non-global unicast address found %v", ip)
}
} else {
glog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
klog.V(4).Infof("%v is not an IPv%d address", ip, int(family))
}
}
@ -241,13 +241,13 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte
if err != nil {
return nil, err
}
glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs)
matchingIP, err := getMatchingGlobalIP(addrs, forFamily)
if err != nil {
return nil, err
}
if matchingIP != nil {
glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName)
return matchingIP, nil
}
}
@ -275,14 +275,14 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("no interfaces found on host.")
}
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family))
for _, intf := range intfs {
if !isInterfaceUp(&intf) {
glog.V(4).Infof("Skipping: down interface %q", intf.Name)
klog.V(4).Infof("Skipping: down interface %q", intf.Name)
continue
}
if isLoopbackOrPointToPoint(&intf) {
glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name)
continue
}
addrs, err := nw.Addrs(&intf)
@ -290,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, err
}
if len(addrs) == 0 {
glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name)
continue
}
for _, addr := range addrs {
@ -299,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) {
return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err)
}
if !memberOf(ip, family) {
glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name)
continue
}
// TODO: Decide if should open up to allow IPv6 LLAs in future.
if !ip.IsGlobalUnicast() {
glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name)
continue
}
glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name)
return ip, nil
}
}
@ -381,23 +381,23 @@ func getAllDefaultRoutes() ([]Route, error) {
// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP.
func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) {
for _, family := range []AddressFamily{familyIPv4, familyIPv6} {
glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family))
for _, route := range routes {
if route.Family != family {
continue
}
glog.V(4).Infof("Default route transits interface %q", route.Interface)
klog.V(4).Infof("Default route transits interface %q", route.Interface)
finalIP, err := getIPFromInterface(route.Interface, family, nw)
if err != nil {
return nil, err
}
if finalIP != nil {
glog.V(4).Infof("Found active IP %v ", finalIP)
klog.V(4).Infof("Found active IP %v ", finalIP)
return finalIP, nil
}
}
}
glog.V(4).Infof("No active IP found by looking at default routes")
klog.V(4).Infof("No active IP found by looking at default routes")
return nil, fmt.Errorf("unable to select an IP from default routes.")
}


@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
)
var (
@ -63,7 +63,7 @@ func HandleCrash(additionalHandlers ...func(interface{})) {
// logPanic logs the caller tree when a panic occurs.
func logPanic(r interface{}) {
callers := getCallers(r)
glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
klog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers)
}
func getCallers(r interface{}) string {
@ -111,7 +111,7 @@ func HandleError(err error) {
// logError prints an error with the call stack of the location it was reported
func logError(err error) {
glog.ErrorDepth(2, err)
klog.ErrorDepth(2, err)
}
type rudimentaryErrorBackoff struct {


@ -26,7 +26,7 @@ import (
"strings"
"unicode"
"github.com/golang/glog"
"k8s.io/klog"
"sigs.k8s.io/yaml"
)
@ -217,11 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if d.decoder == nil {
buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize)
if isJSON {
glog.V(4).Infof("decoding stream as JSON")
klog.V(4).Infof("decoding stream as JSON")
d.decoder = json.NewDecoder(buffer)
d.rawData = origData
} else {
glog.V(4).Infof("decoding stream as YAML")
klog.V(4).Infof("decoding stream as YAML")
d.decoder = NewYAMLToJSONDecoder(buffer)
}
}
@ -230,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error {
if syntax, ok := err.(*json.SyntaxError); ok {
data, readErr := ioutil.ReadAll(jsonDecoder.Buffered())
if readErr != nil {
glog.V(4).Infof("reading stream failed: %v", readErr)
klog.V(4).Infof("reading stream failed: %v", readErr)
}
js := string(data)


@ -20,10 +20,10 @@ import (
"io"
"sync"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/net"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/klog"
)
// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written.
@ -100,13 +100,13 @@ func (sw *StreamWatcher) receive() {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err)
default:
msg := "Unable to decode an event from the watch stream: %v"
if net.IsProbableEOF(err) {
glog.V(5).Infof(msg, err)
klog.V(5).Infof(msg, err)
} else {
glog.Errorf(msg, err)
klog.Errorf(msg, err)
}
}
return


@ -20,7 +20,7 @@ import (
"fmt"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
)
@ -106,7 +106,7 @@ func (f *FakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
glog.V(4).Infof("Stopping fake watcher.")
klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}
@ -173,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() {
f.Lock()
defer f.Unlock()
if !f.Stopped {
glog.V(4).Infof("Stopping fake watcher.")
klog.V(4).Infof("Stopping fake watcher.")
close(f.result)
f.Stopped = true
}


@ -25,8 +25,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/googleapis/gnostic/OpenAPIv2"
"k8s.io/klog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -67,23 +67,23 @@ func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion stri
if err == nil {
cachedResources := &metav1.APIResourceList{}
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil {
glog.V(10).Infof("returning cached discovery info from %v", filename)
klog.V(10).Infof("returning cached discovery info from %v", filename)
return cachedResources, nil
}
}
liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion)
if err != nil {
glog.V(3).Infof("skipped caching discovery info due to %v", err)
klog.V(3).Infof("skipped caching discovery info due to %v", err)
return liveResources, err
}
if liveResources == nil || len(liveResources.APIResources) == 0 {
glog.V(3).Infof("skipped caching discovery info, no resources found")
klog.V(3).Infof("skipped caching discovery info, no resources found")
return liveResources, err
}
if err := d.writeCachedFile(filename, liveResources); err != nil {
glog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
}
return liveResources, nil
@ -103,23 +103,23 @@ func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) {
if err == nil {
cachedGroups := &metav1.APIGroupList{}
if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil {
glog.V(10).Infof("returning cached discovery info from %v", filename)
klog.V(10).Infof("returning cached discovery info from %v", filename)
return cachedGroups, nil
}
}
liveGroups, err := d.delegate.ServerGroups()
if err != nil {
glog.V(3).Infof("skipped caching discovery info due to %v", err)
klog.V(3).Infof("skipped caching discovery info due to %v", err)
return liveGroups, err
}
if liveGroups == nil || len(liveGroups.Groups) == 0 {
glog.V(3).Infof("skipped caching discovery info, no groups found")
klog.V(3).Infof("skipped caching discovery info, no groups found")
return liveGroups, err
}
if err := d.writeCachedFile(filename, liveGroups); err != nil {
glog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
klog.V(1).Infof("failed to write cache to %v due to %v", filename, err)
}
return liveGroups, nil


@ -20,10 +20,10 @@ import (
"net/http"
"path/filepath"
"github.com/golang/glog"
"github.com/gregjones/httpcache"
"github.com/gregjones/httpcache/diskcache"
"github.com/peterbourgon/diskv"
"k8s.io/klog"
)
type cacheRoundTripper struct {
@ -55,7 +55,7 @@ func (rt *cacheRoundTripper) CancelRequest(req *http.Request) {
if cr, ok := rt.rt.Transport.(canceler); ok {
cr.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport)
klog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport)
}
}


@ -19,11 +19,11 @@ package v1beta1
import (
"fmt"
"github.com/golang/glog"
"k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/klog"
)
// PodDisruptionBudgetListerExpansion allows custom methods to be added to
@ -54,7 +54,7 @@ func (s *podDisruptionBudgetLister) GetPodPodDisruptionBudgets(pod *v1.Pod) ([]*
pdb := list[i]
selector, err = metav1.LabelSelectorAsSelector(pdb.Spec.Selector)
if err != nil {
glog.Warningf("invalid selector: %v", err)
klog.Warningf("invalid selector: %v", err)
// TODO(mml): add an event to the PDB
continue
}


@ -31,7 +31,6 @@ import (
"sync"
"time"
"github.com/golang/glog"
"golang.org/x/crypto/ssh/terminal"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -44,6 +43,7 @@ import (
"k8s.io/client-go/tools/clientcmd/api"
"k8s.io/client-go/transport"
"k8s.io/client-go/util/connrotation"
"k8s.io/klog"
)
const execInfoEnv = "KUBERNETES_EXEC_INFO"
@ -228,7 +228,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
Code: int32(res.StatusCode),
}
if err := r.a.maybeRefreshCreds(creds, resp); err != nil {
glog.Errorf("refreshing credentials: %v", err)
klog.Errorf("refreshing credentials: %v", err)
}
}
return res, nil


@ -29,7 +29,6 @@ import (
"strings"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -37,6 +36,7 @@ import (
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
certutil "k8s.io/client-go/util/cert"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
)
const (
@ -331,7 +331,7 @@ func InClusterConfig() (*Config, error) {
tlsClientConfig := TLSClientConfig{}
if _, err := certutil.NewPool(rootCAFile); err != nil {
glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err)
} else {
tlsClientConfig.CAFile = rootCAFile
}


@ -21,7 +21,7 @@ import (
"net/http"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error {
if _, found := plugins[name]; found {
return fmt.Errorf("Auth Provider Plugin %q was registered twice", name)
}
glog.V(4).Infof("Registered Auth Provider Plugin %q", name)
klog.V(4).Infof("Registered Auth Provider Plugin %q", name)
plugins[name] = plugin
return nil
}


@ -32,7 +32,6 @@ import (
"strings"
"time"
"github.com/golang/glog"
"golang.org/x/net/http2"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -44,6 +43,7 @@ import (
restclientwatch "k8s.io/client-go/rest/watch"
"k8s.io/client-go/tools/metrics"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
)
var (
@ -114,7 +114,7 @@ type Request struct {
// NewRequest creates a new request helper object for accessing runtime.Objects on a server.
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
if backoff == nil {
glog.V(2).Infof("Not implementing request backoff strategy.")
klog.V(2).Infof("Not implementing request backoff strategy.")
backoff = &NoBackoff{}
}
@ -527,7 +527,7 @@ func (r *Request) tryThrottle() {
r.throttle.Accept()
}
if latency := time.Since(now); latency > longThrottleLatency {
glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String())
}
}
@ -683,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
}()
if r.err != nil {
glog.V(4).Infof("Error in request: %v", r.err)
klog.V(4).Infof("Error in request: %v", r.err)
return r.err
}
@ -770,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error {
if seeker, ok := r.body.(io.Seeker); ok && r.body != nil {
_, err := seeker.Seek(0, 0)
if err != nil {
glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body)
fn(req, resp)
return true
}
}
glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url)
r.backoffMgr.Sleep(time.Duration(seconds) * time.Second)
return false
}
@ -844,13 +844,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
// 2. Apiserver sends back the headers and then part of the body
// 3. Apiserver closes connection.
// 4. client-go should catch this and return an error.
glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err)
streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err)
return Result{
err: streamErr,
}
default:
glog.Errorf("Unexpected error when reading response body: %#v", err)
klog.Errorf("Unexpected error when reading response body: %#v", err)
unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err)
return Result{
err: unexpectedErr,
@ -914,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu
func truncateBody(body string) string {
max := 0
switch {
case bool(glog.V(10)):
case bool(klog.V(10)):
return body
case bool(glog.V(9)):
case bool(klog.V(9)):
max = 10240
case bool(glog.V(8)):
case bool(klog.V(8)):
max = 1024
}
@ -933,13 +933,13 @@ func truncateBody(body string) string {
// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine
// whether the body is printable.
func glogBody(prefix string, body []byte) {
if glog.V(8) {
if klog.V(8) {
if bytes.IndexFunc(body, func(r rune) bool {
return r < 0x0a
}) != -1 {
glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body)))
} else {
glog.Infof("%s: %s", prefix, truncateBody(string(body)))
klog.Infof("%s: %s", prefix, truncateBody(string(body)))
}
}
}
@ -1141,7 +1141,7 @@ func (r Result) Error() error {
// to be backwards compatible with old servers that do not return a version, default to "v1"
out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil)
if err != nil {
glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err)
return r.err
}
switch t := out.(type) {
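The truncateBody and glogBody hunks above rely on klog.V(level) being usable as a boolean in the klog version vendored here, exactly as glog.V was, so expensive logging work can be gated on verbosity. A small standalone illustration of that idiom (not part of this diff):

package main

import (
	"encoding/hex"
	"flag"

	"k8s.io/klog"
)

// dumpIfVerbose hex-dumps a body only when verbosity is at least 8,
// mirroring the guard used by glogBody above.
func dumpIfVerbose(prefix string, body []byte) {
	if klog.V(8) {
		klog.Infof("%s:\n%s", prefix, hex.Dump(body))
	}
}

func main() {
	klog.InitFlags(nil)
	flag.Parse()
	defer klog.Flush()
	dumpIfVerbose("response body", []byte("hello"))
}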


@ -24,8 +24,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
"golang.org/x/oauth2"
"k8s.io/klog"
)
// TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens
@ -131,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) {
if ts.tok == nil {
return nil, err
}
glog.Errorf("Unable to rotate token: %v", err)
klog.Errorf("Unable to rotate token: %v", err)
return ts.tok, nil
}


@ -20,9 +20,9 @@ import (
"net/url"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
)
// Set of resp. Codes that we backoff for.
@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) {
// Disable makes the backoff trivial, i.e., sets it to zero. This might be used
// by tests which want to run 1000s of mock requests without slowing down.
func (b *URLBackoff) Disable() {
glog.V(4).Infof("Disabling backoff strategy")
klog.V(4).Infof("Disabling backoff strategy")
b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second)
}
@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string {
// in the future.
host, err := url.Parse(rawurl.String())
if err != nil {
glog.V(4).Infof("Error extracting url: %v", rawurl)
klog.V(4).Infof("Error extracting url: %v", rawurl)
panic("bad url!")
}
return host.Host
@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i
b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now())
return
} else if responseCode >= 300 || err != nil {
glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err)
}
//If we got this far, there is no backoff required for this URL anymore.


@ -23,7 +23,7 @@ import (
"k8s.io/apimachinery/pkg/util/sets"
"github.com/golang/glog"
"k8s.io/klog"
)
// NewDeltaFIFO returns a Store which can be used to process changes to items.
@ -506,10 +506,10 @@ func (f *DeltaFIFO) Replace(list []interface{}, resourceVersion string) error {
deletedObj, exists, err := f.knownObjects.GetByKey(k)
if err != nil {
deletedObj = nil
glog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
klog.Errorf("Unexpected error %v during lookup of key %v, placing DeleteFinalStateUnknown marker without object", err, k)
} else if !exists {
deletedObj = nil
glog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
klog.Infof("Key %v does not exist in known objects store, placing DeleteFinalStateUnknown marker without object", k)
}
queuedDeletions++
if err := f.queueActionLocked(Deleted, DeletedFinalStateUnknown{k, deletedObj}); err != nil {
@ -553,10 +553,10 @@ func (f *DeltaFIFO) syncKey(key string) error {
func (f *DeltaFIFO) syncKeyLocked(key string) error {
obj, exists, err := f.knownObjects.GetByKey(key)
if err != nil {
glog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
klog.Errorf("Unexpected error %v during lookup of key %v, unable to queue object for sync", err, key)
return nil
} else if !exists {
glog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
klog.Infof("Key %v does not exist in known objects store, unable to queue object for sync", key)
return nil
}


@ -20,8 +20,8 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/klog"
)
// ExpirationCache implements the store interface
@ -95,7 +95,7 @@ func (c *ExpirationCache) getOrExpire(key string) (interface{}, bool) {
return nil, false
}
if c.expirationPolicy.IsExpired(timestampedItem) {
glog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
klog.V(4).Infof("Entry %v: %+v has expired", key, timestampedItem.obj)
c.cacheStorage.Delete(key)
return nil, false
}


@ -17,7 +17,7 @@ limitations under the License.
package cache
import (
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
@ -60,7 +60,7 @@ func ListAllByNamespace(indexer Indexer, namespace string, selector labels.Selec
items, err := indexer.Index(NamespaceIndex, &metav1.ObjectMeta{Namespace: namespace})
if err != nil {
// Ignore error; do slow search without index.
glog.Warningf("can not retrieve list of objects using index : %v", err)
klog.Warningf("can not retrieve list of objects using index : %v", err)
for _, m := range indexer.List() {
metadata, err := meta.Accessor(m)
if err != nil {


@ -22,7 +22,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
@ -156,7 +156,7 @@ func (c *mutationCache) ByIndex(name string, indexKey string) ([]interface{}, er
}
elements, err := fn(updated)
if err != nil {
glog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
klog.V(4).Infof("Unable to calculate an index entry for mutation cache entry %s: %v", key, err)
continue
}
for _, inIndex := range elements {


@ -24,7 +24,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/diff"
@ -45,7 +45,7 @@ func NewCacheMutationDetector(name string) CacheMutationDetector {
if !mutationDetectionEnabled {
return dummyMutationDetector{}
}
glog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
klog.Warningln("Mutation detector is enabled, this will result in memory leakage.")
return &defaultCacheMutationDetector{name: name, period: 1 * time.Second}
}


@ -31,7 +31,6 @@ import (
"syscall"
"time"
"github.com/golang/glog"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -41,6 +40,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/klog"
)
// Reflector watches a specified resource and causes all changes to be reflected in the given store.
@ -128,7 +128,7 @@ var internalPackages = []string{"client-go/tools/cache/"}
// Run starts a watch and handles watch events. Will restart the watch if it is closed.
// Run will exit when stopCh is closed.
func (r *Reflector) Run(stopCh <-chan struct{}) {
glog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
klog.V(3).Infof("Starting reflector %v (%s) from %s", r.expectedType, r.resyncPeriod, r.name)
wait.Until(func() {
if err := r.ListAndWatch(stopCh); err != nil {
utilruntime.HandleError(err)
@ -166,7 +166,7 @@ func (r *Reflector) resyncChan() (<-chan time.Time, func() bool) {
// and then use the resource version to watch.
// It returns error if ListAndWatch didn't even try to initialize watch.
func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
glog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
klog.V(3).Infof("Listing and watching %v from %s", r.expectedType, r.name)
var resourceVersion string
// Explicitly set "0" as resource version - it's fine for the List()
@ -212,7 +212,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
return
}
if r.ShouldResync == nil || r.ShouldResync() {
glog.V(4).Infof("%s: forcing resync", r.name)
klog.V(4).Infof("%s: forcing resync", r.name)
if err := r.store.Resync(); err != nil {
resyncerrc <- err
return
@ -246,7 +246,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
case io.EOF:
// watch closed normally
case io.ErrUnexpectedEOF:
glog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
klog.V(1).Infof("%s: Watch for %v closed with unexpected EOF: %v", r.name, r.expectedType, err)
default:
utilruntime.HandleError(fmt.Errorf("%s: Failed to watch %v: %v", r.name, r.expectedType, err))
}
@ -267,7 +267,7 @@ func (r *Reflector) ListAndWatch(stopCh <-chan struct{}) error {
if err := r.watchHandler(w, &resourceVersion, resyncerrc, stopCh); err != nil {
if err != errorStopRequested {
glog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
klog.Warningf("%s: watch of %v ended with: %v", r.name, r.expectedType, err)
}
return nil
}
@ -354,7 +354,7 @@ loop:
r.metrics.numberOfShortWatches.Inc()
return fmt.Errorf("very short watch: %s: Unexpected watch close - watch lasted less than a second and no items received", r.name)
}
glog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
klog.V(4).Infof("%s: Watch close - %v total %v items received", r.name, r.expectedType, eventCount)
return nil
}


@ -28,7 +28,7 @@ import (
"k8s.io/client-go/util/buffer"
"k8s.io/client-go/util/retry"
"github.com/golang/glog"
"k8s.io/klog"
)
// SharedInformer has a shared data cache and is capable of distributing notifications for changes
@ -116,11 +116,11 @@ func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool
},
stopCh)
if err != nil {
glog.V(2).Infof("stop requested")
klog.V(2).Infof("stop requested")
return false
}
glog.V(4).Infof("caches populated")
klog.V(4).Infof("caches populated")
return true
}
@ -279,11 +279,11 @@ func determineResyncPeriod(desired, check time.Duration) time.Duration {
return desired
}
if check == 0 {
glog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
klog.Warningf("The specified resyncPeriod %v is invalid because this shared informer doesn't support resyncing", desired)
return 0
}
if desired < check {
glog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
klog.Warningf("The specified resyncPeriod %v is being increased to the minimum resyncCheckPeriod %v", desired, check)
return check
}
return desired
@ -296,19 +296,19 @@ func (s *sharedIndexInformer) AddEventHandlerWithResyncPeriod(handler ResourceEv
defer s.startedLock.Unlock()
if s.stopped {
glog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
klog.V(2).Infof("Handler %v was not added to shared informer because it has stopped already", handler)
return
}
if resyncPeriod > 0 {
if resyncPeriod < minimumResyncPeriod {
glog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
klog.Warningf("resyncPeriod %d is too small. Changing it to the minimum allowed value of %d", resyncPeriod, minimumResyncPeriod)
resyncPeriod = minimumResyncPeriod
}
if resyncPeriod < s.resyncCheckPeriod {
if s.started {
glog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
klog.Warningf("resyncPeriod %d is smaller than resyncCheckPeriod %d and the informer has already started. Changing it to %d", resyncPeriod, s.resyncCheckPeriod, s.resyncCheckPeriod)
resyncPeriod = s.resyncCheckPeriod
} else {
// if the event handler's resyncPeriod is smaller than the current resyncCheckPeriod, update


@ -24,8 +24,8 @@ import (
"os"
"strings"
"github.com/golang/glog"
"github.com/imdario/mergo"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientauth "k8s.io/client-go/tools/auth"
@ -545,12 +545,12 @@ func (config *inClusterClientConfig) Possible() bool {
// to the default config.
func BuildConfigFromFlags(masterUrl, kubeconfigPath string) (*restclient.Config, error) {
if kubeconfigPath == "" && masterUrl == "" {
glog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
klog.Warningf("Neither --kubeconfig nor --master was specified. Using the inClusterConfig. This might not work.")
kubeconfig, err := restclient.InClusterConfig()
if err == nil {
return kubeconfig, nil
}
glog.Warning("error creating inClusterConfig, falling back to default config: ", err)
klog.Warning("error creating inClusterConfig, falling back to default config: ", err)
}
return NewNonInteractiveDeferredLoadingClientConfig(
&ClientConfigLoadingRules{ExplicitPath: kubeconfigPath},


@ -24,7 +24,7 @@ import (
"reflect"
"sort"
"github.com/golang/glog"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -483,7 +483,7 @@ func getConfigFromFile(filename string) (*clientcmdapi.Config, error) {
func GetConfigFromFileOrDie(filename string) *clientcmdapi.Config {
config, err := getConfigFromFile(filename)
if err != nil {
glog.FatalDepth(1, err)
klog.FatalDepth(1, err)
}
return config


@ -27,8 +27,8 @@ import (
goruntime "runtime"
"strings"
"github.com/golang/glog"
"github.com/imdario/mergo"
"k8s.io/klog"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -356,7 +356,7 @@ func LoadFromFile(filename string) (*clientcmdapi.Config, error) {
if err != nil {
return nil, err
}
glog.V(6).Infoln("Config loaded from file", filename)
klog.V(6).Infoln("Config loaded from file", filename)
// set LocationOfOrigin on every Cluster, User, and Context
for key, obj := range config.AuthInfos {


@ -20,7 +20,7 @@ import (
"io"
"sync"
"github.com/golang/glog"
"k8s.io/klog"
restclient "k8s.io/client-go/rest"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
@ -119,7 +119,7 @@ func (config *DeferredLoadingClientConfig) ClientConfig() (*restclient.Config, e
// check for in-cluster configuration and use it
if config.icc.Possible() {
glog.V(4).Infof("Using in-cluster configuration")
klog.V(4).Infof("Using in-cluster configuration")
return config.icc.ClientConfig()
}
@ -156,7 +156,7 @@ func (config *DeferredLoadingClientConfig) Namespace() (string, bool, error) {
}
}
glog.V(4).Infof("Using in-cluster namespace")
klog.V(4).Infof("Using in-cluster namespace")
// allow the namespace from the service account token directory to be used.
return config.icc.Namespace()


@ -33,7 +33,7 @@ import (
"net/http"
"github.com/golang/glog"
"k8s.io/klog"
)
const maxTriesPerEvent = 12
@ -144,7 +144,7 @@ func recordToSink(sink EventSink, event *v1.Event, eventCorrelator *EventCorrela
}
tries++
if tries >= maxTriesPerEvent {
glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
klog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
break
}
// Randomize the first sleep so that various clients won't all be
@ -194,13 +194,13 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
switch err.(type) {
case *restclient.RequestConstructionError:
// We will construct the request the same next time, so don't keep trying.
glog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
klog.Errorf("Unable to construct event '%#v': '%v' (will not retry!)", event, err)
return true
case *errors.StatusError:
if errors.IsAlreadyExists(err) {
glog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
klog.V(5).Infof("Server rejected event '%#v': '%v' (will not retry!)", event, err)
} else {
glog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
klog.Errorf("Server rejected event '%#v': '%v' (will not retry!)", event, err)
}
return true
case *errors.UnexpectedObjectError:
@ -209,7 +209,7 @@ func recordEvent(sink EventSink, event *v1.Event, patch []byte, updateExistingEv
default:
// This case includes actual http transport errors. Go ahead and retry.
}
glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
klog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
return false
}
@ -256,12 +256,12 @@ type recorderImpl struct {
func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations map[string]string, timestamp metav1.Time, eventtype, reason, message string) {
ref, err := ref.GetReference(recorder.scheme, object)
if err != nil {
glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
klog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, eventtype, reason, message)
return
}
if !validateEventType(eventtype) {
glog.Errorf("Unsupported event type: '%v'", eventtype)
klog.Errorf("Unsupported event type: '%v'", eventtype)
return
}


@ -22,7 +22,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/klog"
utilnet "k8s.io/apimachinery/pkg/util/net"
)
@ -62,13 +62,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip
// DebugWrappers wraps a round tripper and logs based on the current log level.
func DebugWrappers(rt http.RoundTripper) http.RoundTripper {
switch {
case bool(glog.V(9)):
case bool(klog.V(9)):
rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders)
case bool(glog.V(8)):
case bool(klog.V(8)):
rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders)
case bool(glog.V(7)):
case bool(klog.V(7)):
rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus)
case bool(glog.V(6)):
case bool(klog.V(6)):
rt = newDebuggingRoundTripper(rt, debugURLTiming)
}
@ -138,7 +138,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.rt)
klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@ -166,7 +166,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.rt)
klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@ -197,7 +197,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.rt)
klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@ -257,7 +257,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.delegate.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.delegate)
klog.Errorf("CancelRequest not implemented by %T", rt.delegate)
}
}
@ -288,7 +288,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.rt.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.rt)
klog.Errorf("CancelRequest not implemented by %T", rt.rt)
}
}
@ -372,7 +372,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) {
if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok {
canceler.CancelRequest(req)
} else {
glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper)
klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper)
}
}
@ -380,17 +380,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
reqInfo := newRequestInfo(req)
if rt.levels[debugJustURL] {
glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL)
}
if rt.levels[debugCurlCommand] {
glog.Infof("%s", reqInfo.toCurl())
klog.Infof("%s", reqInfo.toCurl())
}
if rt.levels[debugRequestHeaders] {
glog.Infof("Request Headers:")
klog.Infof("Request Headers:")
for key, values := range reqInfo.RequestHeaders {
for _, value := range values {
glog.Infof(" %s: %s", key, value)
klog.Infof(" %s: %s", key, value)
}
}
}
@ -402,16 +402,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e
reqInfo.complete(response, err)
if rt.levels[debugURLTiming] {
glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
}
if rt.levels[debugResponseStatus] {
glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond))
}
if rt.levels[debugResponseHeaders] {
glog.Infof("Response Headers:")
klog.Infof("Response Headers:")
for key, values := range reqInfo.ResponseHeaders {
for _, value := range values {
glog.Infof(" %s: %s", key, value)
klog.Infof(" %s: %s", key, value)
}
}
}


@ -6,38 +6,6 @@
"./..."
],
"Deps": [
{
"ImportPath": "github.com/PuerkitoBio/purell",
"Rev": "8a290539e2e8629dbc4e6bad948158f790ec31f4"
},
{
"ImportPath": "github.com/PuerkitoBio/urlesc",
"Rev": "5bd2802263f21d8788851d5305584c82a5c75d7e"
},
{
"ImportPath": "github.com/emicklei/go-restful",
"Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46"
},
{
"ImportPath": "github.com/emicklei/go-restful/log",
"Rev": "ff4f55a206334ef123e4f79bbf348980da81ca46"
},
{
"ImportPath": "github.com/go-openapi/jsonpointer",
"Rev": "ef5f0afec364d3b9396b7b77b43dbe26bf1f8004"
},
{
"ImportPath": "github.com/go-openapi/jsonreference",
"Rev": "8483a886a90412cd6858df4ea3483dce9c8e35a3"
},
{
"ImportPath": "github.com/go-openapi/spec",
"Rev": "5bae59e25b21498baea7f9d46e9c147ec106a42e"
},
{
"ImportPath": "github.com/go-openapi/swag",
"Rev": "5899d5c5e619fda5fa86e14795a835f473ca284c"
},
{
"ImportPath": "github.com/gogo/protobuf/gogoproto",
"Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02"
@ -138,74 +106,10 @@
"ImportPath": "github.com/gogo/protobuf/vanity/command",
"Rev": "342cbe0a04158f6dcb03ca0079991a51a4248c02"
},
{
"ImportPath": "github.com/golang/glog",
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
},
{
"ImportPath": "github.com/mailru/easyjson/buffer",
"Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d"
},
{
"ImportPath": "github.com/mailru/easyjson/jlexer",
"Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d"
},
{
"ImportPath": "github.com/mailru/easyjson/jwriter",
"Rev": "2f5df55504ebc322e4d52d34df6a1f5b503bf26d"
},
{
"ImportPath": "github.com/spf13/pflag",
"Rev": "583c0c0531f06d5278b7d917446061adc344b5cd"
},
{
"ImportPath": "golang.org/x/net/idna",
"Rev": "0ed95abb35c445290478a5348a7b38bb154135fd"
},
{
"ImportPath": "golang.org/x/text/cases",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/internal/tag",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/language",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/runes",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/secure/bidirule",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/secure/precis",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/transform",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/unicode/bidi",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/unicode/norm",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/text/width",
"Rev": "b19bf474d317b857955b12035d2c5acb57ce8b01"
},
{
"ImportPath": "golang.org/x/tools/go/ast/astutil",
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
@ -214,69 +118,49 @@
"ImportPath": "golang.org/x/tools/imports",
"Rev": "2382e3994d48b1d22acc2c86bcad0a2aff028e32"
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "5420a8b6744d3b0345ab293f6fcba19c978f1183"
},
{
"ImportPath": "k8s.io/gengo/args",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/examples/deepcopy-gen/generators",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/examples/defaulter-gen/generators",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/examples/import-boss/generators",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/examples/set-gen/generators",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/examples/set-gen/sets",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/generator",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/namer",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/parser",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/gengo/types",
"Rev": "fdcf9f9480fdd5bf2b3c3df9bf4ecd22b25b87e2"
"Rev": "51747d6e00da1fc578d5a333a93bb2abcbce7a95"
},
{
"ImportPath": "k8s.io/kube-openapi/cmd/openapi-gen/args",
"Rev": "72693cb1fadd73ae2742f6fe29af77d1aecdd8cd"
},
{
"ImportPath": "k8s.io/kube-openapi/pkg/common",
"Rev": "72693cb1fadd73ae2742f6fe29af77d1aecdd8cd"
},
{
"ImportPath": "k8s.io/kube-openapi/pkg/generators",
"Rev": "72693cb1fadd73ae2742f6fe29af77d1aecdd8cd"
},
{
"ImportPath": "k8s.io/kube-openapi/pkg/generators/rules",
"Rev": "72693cb1fadd73ae2742f6fe29af77d1aecdd8cd"
},
{
"ImportPath": "k8s.io/kube-openapi/pkg/util/sets",
"Rev": "72693cb1fadd73ae2742f6fe29af77d1aecdd8cd"
"ImportPath": "k8s.io/klog",
"Rev": "8139d8cb77af419532b33dfa7dd09fbc5f1d344f"
}
]
}


@ -32,7 +32,7 @@ import (
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"github.com/golang/glog"
"k8s.io/klog"
)
// NameSystems returns the name system used by the generators in this package.
@ -318,12 +318,12 @@ func applyGroupOverrides(universe types.Universe, customArgs *clientgenargs.Cust
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
klog.Fatalf("Failed loading boilerplate: %v", err)
}
customArgs, ok := arguments.CustomArgs.(*clientgenargs.CustomArgs)
if !ok {
glog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs")
klog.Fatalf("cannot convert arguments.CustomArgs to clientgenargs.CustomArgs")
}
includedTypesOverrides := customArgs.IncludedTypesOverrides

View File

@ -21,9 +21,9 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/client-gen/args"
"k8s.io/code-generator/cmd/client-gen/generators"
@ -31,6 +31,7 @@ import (
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -52,7 +53,7 @@ func main() {
}
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
if err := genericArgs.Execute(
@ -60,6 +61,6 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
}
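
The same migration recurs in every generator main() touched by this commit: drop the github.com/golang/glog import, import k8s.io/klog, call klog.InitFlags(nil) before the flags are parsed, and switch the Fatalf/Info call sites. A minimal sketch of the resulting shape, with the generator wiring reduced to a placeholder run() (the real files call genericArgs.Execute instead):

```go
package main

import (
	"flag"

	"github.com/spf13/pflag"
	"k8s.io/klog"
)

func main() {
	// klog registers its flags on the standard flag set; passing nil means
	// flag.CommandLine, so this must happen before the flags are parsed.
	klog.InitFlags(nil)

	// Preserve the glog default of logging to stderr.
	flag.Set("logtostderr", "true")

	// Merge the standard flag set into pflag and parse once.
	pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
	pflag.Parse()

	if err := run(); err != nil {
		klog.Fatalf("Error: %v", err) // logs and exits, as glog.Fatalf did
	}
	klog.V(2).Info("Completed successfully.")
}

// run is a placeholder for the generator's Execute call.
func run() error { return nil }
```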

View File

@ -29,7 +29,7 @@ import (
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"github.com/golang/glog"
"k8s.io/klog"
conversionargs "k8s.io/code-generator/cmd/conversion-gen/args"
)
@ -124,10 +124,10 @@ type conversionFuncMap map[conversionPair]*types.Type
// Returns all manually-defined conversion functions in the package.
func getManualConversionFunctions(context *generator.Context, pkg *types.Package, manualMap conversionFuncMap) {
if pkg == nil {
glog.Warningf("Skipping nil package passed to getManualConversionFunctions")
klog.Warningf("Skipping nil package passed to getManualConversionFunctions")
return
}
glog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name)
klog.V(5).Infof("Scanning for conversion functions in %v", pkg.Name)
scopeName := types.Ref(conversionPackagePath, "Scope").Name
errorName := types.Ref("", "error").Name
@ -136,34 +136,34 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package
for _, f := range pkg.Functions {
if f.Underlying == nil || f.Underlying.Kind != types.Func {
glog.Errorf("Malformed function: %#v", f)
klog.Errorf("Malformed function: %#v", f)
continue
}
if f.Underlying.Signature == nil {
glog.Errorf("Function without signature: %#v", f)
klog.Errorf("Function without signature: %#v", f)
continue
}
glog.V(8).Infof("Considering function %s", f.Name)
klog.V(8).Infof("Considering function %s", f.Name)
signature := f.Underlying.Signature
// Check whether the function is conversion function.
// Note that all of them have signature:
// func Convert_inType_To_outType(inType, outType, conversion.Scope) error
if signature.Receiver != nil {
glog.V(8).Infof("%s has a receiver", f.Name)
klog.V(8).Infof("%s has a receiver", f.Name)
continue
}
if len(signature.Parameters) != 3 || signature.Parameters[2].Name != scopeName {
glog.V(8).Infof("%s has wrong parameters", f.Name)
klog.V(8).Infof("%s has wrong parameters", f.Name)
continue
}
if len(signature.Results) != 1 || signature.Results[0].Name != errorName {
glog.V(8).Infof("%s has wrong results", f.Name)
klog.V(8).Infof("%s has wrong results", f.Name)
continue
}
inType := signature.Parameters[0]
outType := signature.Parameters[1]
if inType.Kind != types.Pointer || outType.Kind != types.Pointer {
glog.V(8).Infof("%s has wrong parameter types", f.Name)
klog.V(8).Infof("%s has wrong parameter types", f.Name)
continue
}
// Now check if the name satisfies the convention.
@ -171,7 +171,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package
args := argsFromType(inType.Elem, outType.Elem)
sw.Do("Convert_$.inType|public$_To_$.outType|public$", args)
if f.Name.Name == buffer.String() {
glog.V(4).Infof("Found conversion function %s", f.Name)
klog.V(4).Infof("Found conversion function %s", f.Name)
key := conversionPair{inType.Elem, outType.Elem}
// We might scan the same package twice, and that's OK.
if v, ok := manualMap[key]; ok && v != nil && v.Name.Package != pkg.Path {
@ -181,9 +181,9 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package
} else {
// prevent user error when they don't get the correct conversion signature
if strings.HasPrefix(f.Name.Name, "Convert_") {
glog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String())
klog.Errorf("Rename function %s %s -> %s to match expected conversion signature", f.Name.Package, f.Name.Name, buffer.String())
}
glog.V(8).Infof("%s has wrong name", f.Name)
klog.V(8).Infof("%s has wrong name", f.Name)
}
buffer.Reset()
}
@ -192,7 +192,7 @@ func getManualConversionFunctions(context *generator.Context, pkg *types.Package
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
klog.Fatalf("Failed loading boilerplate: %v", err)
}
packages := generator.Packages{}
@ -220,7 +220,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
}
processed[i] = true
glog.V(5).Infof("considering pkg %q", i)
klog.V(5).Infof("considering pkg %q", i)
pkg := context.Universe[i]
// typesPkg is where the versioned types are defined. Sometimes it is
// different from pkg. For example, kubernetes core/v1 types are defined
@ -239,9 +239,9 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
// in their doc.go file.
peerPkgs := extractTag(pkg.Comments)
if peerPkgs != nil {
glog.V(5).Infof(" tags: %q", peerPkgs)
klog.V(5).Infof(" tags: %q", peerPkgs)
} else {
glog.V(5).Infof(" no tag")
klog.V(5).Infof(" no tag")
continue
}
skipUnsafe := false
@ -255,14 +255,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
externalTypesValues := extractExternalTypesTag(pkg.Comments)
if externalTypesValues != nil {
if len(externalTypesValues) != 1 {
glog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues)
klog.Fatalf(" expect only one value for %q tag, got: %q", externalTypesTagName, externalTypesValues)
}
externalTypes := externalTypesValues[0]
glog.V(5).Infof(" external types tags: %q", externalTypes)
klog.V(5).Infof(" external types tags: %q", externalTypes)
var err error
typesPkg, err = context.AddDirectory(externalTypes)
if err != nil {
glog.Fatalf("cannot import package %s", externalTypes)
klog.Fatalf("cannot import package %s", externalTypes)
}
// update context.Order to the latest context.Universe
orderer := namer.Orderer{Namer: namer.NewPublicNamer(1)}
@ -291,7 +291,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
context.AddDir(pp)
p := context.Universe[pp]
if nil == p {
glog.Fatalf("failed to find pkg: %s", pp)
klog.Fatalf("failed to find pkg: %s", pp)
}
getManualConversionFunctions(context, p, manualConversions)
}
@ -335,7 +335,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
// from being a candidate for unsafe conversion
for k, v := range manualConversions {
if isCopyOnly(v.CommentLines) {
glog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name)
klog.V(5).Infof("Conversion function %s will not block memory copy because it is copy-only", v.Name)
continue
}
// this type should be excluded from all equivalence, because the converter must be called.
@ -518,9 +518,9 @@ func (g *genConversion) convertibleOnlyWithinPackage(inType, outType *types.Type
tagvals := extractTag(t.CommentLines)
if tagvals != nil {
if tagvals[0] != "false" {
glog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0])
klog.Fatalf("Type %v: unsupported %s value: %q", t, tagName, tagvals[0])
}
glog.V(5).Infof("type %v requests no conversion generation, skipping", t)
klog.V(5).Infof("type %v requests no conversion generation, skipping", t)
return false
}
// TODO: Consider generating functions for other kinds too.
@ -582,10 +582,10 @@ func (g *genConversion) preexists(inType, outType *types.Type) (*types.Type, boo
}
func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
if glog.V(5) {
if klog.V(5) {
if m, ok := g.useUnsafe.(equalMemoryTypes); ok {
var result []string
glog.Infof("All objects without identical memory layout:")
klog.Infof("All objects without identical memory layout:")
for k, v := range m {
if v {
continue
@ -594,7 +594,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
}
sort.Strings(result)
for _, s := range result {
glog.Infof(s)
klog.Infof(s)
}
}
}
@ -643,7 +643,7 @@ func (g *genConversion) Init(c *generator.Context, w io.Writer) error {
}
func (g *genConversion) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
glog.V(5).Infof("generating for type %v", t)
klog.V(5).Infof("generating for type %v", t)
peerType := getPeerTypeFor(c, t, g.peerPackages)
sw := generator.NewSnippetWriter(w, c, "$", "$")
g.generateConversion(t, peerType, sw)
@ -664,10 +664,10 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene
// There is a public manual Conversion method: use it.
} else if skipped := g.skippedFields[inType]; len(skipped) != 0 {
// The inType had some fields we could not generate.
glog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType)
glog.Errorf(" the following fields need manual conversion:")
klog.Errorf("Warning: could not find nor generate a final Conversion function for %v -> %v", inType, outType)
klog.Errorf(" the following fields need manual conversion:")
for _, f := range skipped {
glog.Errorf(" - %v", f)
klog.Errorf(" - %v", f)
}
} else {
// Emit a public conversion function.
@ -682,7 +682,7 @@ func (g *genConversion) generateConversion(inType, outType *types.Type, sw *gene
// at any nesting level. This makes the autogenerator easy to understand, and
// the compiler shouldn't care.
func (g *genConversion) generateFor(inType, outType *types.Type, sw *generator.SnippetWriter) {
glog.V(5).Infof("generating %v -> %v", inType, outType)
klog.V(5).Infof("generating %v -> %v", inType, outType)
var f func(*types.Type, *types.Type, *generator.SnippetWriter)
switch inType.Kind {
@ -853,7 +853,7 @@ func (g *genConversion) doStruct(inType, outType *types.Type, sw *generator.Snip
sw.Do("}\n", nil)
continue
}
glog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name)
klog.V(5).Infof("Skipped function %s because it is copy-only and we can use direct assignment", function.Name)
}
// If we can't auto-convert, punt before we emit any code.
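
For reference, a manual conversion that getManualConversionFunctions accepts has exactly the shape described in the comments above: a free function (no receiver) taking (in, out, scope) where in and out are pointers and scope is a conversion.Scope, returning a single error, and named after the in and out types. A self-contained sketch with hypothetical Foo types and a local stand-in for conversion.Scope:

```go
package example

// Scope stands in for k8s.io/apimachinery/pkg/conversion.Scope so this
// sketch compiles on its own; real conversion functions take that type.
type Scope interface{}

// FooV1 and FooInternal are hypothetical versioned/internal forms of a type.
type FooV1 struct{ Name string }
type FooInternal struct{ Name string }

// Convert_v1_Foo_To_internal_Foo mirrors the convention the generator scans
// for: no receiver, three parameters ending in the scope, pointer in/out
// types, one error result, and a name derived from the in and out types.
func Convert_v1_Foo_To_internal_Foo(in *FooV1, out *FooInternal, s Scope) error {
	out.Name = in.Name
	// Any field that cannot be copied automatically is converted by hand here.
	return nil
}
```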

View File

@ -38,9 +38,9 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/conversion-gen/args"
"k8s.io/code-generator/cmd/conversion-gen/generators"
@ -48,6 +48,7 @@ import (
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -61,7 +62,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
// Run it.
@ -70,7 +71,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -46,16 +46,17 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/gengo/examples/deepcopy-gen/generators"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/deepcopy-gen/args"
"k8s.io/code-generator/pkg/util"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -69,7 +70,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
// Run it.
@ -78,7 +79,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -45,16 +45,17 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/gengo/examples/defaulter-gen/generators"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/defaulter-gen/args"
"k8s.io/code-generator/pkg/util"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -68,7 +69,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
// Run it.
@ -77,7 +78,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -25,7 +25,7 @@ import (
"strconv"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
@ -85,7 +85,7 @@ func (g *genProtoIDL) Filter(c *generator.Context, t *types.Type) bool {
// Type specified "true".
return true
}
glog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0])
klog.Fatalf(`Comment tag "protobuf" must be true or false, found: %q`, tagVals[0])
}
if !g.generateAll {
// We're not generating everything.

View File

@ -17,8 +17,8 @@ limitations under the License.
package protobuf
import (
"github.com/golang/glog"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
@ -27,7 +27,7 @@ import (
func extractBoolTagOrDie(key string, lines []string) bool {
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
return val
}

View File

@ -63,10 +63,11 @@ import (
"k8s.io/gengo/args"
"k8s.io/gengo/examples/import-boss/generators"
"github.com/golang/glog"
"k8s.io/klog"
)
func main() {
klog.InitFlags(nil)
arguments := args.Default()
// Override defaults.
@ -82,8 +83,8 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Errorf("Error: %v", err)
klog.Errorf("Error: %v", err)
os.Exit(1)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -25,7 +25,7 @@ import (
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"github.com/golang/glog"
"k8s.io/klog"
)
// factoryGenerator produces a file of listers for a given GroupVersion and
@ -65,7 +65,7 @@ func (g *factoryGenerator) Imports(c *generator.Context) (imports []string) {
func (g *factoryGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "{{", "}}")
glog.V(5).Infof("processing type %v", t)
klog.V(5).Infof("processing type %v", t)
gvInterfaces := make(map[string]*types.Type)
gvNewFuncs := make(map[string]*types.Type)

View File

@ -23,7 +23,7 @@ import (
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"github.com/golang/glog"
"k8s.io/klog"
)
// factoryInterfaceGenerator produces a file of interfaces used to break a dependency cycle for
@ -60,7 +60,7 @@ func (g *factoryInterfaceGenerator) Imports(c *generator.Context) (imports []str
func (g *factoryInterfaceGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "{{", "}}")
glog.V(5).Infof("processing type %v", t)
klog.V(5).Infof("processing type %v", t)
m := map[string]interface{}{
"cacheSharedIndexInformer": c.Universe.Type(cacheSharedIndexInformer),

View File

@ -28,7 +28,7 @@ import (
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"github.com/golang/glog"
"k8s.io/klog"
)
// informerGenerator produces a file of listers for a given GroupVersion and
@ -66,7 +66,7 @@ func (g *informerGenerator) Imports(c *generator.Context) (imports []string) {
func (g *informerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
glog.V(5).Infof("processing type %v", t)
klog.V(5).Infof("processing type %v", t)
listerPackage := fmt.Sprintf("%s/%s/%s", g.listersPackage, g.groupPkgName, strings.ToLower(g.groupVersion.Version.NonEmpty()))
clientSetInterface := c.Universe.Type(types.Name{Package: g.clientSetPackage, Name: "Interface"})

View File

@ -22,11 +22,11 @@ import (
"path/filepath"
"strings"
"github.com/golang/glog"
"k8s.io/gengo/args"
"k8s.io/gengo/generator"
"k8s.io/gengo/namer"
"k8s.io/gengo/types"
"k8s.io/klog"
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
@ -102,12 +102,12 @@ func vendorless(p string) string {
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
klog.Fatalf("Failed loading boilerplate: %v", err)
}
customArgs, ok := arguments.CustomArgs.(*informergenargs.CustomArgs)
if !ok {
glog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs)
klog.Fatalf("Wrong CustomArgs type: %T", arguments.CustomArgs)
}
internalVersionPackagePath := filepath.Join(arguments.OutputPackagePath)
@ -128,7 +128,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
objectMeta, internal, err := objectMetaForPackage(p)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
if objectMeta == nil {
// no types in this package had genclient
@ -141,7 +141,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
if internal {
lastSlash := strings.LastIndex(p.Path, "/")
if lastSlash == -1 {
glog.Fatalf("error constructing internal group version for package %q", p.Path)
klog.Fatalf("error constructing internal group version for package %q", p.Path)
}
gv.Group = clientgentypes.Group(p.Path[lastSlash+1:])
targetGroupVersions = internalGroupVersions

View File

@ -17,8 +17,8 @@ limitations under the License.
package generators
import (
"github.com/golang/glog"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
@ -27,7 +27,7 @@ import (
func extractBoolTagOrDie(key string, lines []string) bool {
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
return val
}

View File

@ -20,16 +20,17 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/code-generator/cmd/informer-gen/generators"
"k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/informer-gen/args"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -47,7 +48,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
// Run it.
@ -56,7 +57,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -30,7 +30,7 @@ import (
"k8s.io/code-generator/cmd/client-gen/generators/util"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"github.com/golang/glog"
"k8s.io/klog"
)
// NameSystems returns the name system used by the generators in this package.
@ -66,7 +66,7 @@ func DefaultNameSystem() string {
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
klog.Fatalf("Failed loading boilerplate: %v", err)
}
var packageList generator.Packages
@ -75,7 +75,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
objectMeta, internal, err := objectMetaForPackage(p)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
if objectMeta == nil {
// no types in this package had genclient
@ -88,7 +88,7 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
if internal {
lastSlash := strings.LastIndex(p.Path, "/")
if lastSlash == -1 {
glog.Fatalf("error constructing internal group version for package %q", p.Path)
klog.Fatalf("error constructing internal group version for package %q", p.Path)
}
gv.Group = clientgentypes.Group(p.Path[lastSlash+1:])
internalGVPkg = p.Path
@ -223,7 +223,7 @@ func (g *listerGenerator) Imports(c *generator.Context) (imports []string) {
func (g *listerGenerator) GenerateType(c *generator.Context, t *types.Type, w io.Writer) error {
sw := generator.NewSnippetWriter(w, c, "$", "$")
glog.V(5).Infof("processing type %v", t)
klog.V(5).Infof("processing type %v", t)
m := map[string]interface{}{
"Resource": c.Universe.Function(types.Name{Package: t.Name.Package, Name: "Resource"}),
"type": t,

View File

@ -17,8 +17,8 @@ limitations under the License.
package generators
import (
"github.com/golang/glog"
"k8s.io/gengo/types"
"k8s.io/klog"
)
// extractBoolTagOrDie gets the comment-tags for the key and asserts that, if
@ -27,7 +27,7 @@ import (
func extractBoolTagOrDie(key string, lines []string) bool {
val, err := types.ExtractSingleBoolCommentTag("+", key, false, lines)
if err != nil {
glog.Fatal(err)
klog.Fatal(err)
}
return val
}

View File

@ -20,16 +20,17 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/code-generator/cmd/lister-gen/generators"
"k8s.io/code-generator/pkg/util"
"k8s.io/gengo/args"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/lister-gen/args"
)
func main() {
klog.InitFlags(nil)
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
@ -44,7 +45,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
// Run it.
@ -53,7 +54,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -1,13 +0,0 @@
# Generate OpenAPI definitions
- To generate a definition for a specific type or package, add the "+k8s:openapi-gen=true" tag to the type/package comment lines.
- To exclude a type or a member from a tagged package/type, add the "+k8s:openapi-gen=false" tag to the comment lines.
# OpenAPI Extensions
The OpenAPI spec can have extensions on types. To define one or more extensions on a type or its members,
add "+k8s:openapi-gen=x-kubernetes-$NAME:$VALUE" to the comment lines before the type/member. A type/member can
have multiple extensions. The rest of the comment line is used as $VALUE, so there is no need to
escape or quote the value string. Extensions can be used to pass more information to client generators or
documentation generators. For example, a type may have a friendly name to be displayed in documentation or
be used in a client's fluent interface.
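
As a hedged illustration of the tags described above (the package and type names are made up, not taken from this repository):

```go
// Package v1alpha1 opts all of its types into OpenAPI generation.
// +k8s:openapi-gen=true
package v1alpha1

// Widget is a hypothetical type; the tag below attaches an
// x-kubernetes-* vendor extension to its generated schema.
// +k8s:openapi-gen=x-kubernetes-example:some value
type Widget struct {
	// DisplayName is included in the generated definition.
	DisplayName string `json:"displayName"`

	// hidden is excluded from the generated definition.
	// +k8s:openapi-gen=false
	hidden string
}
```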

View File

@ -1,53 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package args
import (
"fmt"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
)
// CustomArgs is used by the gengo framework to pass args specific to this generator.
type CustomArgs struct{}
// NewDefaults returns default arguments for the generator.
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
genericArgs := args.Default().WithoutDefaultFlagParsing()
customArgs := &CustomArgs{}
genericArgs.CustomArgs = customArgs
genericArgs.OutputFileBaseName = "openapi_generated"
return genericArgs, customArgs
}
// AddFlags add the generator flags to the flag set.
func (ca *CustomArgs) AddFlags(fs *pflag.FlagSet) {}
// Validate checks the given arguments.
func Validate(genericArgs *args.GeneratorArgs) error {
_ = genericArgs.CustomArgs.(*CustomArgs)
if len(genericArgs.OutputFileBaseName) == 0 {
return fmt.Errorf("output file base name cannot be empty")
}
if len(genericArgs.OutputPackagePath) == 0 {
return fmt.Errorf("output package cannot be empty")
}
return nil
}

View File

@ -1,61 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package generates an OpenAPI definition file to be used in OpenAPI spec generation on API servers. To generate
// a definition for a specific type or package, add the "+k8s:openapi-gen=true" tag to the type/package comment lines. To
// exclude a type from a tagged package, add the "+k8s:openapi-gen=false" tag to the type comment lines.
package main
import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/gengo/args"
"k8s.io/kube-openapi/pkg/generators"
generatorargs "k8s.io/code-generator/cmd/openapi-gen/args"
"k8s.io/code-generator/pkg/util"
)
func main() {
genericArgs, customArgs := generatorargs.NewDefaults()
// Override defaults.
// TODO: move this out of openapi-gen
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.AddFlags(pflag.CommandLine)
customArgs.AddFlags(pflag.CommandLine)
flag.Set("logtostderr", "true")
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
}
// Run it.
if err := genericArgs.Execute(
generators.NameSystems(),
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
}

View File

@ -22,7 +22,7 @@ import (
"path"
"strings"
"github.com/golang/glog"
"k8s.io/klog"
clientgentypes "k8s.io/code-generator/cmd/client-gen/types"
"k8s.io/gengo/args"
@ -46,7 +46,7 @@ func DefaultNameSystem() string {
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
boilerplate, err := arguments.LoadGoBoilerplate()
if err != nil {
glog.Fatalf("Failed loading boilerplate: %v", err)
klog.Fatalf("Failed loading boilerplate: %v", err)
}
packages := generator.Packages{}
@ -54,27 +54,27 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
pkg := context.Universe.Package(inputDir)
internal, err := isInternal(pkg)
if err != nil {
glog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err)
klog.V(5).Infof("skipping the generation of %s file, due to err %v", arguments.OutputFileBaseName, err)
continue
}
if internal {
glog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name)
klog.V(5).Infof("skipping the generation of %s file because %s package contains internal types, note that internal types don't have \"json\" tags", arguments.OutputFileBaseName, pkg.Name)
continue
}
registerFileName := "register.go"
searchPath := path.Join(args.DefaultSourceTree(), inputDir, registerFileName)
if _, err := os.Stat(path.Join(searchPath)); err == nil {
glog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath)
klog.V(5).Infof("skipping the generation of %s file because %s already exists in the path %s", arguments.OutputFileBaseName, registerFileName, searchPath)
continue
} else if err != nil && !os.IsNotExist(err) {
glog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName)
klog.Fatalf("an error %v has occurred while checking if %s exists", err, registerFileName)
}
gv := clientgentypes.GroupVersion{}
{
pathParts := strings.Split(pkg.Path, "/")
if len(pathParts) < 2 {
glog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path)
klog.Errorf("the path of the package must contain the group name and the version, path = %s", pkg.Path)
continue
}
gv.Group = clientgentypes.Group(pathParts[len(pathParts)-2])
@ -84,14 +84,14 @@ func Packages(context *generator.Context, arguments *args.GeneratorArgs) generat
// extract the fully qualified API group name from it and overwrite the group inferred from the package path
if override := types.ExtractCommentTags("+", pkg.DocComments)["groupName"]; override != nil {
groupName := override[0]
glog.V(5).Infof("overriding the group name with = %s", groupName)
klog.V(5).Infof("overriding the group name with = %s", groupName)
gv.Group = clientgentypes.Group(groupName)
}
}
typesToRegister := []*types.Type{}
for _, t := range pkg.Types {
glog.V(5).Infof("considering type = %s", t.Name.String())
klog.V(5).Infof("considering type = %s", t.Name.String())
for _, typeMember := range t.Members {
if typeMember.Name == "TypeMeta" && typeMember.Embedded == true {
typesToRegister = append(typesToRegister, t)
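
The groupName override read above comes from the package's doc comments. A minimal doc.go that would trigger it might look like this (the group and package names are illustrative):

```go
// Package v1alpha1 contains the external types for this API group.
//
// The tag below overrides the group that register-gen would otherwise infer
// from the second-to-last element of the package path.
// +groupName=samplegroup.example.com
package v1alpha1
```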

View File

@ -20,8 +20,8 @@ import (
"flag"
"path/filepath"
"github.com/golang/glog"
"github.com/spf13/pflag"
"k8s.io/klog"
generatorargs "k8s.io/code-generator/cmd/register-gen/args"
"k8s.io/code-generator/cmd/register-gen/generators"
@ -30,6 +30,7 @@ import (
)
func main() {
klog.InitFlags(nil)
genericArgs := generatorargs.NewDefaults()
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), util.BoilerplatePath())
genericArgs.AddFlags(pflag.CommandLine)
@ -38,7 +39,7 @@ func main() {
pflag.Parse()
if err := generatorargs.Validate(genericArgs); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
if err := genericArgs.Execute(
@ -46,7 +47,7 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Fatalf("Error: %v", err)
klog.Fatalf("Error: %v", err)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -32,10 +32,11 @@ import (
"k8s.io/gengo/args"
"k8s.io/gengo/examples/set-gen/generators"
"github.com/golang/glog"
"k8s.io/klog"
)
func main() {
klog.InitFlags(nil)
arguments := args.Default()
// Override defaults.
@ -48,8 +49,8 @@ func main() {
generators.DefaultNameSystem(),
generators.Packages,
); err != nil {
glog.Errorf("Error: %v", err)
klog.Errorf("Error: %v", err)
os.Exit(1)
}
glog.V(2).Info("Completed successfully.")
klog.V(2).Info("Completed successfully.")
}

View File

@ -1,5 +0,0 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

View File

@ -1,7 +0,0 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

View File

@ -1,12 +0,0 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,185 +0,0 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing the directory index strips path segments matching `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7

View File

@ -1,375 +0,0 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/secure/precis"
"golang.org/x/text/unicode/norm"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover tests cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
if parsed, e := url.Parse(u); e != nil {
return "", e
} else {
options := make([]precis.Option, 1, 3)
options[0] = precis.IgnoreCase
if f&FlagLowercaseHost == FlagLowercaseHost {
options = append(options, precis.FoldCase())
}
options = append(options, precis.Norm(norm.NFC))
profile := precis.NewFreeform(options...)
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
return "", e
}
return NormalizeURL(parsed, f), nil
}
panic("Unreachable code.")
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k, _ := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}

View File

@ -1,11 +0,0 @@
language: go
go:
- 1.4
- tip
install:
- go build .
script:
- go test -v

View File

@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,16 +0,0 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
## License
Go license (BSD-3-Clause)
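
A small usage sketch of the two exported helpers defined in the source below; aside from the standard library, nothing here is specific to this repository:

```go
package main

import (
	"fmt"
	"net/url"

	"github.com/PuerkitoBio/urlesc"
)

func main() {
	// QueryEscape escapes a string for use as a query component
	// ('&' becomes %26, space becomes '+').
	fmt.Println(urlesc.QueryEscape("a b&c")) // a+b%26c

	// Escape reassembles a parsed URL, escaping the path without
	// over-escaping the reserved characters that net/url would.
	u, err := url.Parse("http://example.com/a b/c")
	if err != nil {
		panic(err)
	}
	fmt.Println(urlesc.Escape(u)) // http://example.com/a%20b/c
}
```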

View File

@ -1,180 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not be escaped, as per RFC 3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}

View File

@ -1,70 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
restful.html
*.out
tmp.prof
go-restful.test
examples/restful-basic-authentication
examples/restful-encoding-filter
examples/restful-filters
examples/restful-hello-world
examples/restful-resource-functions
examples/restful-serve-static
examples/restful-user-service
*.DS_Store
examples/restful-user-resource
examples/restful-multi-containers
examples/restful-form-handling
examples/restful-CORS-filter
examples/restful-options-filter
examples/restful-curly-router
examples/restful-cpuprofiler-service
examples/restful-pre-post-filters
curly.prof
examples/restful-NCSA-logging
examples/restful-html-template
s.html
restful-path-tail

View File

@ -1,6 +0,0 @@
language: go
go:
- 1.x
script: go test -v

View File

@ -1,223 +0,0 @@
Change history of go-restful
=
2017-02-16
- solved issue #304, make operation names unique
2017-01-30
[IMPORTANT] For swagger users, change your import statement to:
swagger "github.com/emicklei/go-restful-swagger12"
- moved swagger 1.2 code to go-restful-swagger12
- created TAG 2.0.0
2017-01-27
- remove defer request body close
- expose Dispatch for testing filters and RouteFunctions

- swagger response model cannot be array
- created TAG 1.0.0
2016-12-22
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
2016-11-26
- Default change! now use CurlyRouter (was RouterJSR311)
- Default change! no more caching of request content
- Default change! do not recover from panics
2016-09-22
- fix the DefaultRequestContentType feature
2016-02-14
- take the quality factor of the Accept header media type into account when deciding the content type of the response
- add constructors for custom entity accessors for xml and json
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
- add configurable logging
2015-03-18
- if not specified, the Operation is derived from the Route function
2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route
2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation
2014-04-16
- (api add) expose constructor of Request for testing.
2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.
2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use|allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled by default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- (api add) Added support for request/response filter functions
2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits
2012-11-14
- Initial commit

View File

@ -1,22 +0,0 @@
Copyright (c) 2012,2013 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@ -1,7 +0,0 @@
all: test
test:
go test -v .
ex:
cd examples && ls *.go | xargs go build -o /tmp/ignore

View File

@ -1,74 +0,0 @@
go-restful
==========
package for building REST-style Web Services using Google Go
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI
### Example
```Go
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser).
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Writes(User{}))
...
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
```
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
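
The example above stops at the route definition; a hedged sketch of serving it through the default container follows. The User type, the handler body, and the port are illustrative, while the builder calls and MIME constants are the ones used above.

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

// User is an illustrative payload type, not part of the package.
type User struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// findUser reads the path parameter and writes an entity negotiated
// against the Produces list (JSON or XML). The lookup is a stub.
func findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	response.WriteEntity(User{ID: id, Name: "example"})
}

func main() {
	ws := new(restful.WebService)
	ws.
		Path("/users").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{user-id}").To(findUser).
		Doc("get a user").
		Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
		Writes(User{}))

	// restful.Add registers the WebService with the default container,
	// which dispatches through http.DefaultServeMux.
	restful.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```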
### Features
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration
### Resources
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.

View File

@ -1 +0,0 @@
{"SkipDirs": ["examples"]}

View File

@ -1,10 +0,0 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof

View File

@ -1,123 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
"net"
"net/http"
"strings"
)
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
writer http.ResponseWriter
compressor io.WriteCloser
encoding string
}
// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
return c.writer.Header()
}
// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
c.writer.WriteHeader(status)
}
// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
if c.isCompressorClosed() {
return -1, errors.New("Compressing error: tried to write data using closed compressor")
}
return c.compressor.Write(bytes)
}
// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
return errors.New("Compressing error: tried to close already closed compressor")
}
c.compressor.Close()
if ENCODING_GZIP == c.encoding {
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
}
if ENCODING_DEFLATE == c.encoding {
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
}
// gc hint needed?
c.compressor = nil
return nil
}
func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor
}
// Hijack implements the Hijacker interface
// This is especially useful when combining Container.EnableContentEncoding
// with websockets (for instance gorilla/websocket)
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := c.writer.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
}
return hijacker.Hijack()
}
// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
gi := strings.Index(header, ENCODING_GZIP)
zi := strings.Index(header, ENCODING_DEFLATE)
// use in order of appearance
if gi == -1 {
return zi != -1, ENCODING_DEFLATE
} else if zi == -1 {
return gi != -1, ENCODING_GZIP
} else {
if gi < zi {
return true, ENCODING_GZIP
}
return true, ENCODING_DEFLATE
}
}
// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
c := new(CompressingResponseWriter)
c.writer = httpWriter
var err error
if ENCODING_GZIP == encoding {
w := currentCompressorProvider.AcquireGzipWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_GZIP
} else if ENCODING_DEFLATE == encoding {
w := currentCompressorProvider.AcquireZlibWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_DEFLATE
} else {
return nil, errors.New("Unknown encoding:" + encoding)
}
return c, err
}

View File

@ -1,103 +0,0 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed number
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
gzipWriters chan *gzip.Writer
gzipReaders chan *gzip.Reader
zlibWriters chan *zlib.Writer
writersCapacity int
readersCapacity int
}
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
b := &BoundedCachedCompressors{
gzipWriters: make(chan *gzip.Writer, writersCapacity),
gzipReaders: make(chan *gzip.Reader, readersCapacity),
zlibWriters: make(chan *zlib.Writer, writersCapacity),
writersCapacity: writersCapacity,
readersCapacity: readersCapacity,
}
for ix := 0; ix < writersCapacity; ix++ {
b.gzipWriters <- newGzipWriter()
b.zlibWriters <- newZlibWriter()
}
for ix := 0; ix < readersCapacity; ix++ {
b.gzipReaders <- newGzipReader()
}
return b
}
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
var writer *gzip.Writer
select {
case writer, _ = <-b.gzipWriters:
default:
// return a new unmanaged one
writer = newGzipWriter()
}
return writer
}
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
// forget the unmanaged ones
if len(b.gzipWriters) < b.writersCapacity {
b.gzipWriters <- w
}
}
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
var reader *gzip.Reader
select {
case reader, _ = <-b.gzipReaders:
default:
// return a new unmanaged one
reader = newGzipReader()
}
return reader
}
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
// forget the unmanaged ones
if len(b.gzipReaders) < b.readersCapacity {
b.gzipReaders <- r
}
}
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
var writer *zlib.Writer
select {
case writer, _ = <-b.zlibWriters:
default:
// return a new unmanaged one
writer = newZlibWriter()
}
return writer
}
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
// forget the unmanaged ones
if len(b.zlibWriters) < b.writersCapacity {
b.zlibWriters <- w
}
}

View File

@ -1,91 +0,0 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/gzip"
"compress/zlib"
"sync"
)
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
type SyncPoolCompessors struct {
GzipWriterPool *sync.Pool
GzipReaderPool *sync.Pool
ZlibWriterPool *sync.Pool
}
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
func NewSyncPoolCompessors() *SyncPoolCompessors {
return &SyncPoolCompessors{
GzipWriterPool: &sync.Pool{
New: func() interface{} { return newGzipWriter() },
},
GzipReaderPool: &sync.Pool{
New: func() interface{} { return newGzipReader() },
},
ZlibWriterPool: &sync.Pool{
New: func() interface{} { return newZlibWriter() },
},
}
}
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
return s.GzipWriterPool.Get().(*gzip.Writer)
}
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
s.GzipWriterPool.Put(w)
}
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
return s.GzipReaderPool.Get().(*gzip.Reader)
}
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
s.GzipReaderPool.Put(r)
}
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
return s.ZlibWriterPool.Get().(*zlib.Writer)
}
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
s.ZlibWriterPool.Put(w)
}
func newGzipWriter() *gzip.Writer {
// create with an empty bytes writer; it will be replaced before using the gzipWriter
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}
func newGzipReader() *gzip.Reader {
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
// we can safely use currentCompressorProvider because it is set on package initialization.
w := currentCompressorProvider.AcquireGzipWriter()
defer currentCompressorProvider.ReleaseGzipWriter(w)
b := new(bytes.Buffer)
w.Reset(b)
w.Flush()
w.Close()
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
if err != nil {
panic(err.Error())
}
return reader
}
func newZlibWriter() *zlib.Writer {
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}

View File

@ -1,54 +0,0 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// CompressorProvider describes a component that can provide compressors for the std methods.
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer
// Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader
// Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer
// Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer)
}
// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider
func init() {
currentCompressorProvider = NewSyncPoolCompessors()
}
// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider
}
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
if p == nil {
panic("cannot set compressor provider to nil")
}
currentCompressorProvider = p
}
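
A short sketch of how the providers above are typically combined with response compression; the port is illustrative, and the assumption is that the provider is swapped before any compressor is acquired.

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	// Replace the default sync.Pool-backed provider with a bounded cache
	// of 20 gzip/zlib writers and 20 gzip readers.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))

	// Enable gzip/deflate responses for this container; the Accept-Encoding
	// header of each request decides whether a compressor is acquired.
	container := restful.NewContainer()
	container.EnableContentEncoding(true)

	log.Fatal(http.ListenAndServe(":8081", container)) // illustrative port
}
```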

View File

@ -1,30 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
ENCODING_GZIP = "gzip"
ENCODING_DEFLATE = "deflate"
)

View File

@ -1,366 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"sync"
"github.com/emicklei/go-restful/log"
)
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
// The requests are further dispatched to routes of WebServices using a RouteSelector
type Container struct {
webServicesLock sync.RWMutex
webServices []*WebService
ServeMux *http.ServeMux
isRegisteredOnRoot bool
containerFilters []FilterFunction
doNotRecover bool // default is true
recoverHandleFunc RecoverHandleFunction
serviceErrorHandleFunc ServiceErrorHandleFunction
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
contentEncodingEnabled bool // default is false
}
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},
ServeMux: http.NewServeMux(),
isRegisteredOnRoot: false,
containerFilters: []FilterFunction{},
doNotRecover: true,
recoverHandleFunc: logStackOnRecover,
serviceErrorHandleFunc: writeServiceError,
router: CurlyRouter{},
contentEncodingEnabled: false}
}
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
// The first argument is what recover() returns. The second must be used to communicate an error response.
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
// RecoverHandler changes the default function (logStackOnRecover) to be called
// when a panic is detected. DoNotRecover must have its default value (=false).
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
c.recoverHandleFunc = handler
}
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
// The first argument is the service error, the second is the request that resulted in the error and
// the third must be used to communicate an error response.
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
// ServiceErrorHandler changes the default function (writeServiceError) to be called
// when a ServiceError is detected.
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
c.serviceErrorHandleFunc = handler
}
// DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation.
// Default value is true.
func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}
// Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
func (c *Container) EnableContentEncoding(enabled bool) {
c.contentEncodingEnabled = enabled
}
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
func (c *Container) Add(service *WebService) *Container {
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// if rootPath was not set then lazy initialize it
if len(service.rootPath) == 0 {
service.Path("/")
}
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
os.Exit(1)
}
}
// If not registered on root then add specific mapping
if !c.isRegisteredOnRoot {
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
}
c.webServices = append(c.webServices, service)
return c
}
// addHandler may set a new HandleFunc for the serveMux
// this function must run inside the critical region protected by the webServicesLock.
// returns true if the function was registered on root ("/")
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
pattern := fixedPrefixPath(service.RootPath())
// check if root path registration is needed
if "/" == pattern || "" == pattern {
serveMux.HandleFunc("/", c.dispatch)
return true
}
// detect if registration already exists
alreadyMapped := false
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
alreadyMapped = true
break
}
}
if !alreadyMapped {
serveMux.HandleFunc(pattern, c.dispatch)
if !strings.HasSuffix(pattern, "/") {
serveMux.HandleFunc(pattern+"/", c.dispatch)
}
}
return false
}
func (c *Container) Remove(ws *WebService) error {
if c.ServeMux == http.DefaultServeMux {
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
log.Printf(errMsg)
return errors.New(errMsg)
}
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// build a new ServeMux and re-register all WebServices
newServeMux := http.NewServeMux()
newServices := []*WebService{}
newIsRegisteredOnRoot := false
for _, each := range c.webServices {
if each.rootPath != ws.rootPath {
// If not registered on root then add specific mapping
if !newIsRegisteredOnRoot {
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
}
newServices = append(newServices, each)
}
}
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
return nil
}
// logStackOnRecover is the default RecoverHandleFunction and is called
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
// Default implementation logs the stacktrace and writes the stacktrace on the response.
// This may be a security issue as it exposes sourcecode information.
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
for i := 2; ; i += 1 {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
}
log.Print(buffer.String())
httpWriter.WriteHeader(http.StatusInternalServerError)
httpWriter.Write(buffer.Bytes())
}
// writeServiceError is the default ServiceErrorHandleFunction and is called
// when a ServiceError is returned during route selection. Default implementation
// calls resp.WriteErrorString(err.Code, err.Message)
func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
if httpWriter == nil {
panic("httpWriter cannot be nil")
}
if httpRequest == nil {
panic("httpRequest cannot be nil")
}
c.dispatch(httpWriter, httpRequest)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter
// CompressingResponseWriter should be closed after all operations are done
defer func() {
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
compressWriter.Close()
}
}()
// Install panic recovery unless told otherwise
if !c.doNotRecover { // catch all for 500 response
defer func() {
if r := recover(); r != nil {
c.recoverHandleFunc(r, writer)
return
}
}()
}
// Detect if compression is needed
// assume without compression, test for override
if c.contentEncodingEnabled {
doCompress, encoding := wantsCompressedResponse(httpRequest)
if doCompress {
var err error
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
if err != nil {
log.Print("[restful] unable to install compressor: ", err)
httpWriter.WriteHeader(http.StatusInternalServerError)
return
}
}
}
// Find best match Route ; err is non nil if no match was found
var webService *WebService
var route *Route
var err error
func() {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
webService, route, err = c.router.SelectRoute(
c.webServices,
httpRequest)
}()
if err != nil {
// a non-200 response has already been written
// run container filters anyway ; they should not touch the response...
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
switch err.(type) {
case ServiceError:
ser := err.(ServiceError)
c.serviceErrorHandleFunc(ser, req, resp)
}
// TODO
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
return
}
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
// pass through filters (if any)
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
// compose filter chain
allFilters := []FilterFunction{}
allFilters = append(allFilters, c.containerFilters...)
allFilters = append(allFilters, webService.filters...)
allFilters = append(allFilters, route.Filters...)
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
// handle request by route after passing all filters
route.Function(wrappedRequest, wrappedResponse)
}}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// no filters, handle request by route
route.Function(wrappedRequest, wrappedResponse)
}
}
// fixedPrefixPath returns the fixed part of the pathspec; it may include template vars {}
func fixedPrefixPath(pathspec string) string {
varBegin := strings.Index(pathspec, "{")
if -1 == varBegin {
return pathspec
}
return pathspec[:varBegin]
}
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
func (c *Container) Handle(pattern string, handler http.Handler) {
c.ServeMux.Handle(pattern, handler)
}
// HandleWithFilter registers the handler for the given pattern.
// Container's filter chain is applied for handler.
// If a handler already exists for pattern, HandleWithFilter panics.
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
if len(c.containerFilters) == 0 {
handler.ServeHTTP(httpResponse, httpRequest)
return
}
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
handler.ServeHTTP(httpResponse, httpRequest)
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
}
c.Handle(pattern, http.HandlerFunc(f))
}
// Filter appends a container FilterFunction. These are called before dispatching
// a http.Request to a WebService from the container
func (c *Container) Filter(filter FilterFunction) {
c.containerFilters = append(c.containerFilters, filter)
}
// RegisteredWebServices returns the collections of added WebServices
func (c *Container) RegisteredWebServices() []*WebService {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
result := make([]*WebService, len(c.webServices))
for ix := range c.webServices {
result[ix] = c.webServices[ix]
}
return result
}
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
func (c *Container) computeAllowedMethods(req *Request) []string {
// Go through all RegisteredWebServices() and all its Routes to collect the options
methods := []string{}
requestPath := req.Request.URL.Path
for _, ws := range c.RegisteredWebServices() {
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
finalMatch := matches[len(matches)-1]
for _, rt := range ws.Routes() {
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
if matches != nil {
lastMatch := matches[len(matches)-1]
if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor /.
methods = append(methods, rt.Method)
}
}
}
}
}
// methods = append(methods, "OPTIONS") not sure about this
return methods
}
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
// It is basic because no parameter or (produces) content-type information is given.
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
resp := NewResponse(httpWriter)
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
return NewRequest(httpRequest), resp
}
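
A small sketch of a dedicated Container used as an http.Handler; the /healthz service, handler body, and port are illustrative.

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/healthz") // illustrative service
	ws.Route(ws.GET("/").To(func(req *restful.Request, resp *restful.Response) {
		resp.WriteHeader(http.StatusOK)
	}))

	// A dedicated Container owns its own ServeMux, so WebServices can be
	// added (and later removed) without touching http.DefaultServeMux.
	container := restful.NewContainer()
	container.Add(ws)

	server := &http.Server{Addr: ":8082", Handler: container} // illustrative port
	log.Fatal(server.ListenAndServe())
}
```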

View File

@ -1,202 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"regexp"
"strconv"
"strings"
)
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
ExposeHeaders []string // list of Header names
AllowedHeaders []string // list of Header names
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
AllowedMethods []string
MaxAge int // number of seconds before requiring new Options request
CookiesAllowed bool
Container *Container
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
origin := req.Request.Header.Get(HEADER_Origin)
if len(origin) == 0 {
if trace {
traceLogger.Print("no Http header Origin set")
}
chain.ProcessFilter(req, resp)
return
}
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
if trace {
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
}
chain.ProcessFilter(req, resp)
return
}
if req.Request.Method != "OPTIONS" {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
c.doPreflightRequest(req, resp)
} else {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
}
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
c.setOptionsHeaders(req, resp)
// continue processing the response
}
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
if len(c.AllowedMethods) == 0 {
if c.Container == nil {
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
} else {
c.AllowedMethods = c.Container.computeAllowedMethods(req)
}
}
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestMethod,
acrm,
c.AllowedMethods)
}
return
}
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
if len(acrhs) > 0 {
for _, each := range strings.Split(acrhs, ",") {
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestHeaders,
acrhs,
c.AllowedHeaders)
}
return
}
}
}
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
c.setOptionsHeaders(req, resp)
// return http 200 response, no body
}
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
c.checkAndSetExposeHeaders(resp)
c.setAllowOriginHeader(req, resp)
c.checkAndSetAllowCredentials(resp)
if c.MaxAge > 0 {
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
}
}
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
if len(origin) == 0 {
return false
}
if len(c.AllowedDomains) == 0 {
return true
}
allowed := false
for _, domain := range c.AllowedDomains {
if domain == origin {
allowed = true
break
}
}
if !allowed {
if len(c.allowedOriginPatterns) == 0 {
// compile allowed domains to allowed origin patterns
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
if err != nil {
return false
}
c.allowedOriginPatterns = allowedOriginRegexps
}
for _, pattern := range c.allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
}
return allowed
}
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
origin := req.Request.Header.Get(HEADER_Origin)
if c.isOriginAllowed(origin) {
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
}
}
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
if len(c.ExposeHeaders) > 0 {
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
}
}
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
if c.CookiesAllowed {
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
}
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
for _, each := range allowedMethods {
if each == method {
return true
}
}
return false
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
for _, each := range c.AllowedHeaders {
if strings.ToLower(each) == strings.ToLower(header) {
return true
}
}
return false
}
// Take a list of strings and compile them into a list of regular expressions.
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
regexps := []*regexp.Regexp{}
for _, regexpStr := range regexpStrings {
r, err := regexp.Compile(regexpStr)
if err != nil {
return regexps, err
}
regexps = append(regexps, r)
}
return regexps, nil
}
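
A brief sketch of installing this filter on the default container, mirroring the snippet in doc.go; the header and method lists and the port are illustrative.

```Go
package main

import (
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	cors := restful.CrossOriginResourceSharing{
		ExposeHeaders:  []string{"X-My-Header"},             // illustrative
		AllowedHeaders: []string{"Content-Type", "Accept"},  // illustrative
		AllowedMethods: []string{"GET", "POST"},             // illustrative
		CookiesAllowed: false,
		Container:      restful.DefaultContainer,
	}

	// Run the CORS filter before any WebService on the default container
	// and let a pre-defined filter answer plain OPTIONS requests.
	restful.Filter(cors.Filter)
	restful.Filter(restful.OPTIONSFilter())

	log.Fatal(http.ListenAndServe(":8080", nil))
}
```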

View File

@ -1,2 +0,0 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out

View File

@ -1,164 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
"regexp"
"sort"
"strings"
)
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
type CurlyRouter struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (c CurlyRouter) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
requestTokens := tokenizePath(httpRequest.URL.Path)
detectedService := c.detectWebService(requestTokens, webServices)
if detectedService == nil {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
}
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
if len(candidateRoutes) == 0 {
if trace {
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
}
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
if selectedRoute == nil {
return detectedService, nil, err
}
return detectedService, selectedRoute, nil
}
// selectRoutes returns a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
candidates := sortableCurlyRoutes{}
for _, each := range ws.routes {
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
if matches {
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(sort.Reverse(candidates))
return candidates
}
// matchesRouteByPathTokens computes whether the route matches, how many parameters match and how many static path elements there are.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
if len(routeTokens) < len(requestTokens) {
// proceed in matching only if last routeToken is wildcard
count := len(routeTokens)
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
return false, 0, 0
}
// proceed
}
for i, routeToken := range routeTokens {
if i == len(requestTokens) {
// reached end of request path
return false, 0, 0
}
requestToken := requestTokens[i]
if strings.HasPrefix(routeToken, "{") {
paramCount++
if colon := strings.Index(routeToken, ":"); colon != -1 {
// match by regex
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
if !matchesToken {
return false, 0, 0
}
if matchesRemainder {
break
}
}
} else { // no { prefix
if requestToken != routeToken {
return false, 0, 0
}
staticCount++
}
}
return true, paramCount, staticCount
}
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
regPart := routeToken[colon+1 : len(routeToken)-1]
if regPart == "*" {
if trace {
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
}
return true, true
}
matched, err := regexp.MatchString(regPart, requestToken)
return (matched && err == nil), false
}
var jsr311Router = RouterJSR311{}
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
// tracing is done inside detectRoute
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
}
// detectWebService returns the best matching webService given the list of path tokens.
// see also computeWebserviceScore
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
var best *WebService
score := -1
for _, each := range webServices {
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
if matches && (eachScore > score) {
best = each
score = eachScore
}
}
return best
}
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
if len(tokens) > len(requestTokens) {
return false, 0
}
score := 0
for i := 0; i < len(tokens); i++ {
each := requestTokens[i]
other := tokens[i]
if len(each) == 0 && len(other) == 0 {
score++
continue
}
if len(other) > 0 && strings.HasPrefix(other, "{") {
// no empty match
if len(each) == 0 {
return false, score
}
score += 1
} else {
// not a parameter
if each != other {
return false, score
}
score += (len(tokens) - i) * 10 //fuzzy
}
}
return true, score
}
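
A short sketch of the curly syntax this router matches; the /static service, handler body, and port are illustrative.

```Go
package main

import (
	"io"
	"log"
	"net/http"

	restful "github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/static") // illustrative service

	// {subpath:*} greedily matches the remaining tail of the request path;
	// a regular-expression form such as {zip:[0-9]+} restricts a single token.
	ws.Route(ws.GET("/{subpath:*}").To(func(req *restful.Request, resp *restful.Response) {
		io.WriteString(resp, "tail = "+req.PathParameter("subpath"))
	}))

	restful.Add(ws) // CurlyRouter is the container's default router
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```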

View File

@ -1,52 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
route Route
paramCount int
staticCount int
}
type sortableCurlyRoutes []curlyRoute
func (s *sortableCurlyRoutes) add(route curlyRoute) {
*s = append(*s, route)
}
func (s sortableCurlyRoutes) routes() (routes []Route) {
for _, each := range s {
routes = append(routes, each.route) // TODO change return type
}
return routes
}
func (s sortableCurlyRoutes) Len() int {
return len(s)
}
func (s sortableCurlyRoutes) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sortableCurlyRoutes) Less(i, j int) bool {
ci := s[i]
cj := s[j]
// primary key
if ci.staticCount < cj.staticCount {
return true
}
if ci.staticCount > cj.staticCount {
return false
}
// secondary key
if ci.paramCount < cj.paramCount {
return true
}
if ci.paramCount > cj.paramCount {
return false
}
return ci.route.Path < cj.route.Path
}

View File

@ -1,185 +0,0 @@
/*
Package restful, a lean package for creating REST-style WebServices without magic.
WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handle Http requests from a server.
A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
...
// GET http://localhost:8080/users/1
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
Use the following statement to pass the request,response pair to the next filter or RouteFunction
chain.ProcessFilter(req, resp)
Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
Route Filters
These are processed before calling the function associated with the Route.
// install 2 chained route filters (processed before calling findUser)
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
restful.DefaultContainer.EnableContentEncoding(true)
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell the client, via the response, what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
400: Bad Request
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
404: Not Found
Despite a valid URI, the resource requested may not be available.
500: Internal Server Error
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
405: Method Not Allowed
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
406: Not Acceptable
The request either lacks an Accept header or specifies one that is unknown for this operation.
415: Unsupported Media Type
The request either lacks a Content-Type header or specifies one that is unknown for this operation.
ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
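For illustration (a sketch reusing the earlier UserResource example; the users map and the error text are assumptions), a handler could combine the status code with a ServiceError like this:
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
usr, found := u.users[id] // u.users is an assumed in-memory map of the application
if !found {
response.WriteServiceError(http.StatusNotFound, restful.NewError(http.StatusNotFound, "User "+id+" could not be found."))
return
}
response.WriteEntity(usr)
}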
Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change them.
restful.DefaultContainer.DoNotRecover(false)
DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to false, the container will recover from panics.
The default value is true.
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance is even more improved when using a preloaded cache. You can also inject your own implementation.
Troubleshooting
This package can produce detailed logging of the complete HTTP request matching process and filter invocation.
Enabling this feature requires you to set an implementation of restful.StdLogger (e.g. log.Logger), for example:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stderr. Different logging packages are supported as
long as they conform to the `StdLogger` interface defined in the `log` sub-package; writing an adapter for your
preferred package is simple.
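For illustration, any value with Print and Printf methods satisfies StdLogger; a minimal adapter (a sketch; myLogger is an assumed application type, not part of this package) could look like:
type myLogger struct{ prefix string }
func (l myLogger) Print(v ...interface{}) { fmt.Println(append([]interface{}{l.prefix}, v...)...) }
func (l myLogger) Printf(format string, v ...interface{}) { fmt.Printf(l.prefix+" "+format+"\n", v...) }
restful.SetLogger(myLogger{prefix: "[restful]"})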
Resources
[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful

View File

@ -1,163 +0,0 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"encoding/json"
"encoding/xml"
"strings"
"sync"
)
// EntityReaderWriter can read and write values using an encoding such as JSON or XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
// The Request may have a decompressing reader. Depends on Content-Encoding.
Read(req *Request, v interface{}) error
// Write a serialized version of the value on the response.
// The Response may have a compressing writer. Depends on Accept-Encoding.
// status should be a valid Http Status code
Write(resp *Response, status int, v interface{}) error
}
// entityAccessRegistry is a singleton
var entityAccessRegistry = &entityReaderWriters{
protection: new(sync.RWMutex),
accessors: map[string]EntityReaderWriter{},
}
// entityReaderWriters associates MIME to an EntityReaderWriter
type entityReaderWriters struct {
protection *sync.RWMutex
accessors map[string]EntityReaderWriter
}
func init() {
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}
// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
entityAccessRegistry.protection.Lock()
defer entityAccessRegistry.protection.Unlock()
entityAccessRegistry.accessors[mime] = erw
}
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
return entityJSONAccess{ContentType: contentType}
}
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
return entityXMLAccess{ContentType: contentType}
}
// accessorAt returns the registered ReaderWriter for this MIME type.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
r.protection.RLock()
defer r.protection.RUnlock()
er, ok := r.accessors[mime]
if !ok {
// retry with reverse lookup
// more expensive but we are in an exceptional situation anyway
for k, v := range r.accessors {
if strings.Contains(mime, k) {
return v, true
}
}
}
return er, ok
}
// entityXMLAccess is an EntityReaderWriter for XML encoding
type entityXMLAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshalls the value from XML
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
return xml.NewDecoder(req.Request.Body).Decode(v)
}
// Write marshals the value to XML and sets the Content-Type header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
return writeXML(resp, status, e.ContentType, v)
}
// writeXML marshals the value to XML and sets the Content-Type header.
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := xml.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write([]byte(xml.Header))
if err != nil {
return err
}
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return xml.NewEncoder(resp).Encode(v)
}
// entityJSONAccess is an EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshalls the value from JSON
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
decoder := json.NewDecoder(req.Request.Body)
decoder.UseNumber()
return decoder.Decode(v)
}
// Write marshalls the value to JSON and set the Content-Type Header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
return writeJSON(resp, status, e.ContentType, v)
}
// writeJSON marshals the value to JSON and sets the Content-Type header.
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := json.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return json.NewEncoder(resp).Encode(v)
}
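// Illustrative usage (not part of this file): an application can reuse the JSON
// accessor for an additional, vendor-specific media type by registering it under
// that MIME string; the media type below is an assumption.
func init() {
RegisterEntityAccessor("application/vnd.example+json", NewEntityAccessorJSON("application/vnd.example+json"))
}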

View File

@ -1,35 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
Filters []FilterFunction // ordered list of FilterFunction
Index int // index into filters that is currently in progress
Target RouteFunction // function to call after passing all filters
}
// ProcessFilter passes the request,response pair through the next of Filters.
// Each filter can decide to proceed to the next Filter or handle the Response itself.
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
if f.Index < len(f.Filters) {
f.Index++
f.Filters[f.Index-1](request, response, f)
} else {
f.Target(request, response)
}
}
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
// See examples/restful-no-cache-filter.go for usage
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
resp.Header().Set("Expires", "0") // Proxies.
chain.ProcessFilter(req, resp)
}

View File

@ -1,248 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"fmt"
"net/http"
"sort"
)
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
// RouterJSR311 implements the Router interface.
// Concept of locators is not implemented.
type RouterJSR311 struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (r RouterJSR311) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
// Identify the root resource class (WebService)
dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
if err != nil {
return nil, nil, NewError(http.StatusNotFound, "")
}
// Obtain the set of candidate methods (Routes)
routes := r.selectRoutes(dispatcher, finalMatch)
if len(routes) == 0 {
return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
// Identify the method (Route) that will handle the request
route, ok := r.detectRoute(routes, httpRequest)
return dispatcher, route, ok
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
// http method
methodOk := []Route{}
for _, each := range routes {
if httpRequest.Method == each.Method {
methodOk = append(methodOk, each)
}
}
if len(methodOk) == 0 {
if trace {
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
}
return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
}
inputMediaOk := methodOk
// content-type
contentType := httpRequest.Header.Get(HEADER_ContentType)
inputMediaOk = []Route{}
for _, each := range methodOk {
if each.matchesContentType(contentType) {
inputMediaOk = append(inputMediaOk, each)
}
}
if len(inputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
}
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
outputMediaOk := []Route{}
accept := httpRequest.Header.Get(HEADER_Accept)
if len(accept) == 0 {
accept = "*/*"
}
for _, each := range inputMediaOk {
if each.matchesAccept(accept) {
outputMediaOk = append(outputMediaOk, each)
}
}
if len(outputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
}
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return &outputMediaOk[0], nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// n/m > n/* > */*
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
// TODO
return &routes[0]
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
filtered := &sortableRouteCandidates{}
for _, each := range dispatcher.Routes() {
pathExpr := each.pathExpr
matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
if matches != nil {
lastMatch := matches[len(matches)-1]
if len(lastMatch) == 0 || lastMatch == "/" { // include only if the remaining path is empty or "/"
filtered.candidates = append(filtered.candidates,
routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
}
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
}
return []Route{}
}
sort.Sort(sort.Reverse(filtered))
// select other routes from candidates whose expression matches the path remainder
matchingRoutes := []Route{filtered.candidates[0].route}
for c := 1; c < len(filtered.candidates); c++ {
each := filtered.candidates[c]
if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
matchingRoutes = append(matchingRoutes, each.route)
}
}
return matchingRoutes
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
filtered := &sortableDispatcherCandidates{}
for _, each := range dispatchers {
matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
filtered.candidates = append(filtered.candidates,
dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
}
return nil, "", errors.New("not found")
}
sort.Sort(sort.Reverse(filtered))
return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
}
// Types and functions to support the sorting of Routes
type routeCandidate struct {
route Route
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
func (r routeCandidate) expressionToMatch() string {
return r.route.pathExpr.Source
}
func (r routeCandidate) String() string {
return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
}
type sortableRouteCandidates struct {
candidates []routeCandidate
}
func (rcs *sortableRouteCandidates) Len() int {
return len(rcs.candidates)
}
func (rcs *sortableRouteCandidates) Swap(i, j int) {
rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
}
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
ci := rcs.candidates[i]
cj := rcs.candidates[j]
// primary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// secondary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// tertiary key
if ci.nonDefaultCount < cj.nonDefaultCount {
return true
}
if ci.nonDefaultCount > cj.nonDefaultCount {
return false
}
// quaternary key ("source" is interpreted as Path)
return ci.route.Path < cj.route.Path
}
// Types and functions to support the sorting of Dispatchers
type dispatcherCandidate struct {
dispatcher *WebService
finalMatch string
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
type sortableDispatcherCandidates struct {
candidates []dispatcherCandidate
}
func (dc *sortableDispatcherCandidates) Len() int {
return len(dc.candidates)
}
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
}
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
ci := dc.candidates[i]
cj := dc.candidates[j]
// primary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// secondary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// tertiary key
return ci.nonDefaultCount < cj.nonDefaultCount
}

View File

@ -1,34 +0,0 @@
package log
import (
stdlog "log"
"os"
)
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
}
var Logger StdLogger
func init() {
// default Logger
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}
// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}
// Print delegates to the Logger
func Print(v ...interface{}) {
Logger.Print(v...)
}
// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}

View File

@ -1,32 +0,0 @@
package restful
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"github.com/emicklei/go-restful/log"
)
var trace bool = false
var traceLogger log.StdLogger
func init() {
traceLogger = log.Logger // use the package logger by default
}
// TraceLogger enables detailed logging of Http request matching and filter invocation. By default no logger is set.
// You may call EnableTracing() directly to enable trace logging to the package-wide logger.
func TraceLogger(logger log.StdLogger) {
traceLogger = logger
EnableTracing(logger != nil)
}
// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger)
}
// EnableTracing can be used to turn trace logging on and off.
func EnableTracing(enabled bool) {
trace = enabled
}

View File

@ -1,45 +0,0 @@
package restful
import (
"strconv"
"strings"
)
type mime struct {
media string
quality float64
}
// insertMime adds a mime to a list and keeps it sorted by quality.
func insertMime(l []mime, e mime) []mime {
for i, each := range l {
// if current mime has lower quality then insert before
if e.quality > each.quality {
left := append([]mime{}, l[0:i]...)
return append(append(left, e), l[i:]...)
}
}
return append(l, e)
}
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
func sortedMimes(accept string) (sorted []mime) {
for _, each := range strings.Split(accept, ",") {
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
if len(typeAndQuality) == 1 {
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
} else {
// take factor
parts := strings.Split(typeAndQuality[1], "=")
if len(parts) == 2 {
f, err := strconv.ParseFloat(parts[1], 64)
if err != nil {
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
} else {
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
}
}
}
}
return
}
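// Worked example (illustrative; sortedMimes is unexported, so this only applies inside the package):
//
// sorted := sortedMimes("application/xml;q=0.8, application/json")
// // sorted[0] == mime{media: "application/json", quality: 1}  (no q factor defaults to 1.0)
// // sorted[1] == mime{media: "application/xml", quality: 0.8}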

View File

@ -1,26 +0,0 @@
package restful
import "strings"
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// As for any filter, you can also install it for a particular WebService within a Container.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
if "OPTIONS" != req.Request.Method {
chain.ProcessFilter(req, resp)
return
}
resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
}
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
return DefaultContainer.OPTIONSFilter
}

View File

@ -1,114 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
// PathParameterKind = indicator of Request parameter type "path"
PathParameterKind = iota
// QueryParameterKind = indicator of Request parameter type "query"
QueryParameterKind
// BodyParameterKind = indicator of Request parameter type "body"
BodyParameterKind
// HeaderParameterKind = indicator of Request parameter type "header"
HeaderParameterKind
// FormParameterKind = indicator of Request parameter type "form"
FormParameterKind
)
// Parameter is for documenting the parameter used in an HTTP Request
// ParameterData kinds are Path, Query and Body
type Parameter struct {
data *ParameterData
}
// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
Name, Description, DataType, DataFormat string
Kind int
Required bool
AllowableValues map[string]string
AllowMultiple bool
DefaultValue string
}
// Data returns the state of the Parameter
func (p *Parameter) Data() ParameterData {
return *p.data
}
// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
return p.data.Kind
}
func (p *Parameter) bePath() *Parameter {
p.data.Kind = PathParameterKind
return p
}
func (p *Parameter) beQuery() *Parameter {
p.data.Kind = QueryParameterKind
return p
}
func (p *Parameter) beBody() *Parameter {
p.data.Kind = BodyParameterKind
return p
}
func (p *Parameter) beHeader() *Parameter {
p.data.Kind = HeaderParameterKind
return p
}
func (p *Parameter) beForm() *Parameter {
p.data.Kind = FormParameterKind
return p
}
// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
p.data.Required = required
return p
}
// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
p.data.AllowMultiple = multiple
return p
}
// AllowableValues sets the allowableValues field and returns the receiver
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
p.data.AllowableValues = values
return p
}
// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
p.data.DataType = typeName
return p
}
// DataFormat sets the dataFormat field for Swagger UI
func (p *Parameter) DataFormat(formatName string) *Parameter {
p.data.DataFormat = formatName
return p
}
// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
p.data.DefaultValue = stringRepresentation
return p
}
// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
p.data.Description = doc
return p
}

View File

@ -1,69 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// PathExpression holds a compiled path expression (RegExp) needed to match against
// Http request paths and to extract path parameter values.
type pathExpression struct {
LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
VarCount int // the number of named parameters (enclosed by {}) in the path
Matcher *regexp.Regexp
Source string // Path as defined by the RouteBuilder
tokens []string
}
// NewPathExpression creates a PathExpression from the input URL path.
// Returns an error if the path is invalid.
func newPathExpression(path string) (*pathExpression, error) {
expression, literalCount, varCount, tokens := templateToRegularExpression(path)
compiled, err := regexp.Compile(expression)
if err != nil {
return nil, err
}
return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
var buffer bytes.Buffer
buffer.WriteString("^")
//tokens = strings.Split(template, "/")
tokens = tokenizePath(template)
for _, each := range tokens {
if each == "" {
continue
}
buffer.WriteString("/")
if strings.HasPrefix(each, "{") {
// check for regular expression in variable
colon := strings.Index(each, ":")
if colon != -1 {
// extract expression
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
if paramExpr == "*" { // special case
buffer.WriteString("(.*)")
} else {
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
}
} else {
// plain var
buffer.WriteString("([^/]+?)")
}
varCount += 1
} else {
literalCount += len(each)
encoded := each // TODO URI encode
buffer.WriteString(regexp.QuoteMeta(encoded))
}
}
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
}
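// Worked example (illustrative): the template "/users/{id}/files/{rest:*}"
// is translated into the expression
// ^/users/([^/]+?)/files/(.*)(/.*)?$
// with literalCount == 10 (the characters of "users" and "files") and varCount == 2.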

View File

@ -1,113 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/zlib"
"net/http"
)
var defaultRequestContentType string
// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
Request *http.Request
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
}
func NewRequest(httpRequest *http.Request) *Request {
return &Request{
Request: httpRequest,
pathParameters: map[string]string{},
attributes: map[string]interface{}{},
} // empty parameters, attributes
}
// If ContentType is missing or */* is given then fall back to this type, otherwise
// an "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]
}
// PathParameters accesses the Path parameter values
func (r *Request) PathParameters() map[string]string {
return r.pathParameters
}
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
return r.Request.FormValue(name)
}
// BodyParameter parses the body of the request (once for typically a POST or a PUT) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
err := r.Request.ParseForm()
if err != nil {
return "", err
}
return r.Request.PostFormValue(name), nil
}
// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
return r.Request.Header.Get(name)
}
// ReadEntity checks the Accept header and reads the content into the entityPointer.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()
defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
gzipReader.Reset(r.Request.Body)
r.Request.Body = gzipReader
} else if ENCODING_DEFLATE == contentEncoding {
zlibReader, err := zlib.NewReader(r.Request.Body)
if err != nil {
return err
}
r.Request.Body = zlibReader
}
// lookup the EntityReader, use defaultRequestContentType if needed and provided
entityReader, ok := entityAccessRegistry.accessorAt(contentType)
if !ok {
if len(defaultRequestContentType) != 0 {
entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
}
if !ok {
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
}
}
return entityReader.Read(r, entityPointer)
}
// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
r.attributes[name] = value
}
// Attribute returns the value associated to the given name. Returns nil if absent.
func (r Request) Attribute(name string) interface{} {
return r.attributes[name]
}
// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
func (r Request) SelectedRoutePath() string {
return r.selectedRoutePath
}

View File

@ -1,236 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"net/http"
)
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string
// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true
// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
http.ResponseWriter
requestAccept string // mime-type what the Http Request says it wants to receive
routeProduces []string // mime-types what the Route says it can produce
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
contentLength int // number of bytes written for the response body
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
err error // err property is kept when WriteError is called
}
// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
}
// DefaultResponseContentType sets a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
DefaultResponseMimeType = mime
}
// InternalServerError writes the StatusInternalServerError header.
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
func (r Response) InternalServerError() Response {
r.WriteHeader(http.StatusInternalServerError)
return r
}
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
r.prettyPrint = bePretty
}
// AddHeader is a shortcut for .Header().Add(header,value)
func (r Response) AddHeader(header string, value string) Response {
r.Header().Add(header, value)
return r
}
// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
r.requestAccept = mime
}
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
sorted := sortedMimes(r.requestAccept)
for _, eachAccept := range sorted {
for _, eachProduce := range r.routeProduces {
if eachProduce == eachAccept.media {
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
return w, true
}
}
}
if eachAccept.media == "*/*" {
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
}
}
// if requestAccept is empty
writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
if !ok {
// if not registered then fallback to the defaults (if set)
if DefaultResponseMimeType == MIME_JSON {
return entityAccessRegistry.accessorAt(MIME_JSON)
}
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
if trace {
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
}
}
return writer, ok
}
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
return r.WriteHeaderAndEntity(http.StatusOK, value)
}
// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
writer, ok := r.EntityWriter()
if !ok {
r.WriteHeader(http.StatusNotAcceptable)
return nil
}
return writer.Write(r, status, value)
}
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value)
}
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value)
}
// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value)
}
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value)
}
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
return writeJSON(r, status, contentType, value)
}
// WriteError write the http status and the error string on the response.
func (r *Response) WriteError(httpStatus int, err error) error {
r.err = err
return r.WriteErrorString(httpStatus, err.Error())
}
// WriteServiceError is a convenience method for responding with a status and a ServiceError
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
r.err = err
return r.WriteHeaderAndEntity(httpStatus, err)
}
// WriteErrorString is a convenience method for an error status with the actual error
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
if r.err == nil {
// if not called from WriteError
r.err = errors.New(errorReason)
}
r.WriteHeader(httpStatus)
if _, err := r.Write([]byte(errorReason)); err != nil {
return err
}
return nil
}
// Flush implements http.Flusher interface, which sends any buffered data to the client.
func (r *Response) Flush() {
if f, ok := r.ResponseWriter.(http.Flusher); ok {
f.Flush()
} else if trace {
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
}
}
// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
r.statusCode = httpStatus
r.ResponseWriter.WriteHeader(httpStatus)
}
// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
if 0 == r.statusCode {
// no status code has been written yet; assume OK
return http.StatusOK
}
return r.statusCode
}
// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
func (r *Response) Write(bytes []byte) (int, error) {
written, err := r.ResponseWriter.Write(bytes)
r.contentLength += written
return written, err
}
// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
return r.contentLength
}
// CloseNotify is part of http.CloseNotifier interface
func (r Response) CloseNotify() <-chan bool {
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Error returns the err created by WriteError
func (r Response) Error() error {
return r.err
}

View File

@ -1,186 +0,0 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"net/http"
"strings"
)
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)
// Route binds a HTTP Method,Path,Consumes combination to a RouteFunction.
type Route struct {
Method string
Produces []string
Consumes []string
Path string // webservice root path + described path
Function RouteFunction
Filters []FilterFunction
// cached values for dispatching
relativePath string
pathParts []string
pathExpr *pathExpression // cached compilation of relativePath as RegExp
// documentation
Doc string
Notes string
Operation string
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
// Extra information used to store custom information about the route.
Metadata map[string]interface{}
}
// Initialize for Route
func (r *Route) postBuild() {
r.pathParts = tokenizePath(r.Path)
}
// Create Request and Response from their http versions
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
params := r.extractParameters(httpRequest.URL.Path)
wrappedRequest := NewRequest(httpRequest)
wrappedRequest.pathParameters = params
wrappedRequest.selectedRoutePath = r.Path
wrappedResponse := NewResponse(httpWriter)
wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
wrappedResponse.routeProduces = r.Produces
return wrappedRequest, wrappedResponse
}
// dispatchWithFilters calls the function after passing through its own filters
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
if len(r.Filters) > 0 {
chain := FilterChain{Filters: r.Filters, Target: r.Function}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// unfiltered
r.Function(wrappedRequest, wrappedResponse)
}
}
// Return whether the mimeType matches what this Route can produce.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
parts := strings.Split(mimeTypesWithQuality, ",")
for _, each := range parts {
var withoutQuality string
if strings.Contains(each, ";") {
withoutQuality = strings.Split(each, ";")[0]
} else {
withoutQuality = each
}
// trim before compare
withoutQuality = strings.Trim(withoutQuality, " ")
if withoutQuality == "*/*" {
return true
}
for _, producibleType := range r.Produces {
if producibleType == "*/*" || producibleType == withoutQuality {
return true
}
}
}
return false
}
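// Illustrative matching (assuming a Route with Produces: []string{MIME_JSON}):
// r.matchesAccept("application/json")     == true
// r.matchesAccept("text/html, */*;q=0.8") == true   (the */* entry matches any producible type)
// r.matchesAccept("application/xml")      == false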
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
// did not specify what it can consume; any media type ("*/*") is assumed
return true
}
if len(mimeTypes) == 0 {
// idempotent methods with (most likely or guaranteed) empty content match missing Content-Type
m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true
}
// proceed with default
mimeTypes = MIME_OCTET
}
parts := strings.Split(mimeTypes, ",")
for _, each := range parts {
var contentType string
if strings.Contains(each, ";") {
contentType = strings.Split(each, ";")[0]
} else {
contentType = each
}
// trim before compare
contentType = strings.Trim(contentType, " ")
for _, consumeableType := range r.Consumes {
if consumeableType == "*/*" || consumeableType == contentType {
return true
}
}
}
return false
}
// Extract the parameters from the request url path
func (r Route) extractParameters(urlPath string) map[string]string {
urlParts := tokenizePath(urlPath)
pathParameters := map[string]string{}
for i, key := range r.pathParts {
var value string
if i >= len(urlParts) {
value = ""
} else {
value = urlParts[i]
}
if strings.HasPrefix(key, "{") { // path-parameter
if colon := strings.Index(key, ":"); colon != -1 {
// extract by regex
regPart := key[colon+1 : len(key)-1]
keyPart := key[1:colon]
if regPart == "*" {
pathParameters[keyPart] = untokenizePath(i, urlParts)
break
} else {
pathParameters[keyPart] = value
}
} else {
// without enclosing {}
pathParameters[key[1:len(key)-1]] = value
}
}
}
return pathParameters
}
// Untokenize back into a URL path using the slash separator
func untokenizePath(offset int, parts []string) string {
var buffer bytes.Buffer
for p := offset; p < len(parts); p++ {
buffer.WriteString(parts[p])
// do not end
if p < len(parts)-1 {
buffer.WriteString("/")
}
}
return buffer.String()
}
// Tokenize a URL path using the slash separator; the result does not have empty tokens
func tokenizePath(path string) []string {
if "/" == path {
return []string{}
}
return strings.Split(strings.Trim(path, "/"), "/")
}
// for debugging
func (r Route) String() string {
return r.Method + " " + r.Path
}

Some files were not shown because too many files have changed in this diff.