Compare commits


567 Commits

Author SHA1 Message Date
Kubernetes Publisher
e0611f3d05 Merge pull request #65298 from nikhita/cherrypick-jsoniter-bump-1.9
Automatic merge from submit-queue.

Manual cherrypick of #65034 to 1.9: make json serializer case sensitive

Partially fixes https://github.com/kubernetes/kubernetes/issues/64612

This PR imports the latest json-iterator library so that case sensitivity during unmarshalling is optional. It also sets the Kubernetes JSON serializer to be case-sensitive.

**Release note**:

```release-note
ACTION REQUIRED: The Kubernetes JSON deserializer is now case-sensitive to restore compatibility with pre-1.8 servers. If your config files contain fields with the wrong case, they will now be invalid.
```

/sig api-machinery
/kind bug
/assign caesarxuchao liggitt thockin sttts mbohlool

Kubernetes-commit: f4cf484c2cb6056e28fb9759a3c913be3eed990a
2018-06-27 08:49:37 +00:00
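
A minimal sketch of the behavioural change in the cherry-pick above, assuming the `CaseSensitive` option this json-iterator bump introduces (the `Foo` type and field here are illustrative, not part of the Kubernetes serializer itself):

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Foo is an illustrative type; only the json tag matters here.
type Foo struct {
	Kind string `json:"kind"`
}

func main() {
	data := []byte(`{"KIND": "Example"}`) // key written with the wrong case

	// Case-insensitive matching (standard-library-compatible behaviour):
	// "KIND" still populates Kind.
	var lenient Foo
	_ = jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, &lenient)
	fmt.Println(lenient.Kind) // "Example"

	// Case-sensitive matching, as the Kubernetes JSON serializer now uses:
	// the mismatched key is ignored and the field stays empty.
	strict := jsoniter.Config{CaseSensitive: true}.Froze()
	var picky Foo
	_ = strict.Unmarshal(data, &picky)
	fmt.Println(picky.Kind) // ""
}
```

With the strict config a key written in the wrong case is silently ignored rather than matched, which is why the release note above is marked ACTION REQUIRED.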
Nikhita Raghunath
dd4f9e79b8 update staging godeps
Kubernetes-commit: 0f9246104e2c75a96150d8d06a87dfe7b40024c8
2018-06-21 11:59:07 +05:30
Kubernetes Publisher
77833cd56b Merge pull request #63626 from roycaihw/release-1.9
Automatic merge from submit-queue.

Manual cherrypick of kube-openapi changes for release-1.9

**What this PR does / why we need it**:
Cherry-picks https://github.com/kubernetes/kube-openapi/pull/64 and https://github.com/kubernetes/kube-openapi/pull/67
Fixes bugs that make the apiserver panic when aggregating a valid but not well-formed OpenAPI spec (one with empty `Paths`/`Definitions`).

**Release note**:

```release-note
Fixes bugs that make the apiserver panic when aggregating a valid but not well-formed OpenAPI spec.
```

/cc @mbohlool
/sig api-machinery

Kubernetes-commit: 8d48604c2eca912159636be258a5e68bfc4c6643
2018-05-10 15:37:37 +00:00
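
The failure class fixed by the cherry-pick above is a nil dereference on specs that are valid but carry empty `Paths`/`Definitions`. A hedged sketch of the kind of defensive handling involved, written against the go-openapi `spec` types that kube-openapi builds on (the helper name and merge logic are illustrative, not the actual kube-openapi code):

```go
package aggregator

import "github.com/go-openapi/spec"

// mergeSpecDefensively illustrates the guard the cherry-picked fixes add:
// an aggregated spec may legitimately have nil Paths or Definitions, and the
// aggregator must tolerate that instead of dereferencing nil and panicking.
func mergeSpecDefensively(dst, src *spec.Swagger) {
	if src == nil {
		return
	}
	if src.Paths != nil && src.Paths.Paths != nil {
		if dst.Paths == nil {
			dst.Paths = &spec.Paths{Paths: map[string]spec.PathItem{}}
		}
		if dst.Paths.Paths == nil {
			dst.Paths.Paths = map[string]spec.PathItem{}
		}
		for path, item := range src.Paths.Paths {
			dst.Paths.Paths[path] = item
		}
	}
	if src.Definitions != nil {
		if dst.Definitions == nil {
			dst.Definitions = spec.Definitions{}
		}
		for name, schema := range src.Definitions {
			dst.Definitions[name] = schema
		}
	}
}
```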
Haowei Cai
666c7f735a generated
Kubernetes-commit: 446c4f107857f2e3589f8ce1632ad697161aba11
2018-05-09 15:22:42 -07:00
Kubernetes Publisher
9d60f698b8 sync: update godeps 2018-04-17 15:29:40 +00:00
Kubernetes Publisher
9bcfb16e0e Merge pull request #62028 from nikhita/automated-cherry-pick-of-#57243-upstream-release-1.9
Automatic merge from submit-queue.

Automated cherry pick of #57243: Register metav1 types into samplecontroller api scheme

Fixes kubernetes/sample-controller#14

Cherry pick of #57243 on release-1.9.

#57243: Register metav1 types into samplecontroller api scheme

```release-note
Fix error message regarding conversion of `v1.ListOptions` to `samplecontroller.k8s.io/v1alpha1`.
```

Kubernetes-commit: 53daf2849cdd6c31a7f801c987e8386bb2fd404d
2018-04-15 23:19:05 +00:00
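
For context on the cherry-pick above, registering the metav1 types follows the usual `register.go` pattern for an API group. A minimal, abridged sketch for the sample controller's `v1alpha1` package (`Foo`/`FooList` live in the package's types.go; treat the exact wiring here as illustrative):

```go
package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// SchemeGroupVersion identifies this API group and version.
var SchemeGroupVersion = schema.GroupVersion{Group: "samplecontroller.k8s.io", Version: "v1alpha1"}

// addKnownTypes registers the group's own types with the scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion,
		&Foo{},
		&FooList{},
	)
	// The cherry-picked fix: also register the metav1 types (ListOptions,
	// GetOptions, DeleteOptions, ...) for this group/version so conversions
	// such as v1.ListOptions -> samplecontroller.k8s.io/v1alpha1 resolve
	// cleanly instead of producing a confusing error message.
	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
	return nil
}
```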
Kubernetes Publisher
b7e50d1f46 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-09 11:24:24 +00:00
Kubernetes Publisher
5505589d66 sync: update required packages 2018-04-09 03:57:07 +00:00
Kubernetes Publisher
40c99c90ab sync: update godeps 2018-04-09 03:57:06 +00:00
Kubernetes Publisher
31535e4b2c sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-09 03:56:50 +00:00
Kubernetes Publisher
48ead0a103 sync: update required packages 2018-04-08 23:57:27 +00:00
Kubernetes Publisher
89d4b438a9 sync: update godeps 2018-04-08 23:57:27 +00:00
Kubernetes Publisher
91a11b7265 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 23:57:11 +00:00
Kubernetes Publisher
01c2d0d977 sync: update required packages 2018-04-08 19:57:40 +00:00
Kubernetes Publisher
c03ae22c88 sync: update godeps 2018-04-08 19:57:40 +00:00
Kubernetes Publisher
f34d956640 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 19:57:18 +00:00
Kubernetes Publisher
79859ee6b4 sync: update required packages 2018-04-08 15:56:28 +00:00
Kubernetes Publisher
15169be8b7 sync: update godeps 2018-04-08 15:56:28 +00:00
Kubernetes Publisher
3e565e8d13 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 15:56:11 +00:00
Kubernetes Publisher
c9e66f884f sync: update required packages 2018-04-08 11:58:35 +00:00
Kubernetes Publisher
9805f78ed8 sync: update godeps 2018-04-08 11:58:35 +00:00
Kubernetes Publisher
aaf097dc2c sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 11:58:19 +00:00
Kubernetes Publisher
370e21e6ac sync: update required packages 2018-04-08 07:57:55 +00:00
Kubernetes Publisher
00eef0056a sync: update godeps 2018-04-08 07:57:55 +00:00
Kubernetes Publisher
81cbcf8b43 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 07:57:39 +00:00
Kubernetes Publisher
9db1fe1142 sync: update required packages 2018-04-08 03:56:23 +00:00
Kubernetes Publisher
33b61d6ee0 sync: update godeps 2018-04-08 03:56:23 +00:00
Kubernetes Publisher
1172c606cf sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 03:56:04 +00:00
Kubernetes Publisher
5b2be3e834 sync: update required packages 2018-04-07 23:56:22 +00:00
Kubernetes Publisher
5d2cb1ffcd sync: update godeps 2018-04-07 23:56:22 +00:00
Kubernetes Publisher
0693f7d9f5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 23:56:06 +00:00
Kubernetes Publisher
6a94facd76 sync: update required packages 2018-04-07 19:56:48 +00:00
Kubernetes Publisher
78802a16da sync: update godeps 2018-04-07 19:56:48 +00:00
Kubernetes Publisher
dc0261e124 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 19:56:31 +00:00
Kubernetes Publisher
5ce173c831 sync: update required packages 2018-04-07 15:56:55 +00:00
Kubernetes Publisher
789b653f4e sync: update godeps 2018-04-07 15:56:54 +00:00
Kubernetes Publisher
eab374c0ce sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 15:56:38 +00:00
Kubernetes Publisher
4a9c498f71 sync: update required packages 2018-04-07 11:56:47 +00:00
Kubernetes Publisher
3e12fc7430 sync: update godeps 2018-04-07 11:56:47 +00:00
Kubernetes Publisher
31e98b39f9 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 11:56:31 +00:00
Kubernetes Publisher
7158d5b151 sync: update required packages 2018-04-07 07:58:00 +00:00
Kubernetes Publisher
1437c07c81 sync: update godeps 2018-04-07 07:58:00 +00:00
Kubernetes Publisher
861fb89c5a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 07:57:44 +00:00
Kubernetes Publisher
4c39992cfc sync: update required packages 2018-04-07 03:57:26 +00:00
Kubernetes Publisher
5cd1d8fcf0 sync: update godeps 2018-04-07 03:57:25 +00:00
Kubernetes Publisher
723f028710 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 03:57:09 +00:00
Kubernetes Publisher
3f0b110807 sync: update required packages 2018-04-06 23:56:43 +00:00
Kubernetes Publisher
f3356673b1 sync: update godeps 2018-04-06 23:56:43 +00:00
Kubernetes Publisher
e74eecb6a5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 23:56:16 +00:00
Kubernetes Publisher
affa9f42a6 sync: update required packages 2018-04-06 19:55:34 +00:00
Kubernetes Publisher
9be544dd78 sync: update godeps 2018-04-06 19:55:34 +00:00
Kubernetes Publisher
e105d4753f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 19:55:12 +00:00
Kubernetes Publisher
f07fdd5433 sync: update required packages 2018-04-06 15:58:12 +00:00
Kubernetes Publisher
2572123197 sync: update godeps 2018-04-06 15:58:12 +00:00
Kubernetes Publisher
41d0ae3162 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 15:57:55 +00:00
Kubernetes Publisher
cfd7646563 sync: update required packages 2018-04-06 12:08:26 +00:00
Kubernetes Publisher
be5633101c sync: update godeps 2018-04-06 12:08:25 +00:00
Kubernetes Publisher
b4c220b23a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 12:08:08 +00:00
Kubernetes Publisher
ad2add216d sync: update required packages 2018-03-30 19:49:37 +00:00
Kubernetes Publisher
60dd841254 sync: update godeps 2018-03-30 19:49:37 +00:00
Kubernetes Publisher
f27f3a4e25 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 19:49:21 +00:00
Kubernetes Publisher
e3a3550cd1 sync: update required packages 2018-03-30 16:03:38 +00:00
Kubernetes Publisher
705db51c65 sync: update godeps 2018-03-30 16:03:38 +00:00
Kubernetes Publisher
8328c32c62 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 16:03:21 +00:00
Kubernetes Publisher
136e706b4c sync: update required packages 2018-03-30 12:02:36 +00:00
Kubernetes Publisher
636e3563e9 sync: update godeps 2018-03-30 12:02:36 +00:00
Kubernetes Publisher
ebdedbf1bd sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 12:02:19 +00:00
Kubernetes Publisher
39a08d806b sync: update required packages 2018-03-30 08:01:30 +00:00
Kubernetes Publisher
a3edbf0f55 sync: update godeps 2018-03-30 08:01:29 +00:00
Kubernetes Publisher
c0a5c7afc3 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 08:01:13 +00:00
Kubernetes Publisher
f32d99a8b0 sync: update required packages 2018-03-30 04:01:08 +00:00
Kubernetes Publisher
f1f5b1bdfd sync: update godeps 2018-03-30 04:01:08 +00:00
Kubernetes Publisher
1e3e1ed2e5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 04:00:52 +00:00
Kubernetes Publisher
0c9e8c5f70 sync: update required packages 2018-03-30 00:03:16 +00:00
Kubernetes Publisher
28b4cad66f sync: update godeps 2018-03-30 00:03:15 +00:00
Kubernetes Publisher
35803781a3 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 00:02:59 +00:00
Kubernetes Publisher
c3464f21df sync: update required packages 2018-03-29 19:52:32 +00:00
Kubernetes Publisher
0c7c8fd549 sync: update godeps 2018-03-29 19:52:32 +00:00
Kubernetes Publisher
4dee085a73 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 19:52:15 +00:00
Kubernetes Publisher
2bf2d7dbe8 sync: update required packages 2018-03-29 15:49:09 +00:00
Kubernetes Publisher
257069fde5 sync: update godeps 2018-03-29 15:49:08 +00:00
Kubernetes Publisher
04301fd3c5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 15:48:52 +00:00
Kubernetes Publisher
a38154c6f0 sync: update required packages 2018-03-29 11:48:10 +00:00
Kubernetes Publisher
8cb48ea581 sync: update godeps 2018-03-29 11:48:10 +00:00
Kubernetes Publisher
794493552f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 11:47:55 +00:00
Kubernetes Publisher
aeced7fc1b sync: update required packages 2018-03-29 07:47:36 +00:00
Kubernetes Publisher
5d4cc7f0df sync: update godeps 2018-03-29 07:47:36 +00:00
Kubernetes Publisher
cc1587bce1 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 07:47:21 +00:00
Kubernetes Publisher
9cfc94aec0 sync: update required packages 2018-03-29 03:48:53 +00:00
Kubernetes Publisher
f22830a5a4 sync: update godeps 2018-03-29 03:48:53 +00:00
Kubernetes Publisher
c94b786fd6 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 03:48:37 +00:00
Kubernetes Publisher
88807ad857 sync: update required packages 2018-03-28 23:48:33 +00:00
Kubernetes Publisher
c4f9a274d5 sync: update godeps 2018-03-28 23:48:33 +00:00
Kubernetes Publisher
e532577d08 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 23:48:17 +00:00
Kubernetes Publisher
f11d3b975c sync: update required packages 2018-03-28 19:49:49 +00:00
Kubernetes Publisher
19ef491e61 sync: update godeps 2018-03-28 19:49:49 +00:00
Kubernetes Publisher
b3551d48e9 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 19:49:32 +00:00
Kubernetes Publisher
a5df989be9 sync: update required packages 2018-03-28 15:50:57 +00:00
Kubernetes Publisher
1b307a2992 sync: update godeps 2018-03-28 15:50:57 +00:00
Kubernetes Publisher
e30c026764 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 15:50:42 +00:00
Kubernetes Publisher
801aa68b9a sync: update required packages 2018-03-28 11:48:59 +00:00
Kubernetes Publisher
3e5ef906c9 sync: update godeps 2018-03-28 11:48:58 +00:00
Kubernetes Publisher
3e8c2df88f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 11:48:43 +00:00
Kubernetes Publisher
cabbdd3a4a sync: update required packages 2018-03-28 07:50:46 +00:00
Kubernetes Publisher
a53169b342 sync: update godeps 2018-03-28 07:50:46 +00:00
Kubernetes Publisher
e2632c74c7 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 07:50:30 +00:00
Kubernetes Publisher
3ba3801e6b sync: update required packages 2018-03-28 03:47:44 +00:00
Kubernetes Publisher
ead8264daa sync: update godeps 2018-03-28 03:47:44 +00:00
Kubernetes Publisher
6c0c6fda6c sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 03:47:28 +00:00
Kubernetes Publisher
5e18a249f3 sync: update required packages 2018-03-27 23:57:52 +00:00
Kubernetes Publisher
c669b1200b sync: update godeps 2018-03-27 23:57:52 +00:00
Kubernetes Publisher
a66ec0f631 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 23:57:37 +00:00
Kubernetes Publisher
f2b8a6a1ce sync: update required packages 2018-03-27 19:30:47 +00:00
Kubernetes Publisher
2ce2991d13 sync: update godeps 2018-03-27 19:30:46 +00:00
Kubernetes Publisher
57da285c45 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 19:30:30 +00:00
Kubernetes Publisher
090204ee74 sync: update required packages 2018-03-27 15:28:23 +00:00
Kubernetes Publisher
196664ef0a sync: update godeps 2018-03-27 15:28:22 +00:00
Kubernetes Publisher
ab66a41a45 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 15:28:03 +00:00
Kubernetes Publisher
c6072492ab sync: update required packages 2018-03-27 11:27:27 +00:00
Kubernetes Publisher
4889956629 sync: update godeps 2018-03-27 11:27:27 +00:00
Kubernetes Publisher
01ca511fab sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 11:27:12 +00:00
Kubernetes Publisher
24d03c1990 sync: update required packages 2018-03-27 07:26:25 +00:00
Kubernetes Publisher
2d343b3104 sync: update godeps 2018-03-27 07:26:25 +00:00
Kubernetes Publisher
b8d8379a63 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 07:26:08 +00:00
Kubernetes Publisher
e776beac72 sync: update required packages 2018-03-27 03:26:40 +00:00
Kubernetes Publisher
7a8e1471ac sync: update godeps 2018-03-27 03:26:40 +00:00
Kubernetes Publisher
52394067ca sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 03:26:26 +00:00
Kubernetes Publisher
944e612cf6 sync: update required packages 2018-03-26 23:26:54 +00:00
Kubernetes Publisher
5b3315c9ad sync: update godeps 2018-03-26 23:26:54 +00:00
Kubernetes Publisher
c126cee505 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 23:26:40 +00:00
Kubernetes Publisher
96d5ebc58c sync: update required packages 2018-03-26 19:26:44 +00:00
Kubernetes Publisher
5f1cdfb762 sync: update godeps 2018-03-26 19:26:44 +00:00
Kubernetes Publisher
1e88dd3b14 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 19:26:27 +00:00
Kubernetes Publisher
e5bd4b7263 sync: update required packages 2018-03-26 15:26:26 +00:00
Kubernetes Publisher
a02cb5fa93 sync: update godeps 2018-03-26 15:26:25 +00:00
Kubernetes Publisher
e3cff51486 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 15:26:12 +00:00
Kubernetes Publisher
d557917bc6 sync: update required packages 2018-03-26 11:26:01 +00:00
Kubernetes Publisher
6ecf33226c sync: update godeps 2018-03-26 11:26:01 +00:00
Kubernetes Publisher
0d0d55ce39 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 11:25:40 +00:00
Kubernetes Publisher
b21258f277 sync: update required packages 2018-03-26 07:25:23 +00:00
Kubernetes Publisher
bc5cf53ea8 sync: update godeps 2018-03-26 07:25:23 +00:00
Kubernetes Publisher
1c7d62fc2b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 07:25:08 +00:00
Kubernetes Publisher
a4e2048a95 sync: update required packages 2018-03-26 03:28:57 +00:00
Kubernetes Publisher
69189ff05f sync: update godeps 2018-03-26 03:28:57 +00:00
Kubernetes Publisher
303509fc61 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 03:28:43 +00:00
Kubernetes Publisher
4ea14d0f28 sync: update required packages 2018-03-25 23:29:30 +00:00
Kubernetes Publisher
4b56f384b1 sync: update godeps 2018-03-25 23:29:29 +00:00
Kubernetes Publisher
b530b0149f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-25 23:29:14 +00:00
Kubernetes Publisher
9ac6ab8ab4 sync: update required packages 2018-03-24 23:32:08 +00:00
Kubernetes Publisher
f1cf4d202c sync: update godeps 2018-03-24 23:32:08 +00:00
Kubernetes Publisher
752d40238d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 23:31:53 +00:00
Kubernetes Publisher
7c83fbaa4e sync: update required packages 2018-03-24 19:32:58 +00:00
Kubernetes Publisher
5de1f33eb5 sync: update godeps 2018-03-24 19:32:58 +00:00
Kubernetes Publisher
984ed70c43 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 19:32:43 +00:00
Kubernetes Publisher
d16a860a84 sync: update required packages 2018-03-24 15:32:29 +00:00
Kubernetes Publisher
f4a700098e sync: update godeps 2018-03-24 15:32:28 +00:00
Kubernetes Publisher
8de7e5c929 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 15:32:13 +00:00
Kubernetes Publisher
506a03ddc2 sync: update required packages 2018-03-24 11:33:31 +00:00
Kubernetes Publisher
7002b8320c sync: update godeps 2018-03-24 11:33:31 +00:00
Kubernetes Publisher
4b90c6d90b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 11:33:10 +00:00
Kubernetes Publisher
c8878a7ab0 sync: update required packages 2018-03-24 07:35:49 +00:00
Kubernetes Publisher
0dc4d8ffd1 sync: update godeps 2018-03-24 07:35:49 +00:00
Kubernetes Publisher
33737a7505 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 07:35:33 +00:00
Kubernetes Publisher
47b0434129 sync: update required packages 2018-03-24 03:34:06 +00:00
Kubernetes Publisher
8db7761355 sync: update godeps 2018-03-24 03:34:06 +00:00
Kubernetes Publisher
372e59a9bc sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 03:33:46 +00:00
Kubernetes Publisher
d682b42c10 sync: update required packages 2018-03-23 23:32:21 +00:00
Kubernetes Publisher
bfcb8b515a sync: update godeps 2018-03-23 23:32:20 +00:00
Kubernetes Publisher
a2c3c85472 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 23:32:05 +00:00
Kubernetes Publisher
b71d090eaa sync: update required packages 2018-03-23 19:34:28 +00:00
Kubernetes Publisher
232fc56e6d sync: update godeps 2018-03-23 19:34:27 +00:00
Kubernetes Publisher
e10ce13fcf sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 19:34:13 +00:00
Kubernetes Publisher
4cc0d45751 sync: update required packages 2018-03-23 15:33:05 +00:00
Kubernetes Publisher
fca461f716 sync: update godeps 2018-03-23 15:33:05 +00:00
Kubernetes Publisher
1e85dcee05 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 15:32:50 +00:00
Kubernetes Publisher
336f9cc4f4 sync: update required packages 2018-03-23 11:33:11 +00:00
Kubernetes Publisher
ea5e19f218 sync: update godeps 2018-03-23 11:33:11 +00:00
Kubernetes Publisher
1d48ee6a82 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 11:32:54 +00:00
Kubernetes Publisher
6d7fa9df76 sync: update required packages 2018-03-23 07:32:59 +00:00
Kubernetes Publisher
b3245822a0 sync: update godeps 2018-03-23 07:32:58 +00:00
Kubernetes Publisher
9b9376ca13 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 07:32:43 +00:00
Kubernetes Publisher
62b8ebe870 sync: update required packages 2018-03-23 03:33:39 +00:00
Kubernetes Publisher
8f01773354 sync: update godeps 2018-03-23 03:33:39 +00:00
Kubernetes Publisher
a10dae27b0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 03:33:25 +00:00
Kubernetes Publisher
0e4fb00c5a sync: update required packages 2018-03-22 23:33:37 +00:00
Kubernetes Publisher
f63a4e14f0 sync: update godeps 2018-03-22 23:33:37 +00:00
Kubernetes Publisher
9564ec8fed sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 23:33:23 +00:00
Kubernetes Publisher
bac76c5aba sync: update required packages 2018-03-22 19:30:54 +00:00
Kubernetes Publisher
fd4c4a6f66 sync: update godeps 2018-03-22 19:30:54 +00:00
Kubernetes Publisher
47672c745e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 19:30:37 +00:00
Kubernetes Publisher
2d9cfd3124 sync: update required packages 2018-03-22 15:36:55 +00:00
Kubernetes Publisher
866e5a4ba4 sync: update godeps 2018-03-22 15:36:55 +00:00
Kubernetes Publisher
b303c556ed sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 15:36:36 +00:00
Kubernetes Publisher
6d849d7b88 sync: update required packages 2018-03-22 11:32:48 +00:00
Kubernetes Publisher
58a5799828 sync: update godeps 2018-03-22 11:32:47 +00:00
Kubernetes Publisher
ed2c93f4c5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 11:32:31 +00:00
Kubernetes Publisher
1043547d9e sync: update required packages 2018-03-22 07:34:01 +00:00
Kubernetes Publisher
b34ea26403 sync: update godeps 2018-03-22 07:34:01 +00:00
Kubernetes Publisher
ff09ce689e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 07:33:46 +00:00
Kubernetes Publisher
50fad5266b sync: update required packages 2018-03-22 03:30:13 +00:00
Kubernetes Publisher
40501ad430 sync: update godeps 2018-03-22 03:30:13 +00:00
Kubernetes Publisher
7e1decea89 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 03:29:58 +00:00
Kubernetes Publisher
1545e7608e sync: update required packages 2018-03-21 23:31:20 +00:00
Kubernetes Publisher
0bc0fca5ad sync: update godeps 2018-03-21 23:31:19 +00:00
Kubernetes Publisher
623572ae55 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 23:31:02 +00:00
Kubernetes Publisher
8ec9c07706 sync: update required packages 2018-03-21 19:34:32 +00:00
Kubernetes Publisher
66469e2e2c sync: update godeps 2018-03-21 19:34:31 +00:00
Kubernetes Publisher
173a5af9a0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 19:34:14 +00:00
Kubernetes Publisher
571239366f sync: update required packages 2018-03-21 15:29:05 +00:00
Kubernetes Publisher
bb8ccc9d1c sync: update godeps 2018-03-21 15:29:04 +00:00
Kubernetes Publisher
e1d7bb7b27 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 15:28:50 +00:00
Kubernetes Publisher
7cca023b75 sync: update required packages 2018-03-21 11:29:09 +00:00
Kubernetes Publisher
d54ee108ed sync: update godeps 2018-03-21 11:29:09 +00:00
Kubernetes Publisher
6a77b41613 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 11:28:55 +00:00
Kubernetes Publisher
aff0f5186c sync: update required packages 2018-03-21 07:27:50 +00:00
Kubernetes Publisher
51d41171cd sync: update godeps 2018-03-21 07:27:49 +00:00
Kubernetes Publisher
d64a6cb57b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 07:27:35 +00:00
Kubernetes Publisher
014b34a2ed sync: update required packages 2018-03-21 03:28:04 +00:00
Kubernetes Publisher
060cadb8fb sync: update godeps 2018-03-21 03:28:03 +00:00
Kubernetes Publisher
190c4fd874 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 03:27:49 +00:00
Kubernetes Publisher
c018c988b1 sync: update required packages 2018-03-20 23:28:49 +00:00
Kubernetes Publisher
f377caac7b sync: update godeps 2018-03-20 23:28:49 +00:00
Kubernetes Publisher
7286cccb85 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 23:28:35 +00:00
Kubernetes Publisher
6a6a1784f2 sync: update required packages 2018-03-20 19:31:06 +00:00
Kubernetes Publisher
2f912ec7a8 sync: update godeps 2018-03-20 19:31:06 +00:00
Kubernetes Publisher
e1d54b229f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 19:30:51 +00:00
Kubernetes Publisher
a0e84100b1 sync: update required packages 2018-03-20 15:36:38 +00:00
Kubernetes Publisher
ba451bd597 sync: update godeps 2018-03-20 15:36:37 +00:00
Kubernetes Publisher
fc859d5209 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 15:36:21 +00:00
Kubernetes Publisher
aa9264bc07 sync: update required packages 2018-03-20 10:16:46 +00:00
Kubernetes Publisher
70d11f2a30 sync: update godeps 2018-03-20 10:16:46 +00:00
Kubernetes Publisher
69ffb08101 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 10:16:23 +00:00
Kubernetes Publisher
059e9239e9 sync: update required packages 2018-03-20 06:18:57 +00:00
Kubernetes Publisher
917b53770e sync: update godeps 2018-03-20 06:18:57 +00:00
Kubernetes Publisher
63fb83ce79 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 06:18:41 +00:00
Kubernetes Publisher
13da09bc0b sync: update required packages 2018-03-20 02:31:30 +00:00
Kubernetes Publisher
57b73875d5 sync: update godeps 2018-03-20 02:31:30 +00:00
Kubernetes Publisher
53b546852b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 02:31:14 +00:00
Kubernetes Publisher
afffe07c71 sync: update required packages 2018-03-19 22:09:59 +00:00
Kubernetes Publisher
8158424898 sync: update godeps 2018-03-19 22:09:58 +00:00
Kubernetes Publisher
a6d5698393 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 22:09:46 +00:00
Kubernetes Publisher
7b904c986a sync: update required packages 2018-03-19 18:12:52 +00:00
Kubernetes Publisher
8517759dd8 sync: update godeps 2018-03-19 18:12:51 +00:00
Kubernetes Publisher
a6eeb2126d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 18:12:36 +00:00
Kubernetes Publisher
72b40086de sync: update required packages 2018-03-19 14:06:48 +00:00
Kubernetes Publisher
1e1ae0deb6 sync: update godeps 2018-03-19 14:06:48 +00:00
Kubernetes Publisher
5438934527 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 14:06:35 +00:00
Kubernetes Publisher
d99e6c8091 sync: update required packages 2018-03-19 10:05:02 +00:00
Kubernetes Publisher
55c0b9177f sync: update godeps 2018-03-19 10:05:02 +00:00
Kubernetes Publisher
0e131efa8a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 10:04:50 +00:00
Kubernetes Publisher
7b409b5d44 sync: update required packages 2018-03-19 06:05:28 +00:00
Kubernetes Publisher
13898fe855 sync: update godeps 2018-03-19 06:05:28 +00:00
Kubernetes Publisher
c465ff81af sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 06:05:16 +00:00
Kubernetes Publisher
18d80fdaa3 sync: update required packages 2018-03-19 02:05:48 +00:00
Kubernetes Publisher
41bd698990 sync: update godeps 2018-03-19 02:05:48 +00:00
Kubernetes Publisher
655c7262e0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 02:05:35 +00:00
Kubernetes Publisher
43e6337581 sync: update required packages 2018-03-18 22:06:10 +00:00
Kubernetes Publisher
ca7ed5f32d sync: update godeps 2018-03-18 22:06:09 +00:00
Kubernetes Publisher
b6481a76d9 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 22:05:57 +00:00
Kubernetes Publisher
72974accc2 sync: update required packages 2018-03-18 18:05:40 +00:00
Kubernetes Publisher
8951e10159 sync: update godeps 2018-03-18 18:05:39 +00:00
Kubernetes Publisher
f4267e0146 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 18:05:27 +00:00
Kubernetes Publisher
4ba4467316 sync: update required packages 2018-03-18 14:05:12 +00:00
Kubernetes Publisher
9a85163d45 sync: update godeps 2018-03-18 14:05:11 +00:00
Kubernetes Publisher
d146a35b91 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 14:04:59 +00:00
Kubernetes Publisher
2a95d0ebf7 sync: update required packages 2018-03-18 10:04:46 +00:00
Kubernetes Publisher
545c0a2511 sync: update godeps 2018-03-18 10:04:46 +00:00
Kubernetes Publisher
6a2e9d3200 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 10:04:34 +00:00
Kubernetes Publisher
3d22156e9e sync: update required packages 2018-03-18 06:04:16 +00:00
Kubernetes Publisher
5c2d7051fe sync: update godeps 2018-03-18 06:04:16 +00:00
Kubernetes Publisher
81b94b904d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 06:04:03 +00:00
Kubernetes Publisher
8d701b19f5 sync: update required packages 2018-03-18 02:04:56 +00:00
Kubernetes Publisher
d76152b457 sync: update godeps 2018-03-18 02:04:55 +00:00
Kubernetes Publisher
dedfb2d82a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 02:04:42 +00:00
Kubernetes Publisher
de9ca2b0a7 sync: update required packages 2018-03-17 22:05:26 +00:00
Kubernetes Publisher
2b9e8c3b82 sync: update godeps 2018-03-17 22:05:25 +00:00
Kubernetes Publisher
10c3b98a0d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 22:05:13 +00:00
Kubernetes Publisher
369c48f882 sync: update required packages 2018-03-17 18:05:55 +00:00
Kubernetes Publisher
9e98f67046 sync: update godeps 2018-03-17 18:05:54 +00:00
Kubernetes Publisher
e130d8c483 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 18:05:42 +00:00
Kubernetes Publisher
dd6f3a04ef sync: update required packages 2018-03-17 14:05:08 +00:00
Kubernetes Publisher
22c2eae497 sync: update godeps 2018-03-17 14:05:07 +00:00
Kubernetes Publisher
1113ab3764 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 14:04:55 +00:00
Kubernetes Publisher
6e3ceee05f sync: update required packages 2018-03-17 10:04:29 +00:00
Kubernetes Publisher
e1bb61dc32 sync: update godeps 2018-03-17 10:04:29 +00:00
Kubernetes Publisher
0f932205bc sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 10:04:16 +00:00
Kubernetes Publisher
75ace777e0 sync: update required packages 2018-03-17 06:04:35 +00:00
Kubernetes Publisher
54eb7c8c42 sync: update godeps 2018-03-17 06:04:35 +00:00
Kubernetes Publisher
facd585c31 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 06:04:23 +00:00
Kubernetes Publisher
50a0923dd6 sync: update required packages 2018-03-17 02:05:32 +00:00
Kubernetes Publisher
44d7caaa9d sync: update godeps 2018-03-17 02:05:31 +00:00
Kubernetes Publisher
5e77d6b95d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 02:05:18 +00:00
Kubernetes Publisher
3fe21ebe51 sync: update required packages 2018-03-16 22:05:17 +00:00
Kubernetes Publisher
86272edfa5 sync: update godeps 2018-03-16 22:05:17 +00:00
Kubernetes Publisher
4e8ca57d20 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 22:05:04 +00:00
Kubernetes Publisher
a2355fc8fe sync: update required packages 2018-03-16 18:05:17 +00:00
Kubernetes Publisher
d26dc4b326 sync: update godeps 2018-03-16 18:05:17 +00:00
Kubernetes Publisher
29aec055d1 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 18:05:04 +00:00
Kubernetes Publisher
c1de96833f sync: update required packages 2018-03-16 14:05:35 +00:00
Kubernetes Publisher
2f7d854d66 sync: update godeps 2018-03-16 14:05:35 +00:00
Kubernetes Publisher
6fa40109a4 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 14:05:22 +00:00
Kubernetes Publisher
085505ea2d sync: update required packages 2018-03-16 10:04:51 +00:00
Kubernetes Publisher
0c3d9bdeab sync: update godeps 2018-03-16 10:04:51 +00:00
Kubernetes Publisher
be735f1c19 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 10:04:39 +00:00
Kubernetes Publisher
842cfbbdb7 sync: update required packages 2018-03-16 06:03:23 +00:00
Kubernetes Publisher
5074ef82d2 sync: update godeps 2018-03-16 06:03:23 +00:00
Kubernetes Publisher
0a18cf3397 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 06:03:11 +00:00
Kubernetes Publisher
5ead969852 sync: update required packages 2018-03-16 02:05:18 +00:00
Kubernetes Publisher
f8106b9564 sync: update godeps 2018-03-16 02:05:18 +00:00
Kubernetes Publisher
e7f796cb9f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 02:05:06 +00:00
Kubernetes Publisher
7c52cc3846 sync: update required packages 2018-03-15 22:05:16 +00:00
Kubernetes Publisher
49500d5614 sync: update godeps 2018-03-15 22:05:15 +00:00
Kubernetes Publisher
e27847e63b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 22:05:03 +00:00
Kubernetes Publisher
65d06de2f3 sync: update required packages 2018-03-15 18:05:45 +00:00
Kubernetes Publisher
496c0fb7c6 sync: update godeps 2018-03-15 18:05:45 +00:00
Kubernetes Publisher
2d749b5ee4 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 18:05:23 +00:00
Kubernetes Publisher
3b3c86ad0a sync: update required packages 2018-03-15 14:04:28 +00:00
Kubernetes Publisher
8b48e5925f sync: update godeps 2018-03-15 14:04:28 +00:00
Kubernetes Publisher
2831cd8c7d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 14:04:15 +00:00
Kubernetes Publisher
5bd6bbb0f3 sync: update required packages 2018-03-15 10:11:27 +00:00
Kubernetes Publisher
0af3465cf7 sync: update godeps 2018-03-15 10:11:27 +00:00
Kubernetes Publisher
7c1c1c3231 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 10:11:15 +00:00
Kubernetes Publisher
7d340d449b sync: update required packages 2018-03-14 19:32:56 +00:00
Kubernetes Publisher
678a801515 sync: update godeps 2018-03-14 19:32:56 +00:00
Kubernetes Publisher
082d762348 sync: update required packages 2018-03-14 15:32:15 +00:00
Kubernetes Publisher
7916e7170c sync: update godeps 2018-03-14 15:32:14 +00:00
Kubernetes Publisher
980a06392d sync: update required packages 2018-03-14 11:33:23 +00:00
Kubernetes Publisher
ca5aa904ad sync: update godeps 2018-03-14 11:33:23 +00:00
Kubernetes Publisher
9167ba3418 sync: update required packages 2018-03-14 07:31:48 +00:00
Kubernetes Publisher
8eabf90249 sync: update godeps 2018-03-14 07:31:47 +00:00
Kubernetes Publisher
4c769facf5 sync: update required packages 2018-03-14 03:32:57 +00:00
Kubernetes Publisher
69d9adf603 sync: update godeps 2018-03-14 03:32:56 +00:00
Kubernetes Publisher
210ae25d64 sync: update required packages 2018-03-13 23:32:45 +00:00
Kubernetes Publisher
f253a601ea sync: update godeps 2018-03-13 23:32:44 +00:00
Kubernetes Publisher
e6ffb98f4b sync: update required packages 2018-03-13 19:39:31 +00:00
Kubernetes Publisher
cb0ed97306 sync: update godeps 2018-03-13 19:39:30 +00:00
Kubernetes Publisher
5ca0e41e23 sync: update required packages 2018-03-13 15:35:10 +00:00
Kubernetes Publisher
d12153b9cb sync: update godeps 2018-03-13 15:35:10 +00:00
Kubernetes Publisher
6f2a867803 sync: update required packages 2018-03-13 11:37:13 +00:00
Kubernetes Publisher
f47092e6df sync: update godeps 2018-03-13 11:37:13 +00:00
Kubernetes Publisher
ac65c3e8db sync: update required packages 2018-03-13 07:36:40 +00:00
Kubernetes Publisher
0ebc5f0dc2 sync: update godeps 2018-03-13 07:36:40 +00:00
Kubernetes Publisher
d15195aaf5 sync: update required packages 2018-03-13 03:38:53 +00:00
Kubernetes Publisher
624f37ce80 sync: update godeps 2018-03-13 03:38:53 +00:00
Kubernetes Publisher
e924edbb01 sync: update required packages 2018-03-12 23:38:44 +00:00
Kubernetes Publisher
bb942c7c0f sync: update godeps 2018-03-12 23:38:44 +00:00
Kubernetes Publisher
287183cf60 sync: update required packages 2018-03-12 19:34:56 +00:00
Kubernetes Publisher
4c529afd2e sync: update godeps 2018-03-12 19:34:55 +00:00
Kubernetes Publisher
03c69a0ee3 sync: update required packages 2018-03-12 15:33:20 +00:00
Kubernetes Publisher
5bb111d6f9 sync: update godeps 2018-03-12 15:33:20 +00:00
Kubernetes Publisher
4d3eb95e4a sync: update required packages 2018-03-12 11:34:33 +00:00
Kubernetes Publisher
e0035eb4bf sync: update godeps 2018-03-12 11:34:33 +00:00
Kubernetes Publisher
9bf6c22881 sync: update required packages 2018-03-12 07:33:22 +00:00
Kubernetes Publisher
0c2ed9f43a sync: update godeps 2018-03-12 07:33:22 +00:00
Kubernetes Publisher
84b4ee98ce sync: update required packages 2018-03-12 03:41:28 +00:00
Kubernetes Publisher
14f24ae3a7 sync: update godeps 2018-03-12 03:41:28 +00:00
Kubernetes Publisher
3da33816ed sync: update required packages 2018-03-11 23:40:28 +00:00
Kubernetes Publisher
d81fb5dbf0 sync: update godeps 2018-03-11 23:40:28 +00:00
Kubernetes Publisher
d3aa8af507 sync: update required packages 2018-03-11 19:33:30 +00:00
Kubernetes Publisher
5532d0eb0b sync: update godeps 2018-03-11 19:33:30 +00:00
Kubernetes Publisher
9abc0196af sync: update required packages 2018-03-11 15:32:15 +00:00
Kubernetes Publisher
388cd37bbd sync: update godeps 2018-03-11 15:32:15 +00:00
Kubernetes Publisher
b7b46f2c49 sync: update required packages 2018-03-11 11:31:19 +00:00
Kubernetes Publisher
9e804d3f1a sync: update godeps 2018-03-11 11:31:19 +00:00
Kubernetes Publisher
634ece07d8 sync: update required packages 2018-03-11 07:32:00 +00:00
Kubernetes Publisher
ad21141682 sync: update godeps 2018-03-11 07:32:00 +00:00
Kubernetes Publisher
5d741260f8 sync: update required packages 2018-03-11 03:33:58 +00:00
Kubernetes Publisher
507f78b23c sync: update godeps 2018-03-11 03:33:58 +00:00
Kubernetes Publisher
82f419b119 sync: update required packages 2018-03-10 23:30:44 +00:00
Kubernetes Publisher
61cfaed5c9 sync: update godeps 2018-03-10 23:30:43 +00:00
Kubernetes Publisher
d3c2557301 sync: update required packages 2018-03-10 19:34:06 +00:00
Kubernetes Publisher
6b3f16892d sync: update godeps 2018-03-10 19:34:05 +00:00
Kubernetes Publisher
4b19c8ed86 sync: update required packages 2018-03-10 15:32:44 +00:00
Kubernetes Publisher
1cdde8260e sync: update godeps 2018-03-10 15:32:43 +00:00
Kubernetes Publisher
1e7d636d3c sync: update required packages 2018-03-10 11:33:45 +00:00
Kubernetes Publisher
a5d6700ca5 sync: update godeps 2018-03-10 11:33:45 +00:00
Kubernetes Publisher
daa07de354 sync: update required packages 2018-03-10 07:33:10 +00:00
Kubernetes Publisher
f30c1c7ec0 sync: update godeps 2018-03-10 07:33:10 +00:00
Kubernetes Publisher
4e1d276383 sync: update required packages 2018-03-10 03:32:18 +00:00
Kubernetes Publisher
49d58a13f1 sync: update godeps 2018-03-10 03:32:17 +00:00
Kubernetes Publisher
5dd8225d4a sync: update required packages 2018-03-09 23:35:34 +00:00
Kubernetes Publisher
1b6af89cc8 sync: update godeps 2018-03-09 23:35:33 +00:00
Kubernetes Publisher
60d0fdecd7 sync: update required packages 2018-03-09 19:37:31 +00:00
Kubernetes Publisher
89abbbb290 sync: update godeps 2018-03-09 19:37:30 +00:00
Kubernetes Publisher
5137dc6fac sync: update required packages 2018-03-09 15:34:13 +00:00
Kubernetes Publisher
e493bc5a04 sync: update godeps 2018-03-09 15:34:13 +00:00
Kubernetes Publisher
78273b5003 sync: update required packages 2018-03-09 11:31:06 +00:00
Kubernetes Publisher
cb9153ce16 sync: update godeps 2018-03-09 11:31:06 +00:00
Kubernetes Publisher
0f5009c80c sync: update required packages 2018-03-09 07:33:51 +00:00
Kubernetes Publisher
aad8d36591 sync: update godeps 2018-03-09 07:33:51 +00:00
Kubernetes Publisher
70477c2803 sync: update required packages 2018-03-09 03:34:08 +00:00
Kubernetes Publisher
ac2b699c47 sync: update godeps 2018-03-09 03:34:08 +00:00
Kubernetes Publisher
fcb27565f9 sync: update required packages 2018-03-08 23:41:00 +00:00
Kubernetes Publisher
c2d33650e6 sync: update godeps 2018-03-08 23:41:00 +00:00
Kubernetes Publisher
b4279d9727 sync: update required packages 2018-03-08 19:51:44 +00:00
Kubernetes Publisher
f5b7d74914 sync: update godeps 2018-03-08 19:51:43 +00:00
Kubernetes Publisher
4276946744 sync: update required packages 2018-03-08 15:50:44 +00:00
Kubernetes Publisher
0ecd18def0 sync: update godeps 2018-03-08 15:50:43 +00:00
Kubernetes Publisher
5cc9c89e9c sync: update required packages 2018-03-08 11:49:57 +00:00
Kubernetes Publisher
0c779380a7 sync: update godeps 2018-03-08 11:49:57 +00:00
Kubernetes Publisher
792ff950ed sync: update required packages 2018-03-08 07:51:38 +00:00
Kubernetes Publisher
a7477f6a56 sync: update godeps 2018-03-08 07:51:38 +00:00
Kubernetes Publisher
1dac7c7c47 sync: update required packages 2018-03-08 03:52:04 +00:00
Kubernetes Publisher
841b6a8a9a sync: update godeps 2018-03-08 03:52:04 +00:00
Kubernetes Publisher
6aa66afdd0 sync: update required packages 2018-03-07 23:59:11 +00:00
Kubernetes Publisher
b9ad2957a6 sync: update godeps 2018-03-07 23:59:10 +00:00
Kubernetes Publisher
14be768882 sync: update required packages 2018-03-07 19:59:00 +00:00
Kubernetes Publisher
17a7c8df92 sync: update godeps 2018-03-07 19:59:00 +00:00
Kubernetes Publisher
84e8c768cb sync: update required packages 2018-03-07 15:49:32 +00:00
Kubernetes Publisher
a3e1c89e31 sync: update godeps 2018-03-07 15:49:31 +00:00
Kubernetes Publisher
8c8415e082 sync: update required packages 2018-03-07 11:49:49 +00:00
Kubernetes Publisher
3bc7b5ba7a sync: update godeps 2018-03-07 11:49:49 +00:00
Kubernetes Publisher
4125f9c0dd sync: update required packages 2018-03-07 07:52:13 +00:00
Kubernetes Publisher
8ef820ed39 sync: update godeps 2018-03-07 07:52:12 +00:00
Kubernetes Publisher
429189f29c sync: update required packages 2018-03-07 04:12:09 +00:00
Kubernetes Publisher
be44b253d5 sync: update godeps 2018-03-07 04:12:09 +00:00
Kubernetes Publisher
e68a0e4264 sync: update required packages 2018-03-06 23:56:37 +00:00
Kubernetes Publisher
5353bcd9e1 sync: update godeps 2018-03-06 23:56:36 +00:00
Kubernetes Publisher
6fee01c3f0 sync: update required packages 2018-03-06 19:59:32 +00:00
Kubernetes Publisher
7d7fc3adaf sync: update godeps 2018-03-06 19:59:31 +00:00
Kubernetes Publisher
4cb0cb2765 sync: update required packages 2018-03-06 15:58:36 +00:00
Kubernetes Publisher
d61958cb23 sync: update godeps 2018-03-06 15:58:36 +00:00
Kubernetes Publisher
b98412556b sync: update required packages 2018-03-06 11:49:06 +00:00
Kubernetes Publisher
2e6b971ed3 sync: update godeps 2018-03-06 11:49:06 +00:00
Kubernetes Publisher
b9e68f7a2f sync: update required packages 2018-03-06 07:49:24 +00:00
Kubernetes Publisher
2f2a3ea54e sync: update godeps 2018-03-06 07:49:24 +00:00
Kubernetes Publisher
7ab3ce3a27 sync: update required packages 2018-03-06 03:59:10 +00:00
Kubernetes Publisher
7b852c8aff sync: update godeps 2018-03-06 03:59:10 +00:00
Kubernetes Publisher
d4c1318544 sync: update required packages 2018-03-05 23:58:09 +00:00
Kubernetes Publisher
a3bd2e0656 sync: update godeps 2018-03-05 23:58:09 +00:00
Kubernetes Publisher
cc30125c64 sync: update required packages 2018-03-05 20:00:07 +00:00
Kubernetes Publisher
19b8e3540a sync: update godeps 2018-03-05 20:00:07 +00:00
Kubernetes Publisher
75cf0f528c sync: update required packages 2018-03-05 15:56:05 +00:00
Kubernetes Publisher
a16c4fb99b sync: update godeps 2018-03-05 15:56:05 +00:00
Kubernetes Publisher
9088d7142a sync: update required packages 2018-03-05 11:48:29 +00:00
Kubernetes Publisher
31b57c47c8 sync: update godeps 2018-03-05 11:48:29 +00:00
Kubernetes Publisher
0b86001e9b sync: update required packages 2018-03-05 07:44:26 +00:00
Kubernetes Publisher
5ad3ab254f sync: update godeps 2018-03-05 07:44:26 +00:00
Kubernetes Publisher
9e8d1fbac4 sync: update required packages 2018-03-05 03:49:24 +00:00
Kubernetes Publisher
3f83e68bc6 sync: update godeps 2018-03-05 03:49:24 +00:00
Kubernetes Publisher
50a4134f7a sync: update required packages 2018-03-04 23:55:48 +00:00
Kubernetes Publisher
ccea8ee456 sync: update godeps 2018-03-04 23:55:47 +00:00
Kubernetes Publisher
d2c5632382 sync: update required packages 2018-03-04 20:00:01 +00:00
Kubernetes Publisher
cfef5fe0ba sync: update godeps 2018-03-04 20:00:01 +00:00
Kubernetes Publisher
e34dc1f6e6 sync: update required packages 2018-03-04 15:53:12 +00:00
Kubernetes Publisher
f2855d13f5 sync: update godeps 2018-03-04 15:53:12 +00:00
Kubernetes Publisher
0583c2a4d0 sync: update required packages 2018-03-04 11:40:30 +00:00
Kubernetes Publisher
69ff418f23 sync: update godeps 2018-03-04 11:40:30 +00:00
Kubernetes Publisher
5570564744 sync: update required packages 2018-03-04 07:42:21 +00:00
Kubernetes Publisher
d312ad8870 sync: update godeps 2018-03-04 07:42:21 +00:00
Kubernetes Publisher
f4c5a481a9 sync: update required packages 2018-03-04 03:44:23 +00:00
Kubernetes Publisher
7f8129ae63 sync: update godeps 2018-03-04 03:44:22 +00:00
Kubernetes Publisher
4196a8f28d sync: update required packages 2018-03-03 23:50:39 +00:00
Kubernetes Publisher
b562b505e4 sync: update godeps 2018-03-03 23:50:39 +00:00
Kubernetes Publisher
be6ef5ae3c sync: update required packages 2018-03-03 19:52:39 +00:00
Kubernetes Publisher
79eba74050 sync: update godeps 2018-03-03 19:52:39 +00:00
Kubernetes Publisher
7e97c9c5dd sync: update required packages 2018-03-03 16:11:59 +00:00
Kubernetes Publisher
41bb33bcd1 sync: update godeps 2018-03-03 16:11:59 +00:00
Kubernetes Publisher
a803a63815 sync: update required packages 2018-03-03 11:42:17 +00:00
Kubernetes Publisher
180391af66 sync: update godeps 2018-03-03 11:42:17 +00:00
Kubernetes Publisher
cc91eb9269 sync: update required packages 2018-03-03 07:42:46 +00:00
Kubernetes Publisher
1f655253ed sync: update godeps 2018-03-03 07:42:46 +00:00
Kubernetes Publisher
f7ca68e207 sync: update required packages 2018-03-03 03:59:53 +00:00
Kubernetes Publisher
9bddf931ac sync: update godeps 2018-03-03 03:59:53 +00:00
Kubernetes Publisher
e9102ed147 sync: update required packages 2018-03-02 23:47:39 +00:00
Kubernetes Publisher
19b0c0e5d5 sync: update godeps 2018-03-02 23:47:39 +00:00
Kubernetes Publisher
0d6167db53 sync: update required packages 2018-03-02 19:55:30 +00:00
Kubernetes Publisher
f0136d9ef6 sync: update godeps 2018-03-02 19:55:29 +00:00
Kubernetes Publisher
d240e98dbf sync: update required packages 2018-03-01 14:06:31 +00:00
Kubernetes Publisher
7e7bd92fcc sync: update godeps 2018-03-01 14:06:31 +00:00
Kubernetes Publisher
8b87f64d92 sync: update required packages 2018-03-01 10:08:33 +00:00
Kubernetes Publisher
e0a5573a16 sync: update godeps 2018-03-01 10:08:33 +00:00
Kubernetes Publisher
8104f25e9c sync: update required packages 2018-03-01 06:04:15 +00:00
Kubernetes Publisher
9d53e197e3 sync: update godeps 2018-03-01 06:04:15 +00:00
Kubernetes Publisher
bbd7d73a17 sync: update required packages 2018-03-01 02:04:18 +00:00
Kubernetes Publisher
05f8c9c368 sync: update godeps 2018-03-01 02:04:17 +00:00
Kubernetes Publisher
d8cec7dd7f sync: update required packages 2018-02-28 22:04:00 +00:00
Kubernetes Publisher
6b94834b03 sync: update godeps 2018-02-28 22:04:00 +00:00
Kubernetes Publisher
9362e12f91 sync: update required packages 2018-02-28 18:05:21 +00:00
Kubernetes Publisher
6709183647 sync: update godeps 2018-02-28 18:05:21 +00:00
Kubernetes Publisher
68e80ea442 sync: update required packages 2018-02-28 14:04:17 +00:00
Kubernetes Publisher
53d077b6c5 sync: update godeps 2018-02-28 14:04:17 +00:00
Kubernetes Publisher
a7cb310dc7 sync: update required packages 2018-02-28 10:04:03 +00:00
Kubernetes Publisher
d035305bd3 sync: update godeps 2018-02-28 10:04:03 +00:00
Kubernetes Publisher
59ae96900f sync: update required packages 2018-02-28 06:08:32 +00:00
Kubernetes Publisher
84c0d3015d sync: update godeps 2018-02-28 06:08:31 +00:00
Kubernetes Publisher
cf90a35856 sync: update required packages 2018-02-27 18:03:10 +00:00
Kubernetes Publisher
d29a0202ed sync: update godeps 2018-02-27 18:03:10 +00:00
Kubernetes Publisher
7cbb8b720b sync: update required packages 2018-02-27 14:02:21 +00:00
Kubernetes Publisher
a6f3c78c6c sync: update godeps 2018-02-27 14:02:21 +00:00
Kubernetes Publisher
f48ef6c21f sync: update required packages 2018-02-27 10:05:52 +00:00
Kubernetes Publisher
8f3858614e sync: update godeps 2018-02-27 10:05:52 +00:00
Kubernetes Publisher
bc192d0caa sync: update required packages 2018-02-27 06:02:50 +00:00
Kubernetes Publisher
142711e628 sync: update godeps 2018-02-27 06:02:49 +00:00
Kubernetes Publisher
d725f17bf7 sync: update required packages 2018-02-27 02:19:09 +00:00
Kubernetes Publisher
dec30864c6 sync: update godeps 2018-02-27 02:19:09 +00:00
Kubernetes Publisher
84c2646f90 sync: update required packages 2018-02-20 05:58:37 +00:00
Kubernetes Publisher
91d9864e1d sync: update godeps 2018-02-20 05:58:37 +00:00
Kubernetes Publisher
ce55123f3e sync: update required packages 2018-02-20 02:00:22 +00:00
Kubernetes Publisher
805e61cfd6 sync: update godeps 2018-02-20 02:00:22 +00:00
Kubernetes Publisher
451f001f73 sync: update required packages 2018-02-19 22:00:58 +00:00
Kubernetes Publisher
56cfe6abbb sync: update godeps 2018-02-19 22:00:58 +00:00
Kubernetes Publisher
12c53a6ab6 sync: update required packages 2018-02-19 17:59:27 +00:00
Kubernetes Publisher
b7f6f6f28a sync: update godeps 2018-02-19 17:59:27 +00:00
Kubernetes Publisher
134ab0b427 sync: update required packages 2018-02-19 13:55:41 +00:00
Kubernetes Publisher
2a02277721 sync: update godeps 2018-02-19 13:55:40 +00:00
Kubernetes Publisher
e38ca3f036 sync: update required packages 2018-02-19 09:55:32 +00:00
Kubernetes Publisher
2ec915b1b1 sync: update godeps 2018-02-19 09:55:32 +00:00
Kubernetes Publisher
61588233ac sync: update required packages 2018-02-19 05:55:04 +00:00
Kubernetes Publisher
31ef9cb12c sync: update godeps 2018-02-19 05:55:03 +00:00
Kubernetes Publisher
ba6f924a21 sync: update required packages 2018-02-19 01:56:26 +00:00
Kubernetes Publisher
338746c7cf sync: update godeps 2018-02-19 01:56:25 +00:00
Kubernetes Publisher
0e53eaf550 sync: update required packages 2018-02-18 21:55:32 +00:00
Kubernetes Publisher
55f322f22a sync: update godeps 2018-02-18 21:55:32 +00:00
Kubernetes Publisher
e2c80626e2 sync: update required packages 2018-02-18 17:55:05 +00:00
Kubernetes Publisher
6b5b7ab360 sync: update godeps 2018-02-18 17:55:05 +00:00
Kubernetes Publisher
3d80126e25 sync: update required packages 2018-02-18 13:55:08 +00:00
Kubernetes Publisher
34542966a9 sync: update godeps 2018-02-18 13:55:07 +00:00
Kubernetes Publisher
bebf37070e sync: update required packages 2018-02-18 09:55:29 +00:00
Kubernetes Publisher
3c4bf10ec4 sync: update godeps 2018-02-18 09:55:29 +00:00
Kubernetes Publisher
6d17a3ab97 sync: update required packages 2018-02-18 01:58:15 +00:00
Kubernetes Publisher
c19e1bad73 sync: update godeps 2018-02-18 01:58:15 +00:00
Kubernetes Publisher
8834415ee0 sync: update required packages 2018-02-17 21:59:16 +00:00
Kubernetes Publisher
0a4e351759 sync: update godeps 2018-02-17 21:59:16 +00:00
Kubernetes Publisher
972458bc4a sync: update required packages 2018-02-17 17:56:34 +00:00
Kubernetes Publisher
d8e448e238 sync: update godeps 2018-02-17 17:56:34 +00:00
Kubernetes Publisher
911c4bddb3 sync: update required packages 2018-02-17 13:55:36 +00:00
Kubernetes Publisher
bbb9d6e6cf sync: update godeps 2018-02-17 13:55:36 +00:00
Kubernetes Publisher
9ddb9dd975 sync: update required packages 2018-02-17 09:56:16 +00:00
Kubernetes Publisher
326e0fa643 sync: update godeps 2018-02-17 09:56:16 +00:00
Kubernetes Publisher
e2cdd4167d sync: update required packages 2018-02-17 01:57:39 +00:00
Kubernetes Publisher
84e9db4824 sync: update godeps 2018-02-17 01:57:39 +00:00
Kubernetes Publisher
f6fbb7c9e6 sync: update required packages 2018-02-16 21:59:09 +00:00
Kubernetes Publisher
893fcc8883 sync: update godeps 2018-02-16 21:59:08 +00:00
Kubernetes Publisher
c580a09219 sync: update required packages 2018-02-16 17:57:18 +00:00
Kubernetes Publisher
7edd44b8ae sync: update godeps 2018-02-16 17:57:17 +00:00
Kubernetes Publisher
22d95131ed sync: update required packages 2018-02-16 13:59:32 +00:00
Kubernetes Publisher
f0bc03cb6e sync: update godeps 2018-02-16 13:59:31 +00:00
Kubernetes Publisher
50df46ee8a sync: update required packages 2018-02-14 22:41:30 +00:00
Kubernetes Publisher
7773a08d1f sync: update godeps 2018-02-14 22:41:30 +00:00
Kubernetes Publisher
3994e6e1e9 sync: update required packages 2018-02-14 18:39:07 +00:00
Kubernetes Publisher
40010f9d80 sync: update godeps 2018-02-14 18:39:07 +00:00
Kubernetes Publisher
8f67176b70 sync: update required packages 2018-02-14 14:38:02 +00:00
Kubernetes Publisher
6e9d3f4357 sync: update godeps 2018-02-14 14:38:02 +00:00
Kubernetes Publisher
0d8e4e9897 sync: update required packages 2018-02-14 10:37:58 +00:00
Kubernetes Publisher
6ef250b07e sync: update godeps 2018-02-14 10:37:58 +00:00
Kubernetes Publisher
858bbdb28a sync: update required packages 2018-02-14 06:37:48 +00:00
Kubernetes Publisher
05ea3b7b8e sync: update godeps 2018-02-14 06:37:48 +00:00
Kubernetes Publisher
6554d14778 sync: update required packages 2018-02-14 02:39:29 +00:00
Kubernetes Publisher
f57acb722c sync: update godeps 2018-02-14 02:39:29 +00:00
Kubernetes Publisher
3eddd4a21c sync: update required packages 2018-02-13 22:41:21 +00:00
Kubernetes Publisher
3a36453eed sync: update godeps 2018-02-13 22:41:20 +00:00
Kubernetes Publisher
da193dcba3 sync: update required packages 2018-02-13 18:39:16 +00:00
Kubernetes Publisher
3910befcef sync: update godeps 2018-02-13 18:39:16 +00:00
Kubernetes Publisher
8627abbcbb sync: update required packages 2018-02-13 14:38:00 +00:00
Kubernetes Publisher
e484b004e9 sync: update godeps 2018-02-13 14:38:00 +00:00
Kubernetes Publisher
1d93e36736 sync: update required packages 2018-02-13 10:37:54 +00:00
Kubernetes Publisher
a30a66741b sync: update godeps 2018-02-13 10:37:53 +00:00
Kubernetes Publisher
ca886847d4 sync: update required packages 2018-02-13 06:36:49 +00:00
Kubernetes Publisher
992029bae7 sync: update godeps 2018-02-13 06:36:49 +00:00
Kubernetes Publisher
847abf4722 sync: update required packages 2018-02-13 02:41:45 +00:00
Kubernetes Publisher
edf73dabe5 Merge pull request #57767 from mbohlool/automated-cherry-pick-of-#57735-upstream-release-1.9
Automated cherry pick of #57735: Update boilerplate for 2018

Kubernetes-commit: 3968dfcc5de611f872995476b524bca12aec13eb
2018-01-03 02:19:13 +00:00
Christoph Blecker
8ec2db261d Regenerate all generated code
Kubernetes-commit: 1526308622b9e9c27c71151edf59b2f15e9e02f7
2018-01-02 00:21:07 -08:00
James Munnelly
e83deb8c0f Register metav1 types into samplecontroller api scheme
Kubernetes-commit: c2df2ad4a089bf1d8f7686125db77bd1ce53e88d
2017-12-15 12:21:16 +00:00
Kubernetes Publisher
541fac8336 Merge remote-tracking branch 'origin/master' into release-1.9
Kubernetes-commit: 7c9967f6de296b968505885781e9ed9fc65156c3
2017-12-07 05:06:33 +00:00
Kubernetes Publisher
0f1d5c27c3 Merge remote-tracking branch 'origin/master' into release-1.9
Kubernetes-commit: 1e8eb335b3a59daa26c25333c28d50def634961d
2017-12-07 05:06:20 +00:00
Kubernetes Publisher
86077011f5 Merge remote-tracking branch 'origin/master' into release-1.9
Kubernetes-commit: fb2de097796aafb38cdb6dc4d3184ebd43d525b4
2017-12-07 05:06:20 +00:00
2275 changed files with 692989 additions and 1637 deletions


@@ -1,2 +0,0 @@
Sorry, we do not accept changes directly against this repository. Please see
CONTRIBUTING.md for information on where and how to contribute instead.


@@ -1,7 +0,0 @@
# Contributing guidelines
Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information.

Godeps/Godeps.json (generated, 1002 lines)

File diff suppressed because it is too large.

Godeps/Readme (generated, new file, 5 lines)

@@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

OWNERS (18 lines changed)

@@ -1,13 +1,9 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- deads2k
- sttts
- munnerz
- sttts
- munnerz
reviewers:
- deads2k
- sttts
- munnerz
- nikhita
labels:
- sig/api-machinery
- gregory-m
- kargakis
- sttts
- munnerz
- nikhita

README.md (101 lines changed)

@@ -3,8 +3,6 @@
This repository implements a simple controller for watching Foo resources as
defined with a CustomResourceDefinition (CRD).
**Note:** go-get or vendor this package as `k8s.io/sample-controller`.
This particular example demonstrates how to perform basic operations such as:
* How to register a new custom resource (custom resource type) of type `Foo` using a CustomResourceDefinition.
@@ -19,81 +17,30 @@ The `update-codegen` script will automatically generate the following files &
directories:
* `pkg/apis/samplecontroller/v1alpha1/zz_generated.deepcopy.go`
* `pkg/generated/`
* `pkg/client/`
Changes should not be made to these files manually, and when creating your own
controller based off of this implementation you should not copy these files and
instead run the `update-codegen` script to generate your own.
## Details
The sample controller uses [client-go library](https://github.com/kubernetes/client-go/tree/master/tools/cache) extensively.
The details of interaction points of the sample controller with various mechanisms from this library are
explained [here](docs/controller-client-go.md).
## Fetch sample-controller and its dependencies
Like the rest of Kubernetes, sample-controller has used
[godep](https://github.com/tools/godep) and `$GOPATH` for years and is
now adopting go 1.11 modules. There are thus two alternative ways to
go about fetching this demo and its dependencies.
### Fetch with godep
When NOT using go 1.11 modules, you can use the following commands.
```sh
go get -d k8s.io/sample-controller
cd $GOPATH/src/k8s.io/sample-controller
godep restore
```
### When using go 1.11 modules
When using go 1.11 modules (`GO111MODULE=on`), issue the following
commands --- starting in whatever working directory you like.
```sh
git clone https://github.com/kubernetes/sample-controller.git
cd sample-controller
```
Note, however, that if you intend to
generate code then you will also need the
code-generator repo to exist in an old-style location. One easy way
to do this is to use the command `go mod vendor` to create and
populate the `vendor` directory.
### A Note on kubernetes/kubernetes
If you are developing Kubernetes according to
https://github.com/kubernetes/community/blob/master/contributors/guide/github-workflow.md
then you already have a copy of this demo in
`kubernetes/staging/src/k8s.io/sample-controller` and its dependencies
--- including the code generator --- are in usable locations
(valid for all Go versions).
## Purpose
This is an example of how to build a kube-like controller with a single type.
## Running
**Prerequisite**: Since the sample-controller uses `apps/v1` deployments, the Kubernetes cluster version should be greater than 1.9.
```sh
# assumes you have a working kubeconfig, not required if operating in-cluster
go build -o sample-controller .
./sample-controller -kubeconfig=$HOME/.kube/config
$ go run *.go -kubeconfig=$HOME/.kube/config
# create a CustomResourceDefinition
kubectl create -f artifacts/examples/crd-status-subresource.yaml
$ kubectl create -f artifacts/examples/crd.yaml
# create a custom resource of type Foo
kubectl create -f artifacts/examples/example-foo.yaml
$ kubectl create -f artifacts/examples/example-foo.yaml
# check deployments created through the custom resource
kubectl get deployments
$ kubectl get deployments
```
## Use Cases
@@ -126,44 +73,11 @@ type User struct {
}
```
Note, the JSON tag `json:` is required on all user facing fields within your type. Typically API types contain only user facing fields. When the JSON tag is omitted from the field, Kubernetes generators consider the field to be internal and will not expose the field in their generated external output. For example, this means that the field would not be included in a generated CRD schema.
## Validation
To validate custom resources, use the [`CustomResourceValidation`](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) feature. Validation in the form of a [structured schema](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definitions/#specifying-a-structural-schema) is mandatory to be provided for `apiextensions.k8s.io/v1`.
### Example
The schema in [`crd.yaml`](./artifacts/examples/crd.yaml) applies the following validation on the custom resource:
`spec.replicas` must be an integer and must have a minimum value of 1 and a maximum value of 10.
## Subresources
Custom Resources support `/status` and `/scale` [subresources](https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#subresources). The `CustomResourceSubresources` feature is in GA from v1.16.
### Example
The CRD in [`crd-status-subresource.yaml`](./artifacts/examples/crd-status-subresource.yaml) enables the `/status` subresource for custom resources.
This means that [`UpdateStatus`](./controller.go) can be used by the controller to update only the status part of the custom resource.
To understand why only the status part of the custom resource should be updated, please refer to the [Kubernetes API conventions](https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status).
In the above steps, use `crd-status-subresource.yaml` to create the CRD:
```sh
# create a CustomResourceDefinition supporting the status subresource
kubectl create -f artifacts/examples/crd-status-subresource.yaml
```
## A Note on the API version
The [group](https://kubernetes.io/docs/reference/using-api/#api-groups) version of the custom resource in `crd.yaml` is `v1alpha1`; this can be evolved to a stable API version, `v1`, using [CRD Versioning](https://kubernetes.io/docs/tasks/extend-kubernetes/custom-resources/custom-resource-definition-versioning/).
## Cleanup
You can clean up the created CustomResourceDefinition with:
```sh
kubectl delete crd foos.samplecontroller.k8s.io
```
$ kubectl delete crd foos.samplecontroller.k8s.io
## Compatibility
@@ -176,4 +90,3 @@ k8s.io/client-go.
https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/sample-controller.
Code changes are made in that location, merged into k8s.io/kubernetes and
later synced here.
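For reference alongside the README's note that the `json:` tag is required on every user-facing field: a minimal sketch of what the Foo API types could look like. The field names match those used by the CRD schema and controller code elsewhere in this diff (`deploymentName`, `replicas`, `availableReplicas`); the repository's actual `pkg/apis/samplecontroller/v1alpha1` definitions, with their generator tags and deepcopy functions, are not shown here.

```go
// Sketch of the Foo API types the README refers to, with the required JSON
// tags on every user-facing field. The repository's real
// pkg/apis/samplecontroller/v1alpha1/types.go (plus its generated deepcopy
// functions) is not part of this diff.
package v1alpha1

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Foo is a specification for a Foo resource.
type Foo struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   FooSpec   `json:"spec"`
	Status FooStatus `json:"status"`
}

// FooSpec is the spec for a Foo resource; the controller reads
// DeploymentName and Replicas when reconciling.
type FooSpec struct {
	DeploymentName string `json:"deploymentName"`
	Replicas       *int32 `json:"replicas"`
}

// FooStatus is the status for a Foo resource; updateFooStatus writes
// AvailableReplicas back from the owned Deployment.
type FooStatus struct {
	AvailableReplicas int32 `json:"availableReplicas"`
}
```

With tags like these in place, dropping a `json:` tag would make the generators treat the field as internal and omit it from generated external output, which is exactly the failure mode the README warns about.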


@@ -1,16 +0,0 @@
# Defined below are the security contacts for this repo.
#
# They are the contact point for the Product Security Committee to reach out
# to for triaging and handling of incoming issues.
#
# The below names agree to abide by the
# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy)
# and will be removed and replaced if they violate that agreement.
#
# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE
# INSTRUCTIONS AT https://kubernetes.io/security/
cheftako
deads2k
lavalamp
sttts


@@ -1,41 +0,0 @@
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
name: foos.samplecontroller.k8s.io
# for more information on the below annotation, please see
# https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/2337-k8s.io-group-protection/README.md
annotations:
"api-approved.kubernetes.io": "unapproved, experimental-only; please get an approval from Kubernetes API reviewers if you're trying to develop a CRD in the *.k8s.io or *.kubernetes.io groups"
spec:
group: samplecontroller.k8s.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
# schema used for validation
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
deploymentName:
type: string
replicas:
type: integer
minimum: 1
maximum: 10
status:
type: object
properties:
availableReplicas:
type: integer
# subresources for the custom resource
subresources:
# enables the status subresource
status: {}
names:
kind: Foo
plural: foos
scope: Namespaced


@@ -1,36 +1,10 @@
apiVersion: apiextensions.k8s.io/v1
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: foos.samplecontroller.k8s.io
# for more information on the below annotation, please see
# https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/2337-k8s.io-group-protection/README.md
annotations:
"api-approved.kubernetes.io": "unapproved, experimental-only; please get an approval from Kubernetes API reviewers if you're trying to develop a CRD in the *.k8s.io or *.kubernetes.io groups"
spec:
group: samplecontroller.k8s.io
versions:
- name: v1alpha1
served: true
storage: true
schema:
# schema used for validation
openAPIV3Schema:
type: object
properties:
spec:
type: object
properties:
deploymentName:
type: string
replicas:
type: integer
minimum: 1
maximum: 10
status:
type: object
properties:
availableReplicas:
type: integer
version: v1alpha1
names:
kind: Foo
plural: foos
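Both CRD manifests above describe the same Foo shape (`spec.deploymentName` as a string, `spec.replicas` as an integer bounded 1–10); they differ only in the apiextensions API version and where the schema lives. A hedged sketch of creating a conforming object through the generated typed clientset follows — the object and deployment names are made up, and the context-taking Create signature assumes the newer `pkg/generated` clientset on the removed side of this diff (the 1.9-era `pkg/client` clientset takes no context argument).

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
)

func main() {
	// Load the default kubeconfig, as main.go does with BuildConfigFromFlags.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	replicas := int32(2) // must stay within the CRD's 1–10 bound
	foo := &samplev1alpha1.Foo{
		ObjectMeta: metav1.ObjectMeta{Name: "example-foo", Namespace: "default"},
		Spec: samplev1alpha1.FooSpec{
			DeploymentName: "example-foo-deployment",
			Replicas:       &replicas,
		},
	}

	created, err := client.SamplecontrollerV1alpha1().Foos("default").
		Create(context.TODO(), foo, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created Foo:", created.Name)
}
```

This is roughly the programmatic equivalent of the README's `kubectl create -f artifacts/examples/example-foo.yaml` step.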


@@ -1,3 +0,0 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)


@@ -17,31 +17,31 @@ limitations under the License.
package main
import (
"context"
"fmt"
"time"
appsv1 "k8s.io/api/apps/v1"
"github.com/golang/glog"
appsv1beta2 "k8s.io/api/apps/v1beta2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1"
appslisters "k8s.io/client-go/listers/apps/v1beta2"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog/v2"
samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
samplescheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
listers "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
samplescheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
listers "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
)
const controllerAgentName = "sample-controller"
@@ -86,21 +86,23 @@ type Controller struct {
// NewController returns a new sample controller
func NewController(
ctx context.Context,
kubeclientset kubernetes.Interface,
sampleclientset clientset.Interface,
deploymentInformer appsinformers.DeploymentInformer,
fooInformer informers.FooInformer) *Controller {
logger := klog.FromContext(ctx)
kubeInformerFactory kubeinformers.SharedInformerFactory,
sampleInformerFactory informers.SharedInformerFactory) *Controller {
// obtain references to shared index informers for the Deployment and Foo
// types.
deploymentInformer := kubeInformerFactory.Apps().V1beta2().Deployments()
fooInformer := sampleInformerFactory.Samplecontroller().V1alpha1().Foos()
// Create event broadcaster
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
logger.V(4).Info("Creating event broadcaster")
samplescheme.AddToScheme(scheme.Scheme)
glog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(0)
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
@@ -115,7 +117,7 @@ func NewController(
recorder: recorder,
}
logger.Info("Setting up event handlers")
glog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueFoo,
@@ -125,15 +127,15 @@ func NewController(
})
// Set up an event handler for when Deployment resources change. This
// handler will lookup the owner of the given Deployment, and if it is
// owned by a Foo resource then the handler will enqueue that Foo resource for
// owned by a Foo resource will enqueue that Foo resource for
// processing. This way, we don't need to implement custom logic for
// handling Deployment resources. More info on this pattern:
// https://github.com/kubernetes/community/blob/8cafef897a22026d42f5e5bb3f104febe7e29830/contributors/devel/controllers.md
deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.handleObject,
UpdateFunc: func(old, new interface{}) {
newDepl := new.(*appsv1.Deployment)
oldDepl := old.(*appsv1.Deployment)
newDepl := new.(*appsv1beta2.Deployment)
oldDepl := old.(*appsv1beta2.Deployment)
if newDepl.ResourceVersion == oldDepl.ResourceVersion {
// Periodic resync will send update events for all known Deployments.
// Two different versions of the same Deployment will always have different RVs.
@@ -151,30 +153,28 @@ func NewController(
// as syncing informer caches and starting workers. It will block until stopCh
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(ctx context.Context, workers int) error {
defer utilruntime.HandleCrash()
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer c.workqueue.ShutDown()
logger := klog.FromContext(ctx)
// Start the informer factories to begin populating the informer caches
logger.Info("Starting Foo controller")
glog.Info("Starting Foo controller")
// Wait for the caches to be synced before starting workers
logger.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(ctx.Done(), c.deploymentsSynced, c.foosSynced); !ok {
glog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
logger.Info("Starting workers", "count", workers)
glog.Info("Starting workers")
// Launch two workers to process Foo resources
for i := 0; i < workers; i++ {
go wait.UntilWithContext(ctx, c.runWorker, time.Second)
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
logger.Info("Started workers")
<-ctx.Done()
logger.Info("Shutting down workers")
glog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
return nil
}
@@ -182,16 +182,15 @@ func (c *Controller) Run(ctx context.Context, workers int) error {
// runWorker is a long-running function that will continually call the
// processNextWorkItem function in order to read and process a message on the
// workqueue.
func (c *Controller) runWorker(ctx context.Context) {
for c.processNextWorkItem(ctx) {
func (c *Controller) runWorker() {
for c.processNextWorkItem() {
}
}
// processNextWorkItem will read a single work item off the workqueue and
// attempt to process it, by calling the syncHandler.
func (c *Controller) processNextWorkItem(ctx context.Context) bool {
func (c *Controller) processNextWorkItem() bool {
obj, shutdown := c.workqueue.Get()
logger := klog.FromContext(ctx)
if shutdown {
return false
@@ -218,25 +217,23 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.syncHandler(ctx, key); err != nil {
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
if err := c.syncHandler(key); err != nil {
return fmt.Errorf("error syncing '%s': %s", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
logger.Info("Successfully synced", "resourceName", key)
glog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
utilruntime.HandleError(err)
runtime.HandleError(err)
return true
}
@@ -246,13 +243,11 @@ func (c *Controller) processNextWorkItem(ctx context.Context) bool {
// syncHandler compares the actual state with the desired, and attempts to
// converge the two. It then updates the Status block of the Foo resource
// with the current status of the resource.
func (c *Controller) syncHandler(ctx context.Context, key string) error {
func (c *Controller) syncHandler(key string) error {
// Convert the namespace/name string into a distinct namespace and name
logger := klog.LoggerWithValues(klog.FromContext(ctx), "resourceName", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
@@ -262,7 +257,7 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error {
// The Foo resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
runtime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
return nil
}
@@ -274,7 +269,7 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
utilruntime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
runtime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
return nil
}
@@ -282,7 +277,7 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error {
deployment, err := c.deploymentsLister.Deployments(foo.Namespace).Get(deploymentName)
// If the resource doesn't exist, we'll create it
if errors.IsNotFound(err) {
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(context.TODO(), newDeployment(foo), metav1.CreateOptions{})
deployment, err = c.kubeclientset.AppsV1beta2().Deployments(foo.Namespace).Create(newDeployment(foo))
}
// If an error occurs during Get/Create, we'll requeue the item so we can
@@ -293,23 +288,23 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error {
}
// If the Deployment is not controlled by this Foo resource, we should log
// a warning to the event recorder and return error msg.
// a warning to the event recorder and ret
if !metav1.IsControlledBy(deployment, foo) {
msg := fmt.Sprintf(MessageResourceExists, deployment.Name)
c.recorder.Event(foo, corev1.EventTypeWarning, ErrResourceExists, msg)
return fmt.Errorf("%s", msg)
return fmt.Errorf(msg)
}
// If this number of the replicas on the Foo resource is specified, and the
// number does not equal the current desired replicas on the Deployment, we
// should update the Deployment resource.
if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
logger.V(4).Info("Update deployment resource", "currentReplicas", *foo.Spec.Replicas, "desiredReplicas", *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(context.TODO(), newDeployment(foo), metav1.UpdateOptions{})
glog.V(4).Infof("Foor: %d, deplR: %d", *foo.Spec.Replicas, *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1beta2().Deployments(foo.Namespace).Update(newDeployment(foo))
}
// If an error occurs during Update, we'll requeue the item so we can
// attempt processing again later. This could have been caused by a
// attempt processing again later. THis could have been caused by a
// temporary network failure, or any other transient reason.
if err != nil {
return err
@@ -326,17 +321,17 @@ func (c *Controller) syncHandler(ctx context.Context, key string) error {
return nil
}
func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1.Deployment) error {
func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1beta2.Deployment) error {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
fooCopy := foo.DeepCopy()
fooCopy.Status.AvailableReplicas = deployment.Status.AvailableReplicas
// If the CustomResourceSubresources feature gate is not enabled,
// we must use Update instead of UpdateStatus to update the Status block of the Foo resource.
// UpdateStatus will not allow changes to the Spec of the resource,
// which is ideal for ensuring nothing other than resource status has been updated.
_, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).UpdateStatus(context.TODO(), fooCopy, metav1.UpdateOptions{})
// Until #38113 is merged, we must use Update instead of UpdateStatus to
// update the Status block of the Foo resource. UpdateStatus will not
// allow changes to the Spec of the resource, which is ideal for ensuring
// nothing other than resource status has been updated.
_, err := c.sampleclientset.SamplecontrollerV1alpha1().Foos(foo.Namespace).Update(fooCopy)
return err
}
@@ -347,10 +342,10 @@ func (c *Controller) enqueueFoo(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
utilruntime.HandleError(err)
runtime.HandleError(err)
return
}
c.workqueue.Add(key)
c.workqueue.AddRateLimited(key)
}
// handleObject will take any resource implementing metav1.Object and attempt
@@ -361,21 +356,20 @@ func (c *Controller) enqueueFoo(obj interface{}) {
func (c *Controller) handleObject(obj interface{}) {
var object metav1.Object
var ok bool
logger := klog.FromContext(context.Background())
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
runtime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
logger.V(4).Info("Recovered deleted object", "resourceName", object.GetName())
glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
}
logger.V(4).Info("Processing object", "object", klog.KObj(object))
glog.V(4).Infof("Processing object: %s", object.GetName())
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
// If this object is not owned by a Foo, we should not do anything more
// with it.
@@ -385,7 +379,7 @@ func (c *Controller) handleObject(obj interface{}) {
foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
if err != nil {
logger.V(4).Info("Ignore orphaned object", "object", klog.KObj(object), "foo", ownerRef.Name)
glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
return
}
@@ -397,20 +391,24 @@ func (c *Controller) handleObject(obj interface{}) {
// newDeployment creates a new Deployment for a Foo resource. It also sets
// the appropriate OwnerReferences on the resource so handleObject can discover
// the Foo resource that 'owns' it.
func newDeployment(foo *samplev1alpha1.Foo) *appsv1.Deployment {
func newDeployment(foo *samplev1alpha1.Foo) *appsv1beta2.Deployment {
labels := map[string]string{
"app": "nginx",
"controller": foo.Name,
}
return &appsv1.Deployment{
return &appsv1beta2.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: foo.Spec.DeploymentName,
Namespace: foo.Namespace,
OwnerReferences: []metav1.OwnerReference{
*metav1.NewControllerRef(foo, samplev1alpha1.SchemeGroupVersion.WithKind("Foo")),
*metav1.NewControllerRef(foo, schema.GroupVersionKind{
Group: samplev1alpha1.SchemeGroupVersion.Group,
Version: samplev1alpha1.SchemeGroupVersion.Version,
Kind: "Foo",
}),
},
},
Spec: appsv1.DeploymentSpec{
Spec: appsv1beta2.DeploymentSpec{
Replicas: foo.Spec.Replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,
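The controller.go hunks above swap the 1.9-era stop-channel and glog plumbing (`Run(threadiness, stopCh)`, `wait.Until`, `glog.Infof`) for the context-based, structured-logging form (`Run(ctx, workers)`, `wait.UntilWithContext`, `klog.FromContext`). Below is a stripped-down sketch of just that worker-loop skeleton in the context-based style; the `miniController` type, its `sync` callback, and `main` are illustrative scaffolding, not code from the repository.

```go
package main

import (
	"context"
	"fmt"
	"time"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
	"k8s.io/klog/v2"
)

// miniController shows only the queue/worker mechanics used by the sample
// controller's Run/runWorker/processNextWorkItem methods.
type miniController struct {
	queue workqueue.RateLimitingInterface
	sync  func(ctx context.Context, key string) error
}

func (c *miniController) Run(ctx context.Context, workers int) error {
	defer utilruntime.HandleCrash()
	defer c.queue.ShutDown()

	logger := klog.FromContext(ctx)
	logger.Info("Starting workers", "count", workers)
	for i := 0; i < workers; i++ {
		// Each worker loops until the context is cancelled.
		go wait.UntilWithContext(ctx, c.runWorker, time.Second)
	}
	<-ctx.Done()
	logger.Info("Shutting down workers")
	return nil
}

func (c *miniController) runWorker(ctx context.Context) {
	for c.processNextWorkItem(ctx) {
	}
}

func (c *miniController) processNextWorkItem(ctx context.Context) bool {
	obj, shutdown := c.queue.Get()
	if shutdown {
		return false
	}
	defer c.queue.Done(obj)

	key, ok := obj.(string)
	if !ok {
		c.queue.Forget(obj)
		utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
		return true
	}
	if err := c.sync(ctx, key); err != nil {
		// Requeue with rate limiting on transient errors.
		c.queue.AddRateLimited(key)
		utilruntime.HandleError(fmt.Errorf("error syncing %q: %v, requeuing", key, err))
		return true
	}
	c.queue.Forget(key)
	return true
}

func main() {
	c := &miniController{
		queue: workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()),
		sync: func(ctx context.Context, key string) error {
			klog.FromContext(ctx).Info("synced", "key", key)
			return nil
		},
	}
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()
	c.queue.Add("default/example-foo")
	_ = c.Run(ctx, 2)
}
```

The real controller additionally wraps the body of processNextWorkItem in a closure so that `Done` is always deferred per item, but the Get/Forget/AddRateLimited shape is the same.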


@@ -1,321 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package main
import (
"context"
"fmt"
"reflect"
"testing"
"time"
apps "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/diff"
kubeinformers "k8s.io/client-go/informers"
k8sfake "k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/klog/v2/ktesting"
samplecontroller "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
"k8s.io/sample-controller/pkg/generated/clientset/versioned/fake"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
)
var (
alwaysReady = func() bool { return true }
noResyncPeriodFunc = func() time.Duration { return 0 }
)
type fixture struct {
t *testing.T
client *fake.Clientset
kubeclient *k8sfake.Clientset
// Objects to put in the store.
fooLister []*samplecontroller.Foo
deploymentLister []*apps.Deployment
// Actions expected to happen on the client.
kubeactions []core.Action
actions []core.Action
// Objects from here preloaded into NewSimpleFake.
kubeobjects []runtime.Object
objects []runtime.Object
}
func newFixture(t *testing.T) *fixture {
f := &fixture{}
f.t = t
f.objects = []runtime.Object{}
f.kubeobjects = []runtime.Object{}
return f
}
func newFoo(name string, replicas *int32) *samplecontroller.Foo {
return &samplecontroller.Foo{
TypeMeta: metav1.TypeMeta{APIVersion: samplecontroller.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: metav1.NamespaceDefault,
},
Spec: samplecontroller.FooSpec{
DeploymentName: fmt.Sprintf("%s-deployment", name),
Replicas: replicas,
},
}
}
func (f *fixture) newController(ctx context.Context) (*Controller, informers.SharedInformerFactory, kubeinformers.SharedInformerFactory) {
f.client = fake.NewSimpleClientset(f.objects...)
f.kubeclient = k8sfake.NewSimpleClientset(f.kubeobjects...)
i := informers.NewSharedInformerFactory(f.client, noResyncPeriodFunc())
k8sI := kubeinformers.NewSharedInformerFactory(f.kubeclient, noResyncPeriodFunc())
c := NewController(ctx, f.kubeclient, f.client,
k8sI.Apps().V1().Deployments(), i.Samplecontroller().V1alpha1().Foos())
c.foosSynced = alwaysReady
c.deploymentsSynced = alwaysReady
c.recorder = &record.FakeRecorder{}
for _, f := range f.fooLister {
i.Samplecontroller().V1alpha1().Foos().Informer().GetIndexer().Add(f)
}
for _, d := range f.deploymentLister {
k8sI.Apps().V1().Deployments().Informer().GetIndexer().Add(d)
}
return c, i, k8sI
}
func (f *fixture) run(ctx context.Context, fooName string) {
f.runController(ctx, fooName, true, false)
}
func (f *fixture) runExpectError(ctx context.Context, fooName string) {
f.runController(ctx, fooName, true, true)
}
func (f *fixture) runController(ctx context.Context, fooName string, startInformers bool, expectError bool) {
c, i, k8sI := f.newController(ctx)
if startInformers {
i.Start(ctx.Done())
k8sI.Start(ctx.Done())
}
err := c.syncHandler(ctx, fooName)
if !expectError && err != nil {
f.t.Errorf("error syncing foo: %v", err)
} else if expectError && err == nil {
f.t.Error("expected error syncing foo, got nil")
}
actions := filterInformerActions(f.client.Actions())
for i, action := range actions {
if len(f.actions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(actions)-len(f.actions), actions[i:])
break
}
expectedAction := f.actions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.actions) > len(actions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.actions)-len(actions), f.actions[len(actions):])
}
k8sActions := filterInformerActions(f.kubeclient.Actions())
for i, action := range k8sActions {
if len(f.kubeactions) < i+1 {
f.t.Errorf("%d unexpected actions: %+v", len(k8sActions)-len(f.kubeactions), k8sActions[i:])
break
}
expectedAction := f.kubeactions[i]
checkAction(expectedAction, action, f.t)
}
if len(f.kubeactions) > len(k8sActions) {
f.t.Errorf("%d additional expected actions:%+v", len(f.kubeactions)-len(k8sActions), f.kubeactions[len(k8sActions):])
}
}
// checkAction verifies that expected and actual actions are equal and both have
// same attached resources
func checkAction(expected, actual core.Action, t *testing.T) {
if !(expected.Matches(actual.GetVerb(), actual.GetResource().Resource) && actual.GetSubresource() == expected.GetSubresource()) {
t.Errorf("Expected\n\t%#v\ngot\n\t%#v", expected, actual)
return
}
if reflect.TypeOf(actual) != reflect.TypeOf(expected) {
t.Errorf("Action has wrong type. Expected: %t. Got: %t", expected, actual)
return
}
switch a := actual.(type) {
case core.CreateActionImpl:
e, _ := expected.(core.CreateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.UpdateActionImpl:
e, _ := expected.(core.UpdateActionImpl)
expObject := e.GetObject()
object := a.GetObject()
if !reflect.DeepEqual(expObject, object) {
t.Errorf("Action %s %s has wrong object\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expObject, object))
}
case core.PatchActionImpl:
e, _ := expected.(core.PatchActionImpl)
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintSideBySide(expPatch, patch))
}
default:
t.Errorf("Uncaptured Action %s %s, you should explicitly add a case to capture it",
actual.GetVerb(), actual.GetResource().Resource)
}
}
// filterInformerActions filters list and watch actions for testing resources.
// Since list and watch don't change resource state we can filter it to lower
// noise level in our tests.
func filterInformerActions(actions []core.Action) []core.Action {
ret := []core.Action{}
for _, action := range actions {
if len(action.GetNamespace()) == 0 &&
(action.Matches("list", "foos") ||
action.Matches("watch", "foos") ||
action.Matches("list", "deployments") ||
action.Matches("watch", "deployments")) {
continue
}
ret = append(ret, action)
}
return ret
}
func (f *fixture) expectCreateDeploymentAction(d *apps.Deployment) {
f.kubeactions = append(f.kubeactions, core.NewCreateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d))
}
func (f *fixture) expectUpdateDeploymentAction(d *apps.Deployment) {
f.kubeactions = append(f.kubeactions, core.NewUpdateAction(schema.GroupVersionResource{Resource: "deployments"}, d.Namespace, d))
}
func (f *fixture) expectUpdateFooStatusAction(foo *samplecontroller.Foo) {
action := core.NewUpdateSubresourceAction(schema.GroupVersionResource{Resource: "foos"}, "status", foo.Namespace, foo)
f.actions = append(f.actions, action)
}
func getKey(foo *samplecontroller.Foo, t *testing.T) string {
key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(foo)
if err != nil {
t.Errorf("Unexpected error getting key for foo %v: %v", foo.Name, err)
return ""
}
return key
}
func TestCreatesDeployment(t *testing.T) {
f := newFixture(t)
foo := newFoo("test", int32Ptr(1))
_, ctx := ktesting.NewTestContext(t)
f.fooLister = append(f.fooLister, foo)
f.objects = append(f.objects, foo)
expDeployment := newDeployment(foo)
f.expectCreateDeploymentAction(expDeployment)
f.expectUpdateFooStatusAction(foo)
f.run(ctx, getKey(foo, t))
}
func TestDoNothing(t *testing.T) {
f := newFixture(t)
foo := newFoo("test", int32Ptr(1))
_, ctx := ktesting.NewTestContext(t)
d := newDeployment(foo)
f.fooLister = append(f.fooLister, foo)
f.objects = append(f.objects, foo)
f.deploymentLister = append(f.deploymentLister, d)
f.kubeobjects = append(f.kubeobjects, d)
f.expectUpdateFooStatusAction(foo)
f.run(ctx, getKey(foo, t))
}
func TestUpdateDeployment(t *testing.T) {
f := newFixture(t)
foo := newFoo("test", int32Ptr(1))
_, ctx := ktesting.NewTestContext(t)
d := newDeployment(foo)
// Update replicas
foo.Spec.Replicas = int32Ptr(2)
expDeployment := newDeployment(foo)
f.fooLister = append(f.fooLister, foo)
f.objects = append(f.objects, foo)
f.deploymentLister = append(f.deploymentLister, d)
f.kubeobjects = append(f.kubeobjects, d)
f.expectUpdateFooStatusAction(foo)
f.expectUpdateDeploymentAction(expDeployment)
f.run(ctx, getKey(foo, t))
}
func TestNotControlledByUs(t *testing.T) {
f := newFixture(t)
foo := newFoo("test", int32Ptr(1))
_, ctx := ktesting.NewTestContext(t)
d := newDeployment(foo)
d.ObjectMeta.OwnerReferences = []metav1.OwnerReference{}
f.fooLister = append(f.fooLister, foo)
f.objects = append(f.objects, foo)
f.deploymentLister = append(f.deploymentLister, d)
f.kubeobjects = append(f.kubeobjects, d)
f.runExpectError(ctx, getKey(foo, t))
}
func int32Ptr(i int32) *int32 { return &i }
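The removed test file above works by running `syncHandler` against fake clientsets and then comparing the actions the fakes recorded with the expected ones (`checkAction`, `filterInformerActions`). As a standalone illustration of that recording mechanism, here is a small sketch using only the core fake clientset and a made-up ConfigMap; the context-taking Create signature assumes a recent client-go such as the v0.28.0 pinned in the removed go.mod.

```go
package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	k8sfake "k8s.io/client-go/kubernetes/fake"
)

func main() {
	// The fake clientset records every action issued against it.
	client := k8sfake.NewSimpleClientset()

	cm := &corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "default"},
		Data:       map[string]string{"hello": "world"},
	}
	if _, err := client.CoreV1().ConfigMaps("default").Create(context.TODO(), cm, metav1.CreateOptions{}); err != nil {
		panic(err)
	}

	// Inspect the recorded actions, much as checkAction/filterInformerActions do above.
	for _, action := range client.Actions() {
		fmt.Printf("%s %s\n", action.GetVerb(), action.GetResource().Resource)
	}
}
```

Running it prints `create configmaps`, the same verb/resource pairs the fixture asserts on.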


@@ -1,64 +0,0 @@
# client-go under the hood
The [client-go](https://github.com/kubernetes/client-go/) library contains various mechanisms that you can use when
developing your custom controllers. These mechanisms are defined in the
[tools/cache folder](https://github.com/kubernetes/client-go/tree/master/tools/cache) of the library.
Here is a pictorial representation showing how the various components in
the client-go library work and their interaction points with the custom
controller code that you will write.
<p align="center">
<img src="images/client-go-controller-interaction.jpeg" height="600" width="700"/>
</p>
## client-go components
* Reflector: A reflector, which is defined in [type *Reflector* inside package *cache*](https://github.com/kubernetes/client-go/blob/master/tools/cache/reflector.go),
watches the Kubernetes API for the specified resource type (kind).
The function in which this is done is *ListAndWatch*.
The watch could be for an in-built resource or it could be for a custom resource.
When the reflector receives notification about existence of new
resource instance through the watch API, it gets the newly created object
using the corresponding listing API and puts it in the Delta Fifo queue
inside the *watchHandler* function.
* Informer: An informer defined in the [base controller inside package *cache*](https://github.com/kubernetes/client-go/blob/master/tools/cache/controller.go) pops objects from the Delta Fifo queue.
The function in which this is done is *processLoop*. The job of this base controller
is to save the object for later retrieval, and to invoke our controller passing it the object.
* Indexer: An indexer provides indexing functionality over objects.
It is defined in [type *Indexer* inside package *cache*](https://github.com/kubernetes/client-go/blob/master/tools/cache/index.go). A typical indexing use-case is to create an index based on object labels. Indexer can
maintain indexes based on several indexing functions.
Indexer uses a thread-safe data store to store objects and their keys.
There is a default function named *MetaNamespaceKeyFunc* defined in [type *Store* inside package *cache*](https://github.com/kubernetes/client-go/blob/master/tools/cache/store.go)
that generates an object's key as `<namespace>/<name>` combination for that object.
## Custom Controller components
* Informer reference: This is the reference to the Informer instance that knows
how to work with your custom resource objects. Your custom controller code needs
to create the appropriate Informer.
* Indexer reference: This is the reference to the Indexer instance that knows
how to work with your custom resource objects. Your custom controller code needs
to create this. You will be using this reference for retrieving objects for
later processing.
The base controller in client-go provides the *NewIndexerInformer* function to create Informer and Indexer.
In your code you can either [directly invoke this function](https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go#L174) or [use factory methods for creating an informer.](https://github.com/kubernetes/sample-controller/blob/master/main.go#L61)
* Resource Event Handlers: These are the callback functions which will be called by
the Informer when it wants to deliver an object to your controller. The typical
pattern to write these functions is to obtain the dispatched object's key
and enqueue that key in a work queue for further processing.
* Work queue: This is the queue that you create in your controller code to decouple
delivery of an object from its processing. Resource event handler functions are written
to extract the delivered object's key and add that to the work queue.
* Process Item: This is the function that you create in your code which processes items
from the work queue. There can be one or more other functions that do the actual processing.
These functions will typically use the [Indexer reference](https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go#L73), or a Listing wrapper to retrieve the object corresponding to the key.
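The document above describes the standard wiring: an informer watches the API server and fills a local cache, resource event handlers only enqueue object keys, and workers drain the queue and re-fetch objects through a lister/indexer. Below is a compact sketch of that wiring against plain Pods (an arbitrary choice; the sample controller itself wires Foos and Deployments this way in controller.go).

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/util/workqueue"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	podInformer := factory.Core().V1().Pods()
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// Resource event handlers only enqueue keys; processing happens elsewhere.
	podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
				queue.Add(key)
			}
		},
	})

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	factory.Start(ctx.Done())
	cache.WaitForCacheSync(ctx.Done(), podInformer.Informer().HasSynced)

	// A single worker: pop a key, look the object up via the lister, "process" it.
	go func() {
		for {
			item, shutdown := queue.Get()
			if shutdown {
				return
			}
			key := item.(string)
			ns, name, _ := cache.SplitMetaNamespaceKey(key)
			if pod, err := podInformer.Lister().Pods(ns).Get(name); err == nil {
				fmt.Println("processing", pod.Namespace+"/"+pod.Name)
			}
			queue.Done(item)
		}
	}()

	<-ctx.Done()
	queue.ShutDown()
}
```

A production controller keeps its workers running until shutdown, as in the Run sketch after the controller.go diff above, rather than using a fixed timeout.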

Binary image file not shown (109 KiB before; removed).

65
go.mod

@@ -1,65 +0,0 @@
// This is a generated file. Do not edit directly.
module k8s.io/sample-controller
go 1.20
require (
k8s.io/api v0.28.0
k8s.io/apimachinery v0.28.0
k8s.io/client-go v0.28.0
k8s.io/code-generator v0.28.0
k8s.io/klog/v2 v2.100.1
)
require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/go-logr/logr v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.3 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
golang.org/x/mod v0.10.0 // indirect
golang.org/x/net v0.13.0 // indirect
golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/term v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.8.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.30.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.3.0 // indirect
)
replace (
k8s.io/api => k8s.io/api v0.28.0
k8s.io/apimachinery => k8s.io/apimachinery v0.28.0
k8s.io/client-go => k8s.io/client-go v0.28.0
k8s.io/code-generator => k8s.io/code-generator v0.28.0
)

169
go.sum

@@ -1,169 +0,0 @@
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.13.0 h1:Nvo8UFsZ8X3BhAC9699Z1j7XQ3rsZnUUm7jfBEk1ueY=
golang.org/x/net v0.13.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.8.0 h1:vSDcovVPld282ceKgDimkRSC8kpaH1dgyc9UMzlt84Y=
golang.org/x/tools v0.8.0/go.mod h1:JxBZ99ISMI5ViVkT1tr6tdNmXeTrcpVSD3vZ1RsRdN4=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.28.0 h1:3j3VPWmN9tTDI68NETBWlDiA9qOiGJ7sdKeufehBYsM=
k8s.io/api v0.28.0/go.mod h1:0l8NZJzB0i/etuWnIXcwfIv+xnDOhL3lLW919AWYDuY=
k8s.io/apimachinery v0.28.0 h1:ScHS2AG16UlYWk63r46oU3D5y54T53cVI5mMJwwqFNA=
k8s.io/apimachinery v0.28.0/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw=
k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM=
k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc=
k8s.io/code-generator v0.28.0 h1:msdkRVJNVFgdiIJ8REl/d3cZsMB9HByFcWMmn13NyuE=
k8s.io/code-generator v0.28.0/go.mod h1:ueeSJZJ61NHBa0ccWLey6mwawum25vX61nRZ6WOzN9A=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=


@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
@@ -18,24 +18,17 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
source "${CODEGEN_PKG}/kube_codegen.sh"
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
# generate the code with:
# --output-base because this script should also be able to run inside the vendor dir of
# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
# instead of the $GOPATH directly. For normal projects this can be dropped.
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
k8s.io/sample-controller/pkg/client k8s.io/sample-controller/pkg/apis \
samplecontroller:v1alpha1 \
--output-base "$(dirname ${BASH_SOURCE})/../../.."
kube::codegen::gen_helpers \
--input-pkg-root k8s.io/sample-controller/pkg/apis \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
kube::codegen::gen_client \
--with-watch \
--input-pkg-root k8s.io/sample-controller/pkg/apis \
--output-pkg-root k8s.io/sample-controller/pkg/generated \
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
--boilerplate "${SCRIPT_ROOT}/hack/boilerplate.go.txt"
# To use your own boilerplate text append:
# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt

View File

@@ -1,4 +1,4 @@
#!/usr/bin/env bash
#!/bin/bash
# Copyright 2017 The Kubernetes Authors.
#
@@ -18,12 +18,14 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_ROOT="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd -P)"
SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
DIFFROOT="${SCRIPT_ROOT}/pkg"
TMP_DIFFROOT="$(mktemp -d -t "$(basename "$0").XXXXXX")/pkg"
TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg"
_tmp="${SCRIPT_ROOT}/_tmp"
cleanup() {
rm -rf "${TMP_DIFFROOT}"
rm -rf "${_tmp}"
}
trap "cleanup" EXIT SIGINT
@@ -36,7 +38,9 @@ cp -a "${DIFFROOT}"/* "${TMP_DIFFROOT}"
echo "diffing ${DIFFROOT} against freshly generated codegen"
ret=0
diff -Naupr "${DIFFROOT}" "${TMP_DIFFROOT}" || ret=$?
if [[ $ret -eq 0 ]]; then
cp -a "${TMP_DIFFROOT}"/* "${DIFFROOT}"
if [[ $ret -eq 0 ]]
then
echo "${DIFFROOT} up to date."
else
echo "${DIFFROOT} is out of date. Please run hack/update-codegen.sh"

main.go
View File

@@ -20,16 +20,16 @@ import (
"flag"
"time"
"github.com/golang/glog"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
"k8s.io/sample-controller/pkg/signals"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
"k8s.io/sample-controller/pkg/signals"
)
var (
@@ -38,46 +38,36 @@ var (
)
func main() {
klog.InitFlags(nil)
flag.Parse()
// set up signals so we handle the shutdown signal gracefully
ctx := signals.SetupSignalHandler()
logger := klog.FromContext(ctx)
// set up signals so we handle the first shutdown signal gracefully
stopCh := signals.SetupSignalHandler()
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
logger.Error(err, "Error building kubeconfig")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
glog.Fatalf("Error building kubeconfig: %s", err.Error())
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
logger.Error(err, "Error building kubernetes clientset")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
exampleClient, err := clientset.NewForConfig(cfg)
if err != nil {
logger.Error(err, "Error building kubernetes clientset")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
glog.Fatalf("Error building example clientset: %s", err.Error())
}
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
exampleInformerFactory := informers.NewSharedInformerFactory(exampleClient, time.Second*30)
controller := NewController(ctx, kubeClient, exampleClient,
kubeInformerFactory.Apps().V1().Deployments(),
exampleInformerFactory.Samplecontroller().V1alpha1().Foos())
controller := NewController(kubeClient, exampleClient, kubeInformerFactory, exampleInformerFactory)
// notice that there is no need to run Start methods in a separate goroutine. (i.e. go kubeInformerFactory.Start(ctx.Done()))
// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
kubeInformerFactory.Start(ctx.Done())
exampleInformerFactory.Start(ctx.Done())
go kubeInformerFactory.Start(stopCh)
go exampleInformerFactory.Start(stopCh)
if err = controller.Run(ctx, 2); err != nil {
logger.Error(err, "Error running controller")
klog.FlushAndExit(klog.ExitFlushTimeout, 1)
if err = controller.Run(2, stopCh); err != nil {
glog.Fatalf("Error running controller: %s", err.Error())
}
}
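The main.go hunks above contrast the old glog/stop-channel wiring with the newer klog/v2 contextual-logging style. Below is a minimal sketch of the newer pattern, not the repository's exact code: `signal.NotifyContext` from the standard library stands in for the repo's `signals.SetupSignalHandler` helper, and the `kubeconfig` flag is illustrative.

```go
package main

import (
	"context"
	"flag"
	"os"
	"os/signal"
	"syscall"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog/v2"
)

func main() {
	klog.InitFlags(nil)
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file")
	flag.Parse()

	// Context is cancelled on SIGINT/SIGTERM, replacing the old stopCh plumbing.
	ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer cancel()
	logger := klog.FromContext(ctx)

	cfg, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		logger.Error(err, "Error building kubeconfig")
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}

	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		logger.Error(err, "Error building kubernetes clientset")
		klog.FlushAndExit(klog.ExitFlushTimeout, 1)
	}

	logger.Info("connected", "host", cfg.Host, "clientReady", kubeClient != nil)
	<-ctx.Done()
}
```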

View File

@@ -16,7 +16,6 @@ limitations under the License.
package samplecontroller
// GroupName is the group name used in this package
const (
GroupName = "samplecontroller.k8s.io"
)

View File

@@ -15,7 +15,7 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=samplecontroller.k8s.io
// Package v1alpha1 is the v1alpha1 version of the API.
package v1alpha1 // import "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
// +groupName=samplecontroller.k8s.io
package v1alpha1

View File

@@ -38,10 +38,8 @@ func Resource(resource string) schema.GroupResource {
}
var (
// SchemeBuilder initializes a scheme builder
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme is a global function that registers this API group & version to a scheme
AddToScheme = SchemeBuilder.AddToScheme
AddToScheme = SchemeBuilder.AddToScheme
)
// Adds the list of known types to Scheme.
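The register.go hunk above is where the generated SchemeBuilder and AddToScheme live. As a rough sketch (assuming the v1alpha1 API package path shown elsewhere in this diff), a consumer registers the group into its own scheme like this:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme is the SchemeBuilder's aggregate registration function.
	utilruntime.Must(samplev1alpha1.AddToScheme(scheme))

	// The Foo group/version/kind is now known to the scheme.
	for gvk := range scheme.AllKnownTypes() {
		fmt.Println(gvk.String())
	}
}
```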

View File

@@ -21,6 +21,7 @@ import (
)
// +genclient
// +genclient:noStatus
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Foo is a specification for a Foo resource

View File

@@ -1,8 +1,7 @@
//go:build !ignore_autogenerated
// +build !ignore_autogenerated
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by deepcopy-gen. DO NOT EDIT.
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package v1alpha1
@@ -49,15 +48,16 @@ func (in *Foo) DeepCopy() *Foo {
func (in *Foo) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FooList) DeepCopyInto(out *FooList) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ListMeta.DeepCopyInto(&out.ListMeta)
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]Foo, len(*in))
@@ -82,8 +82,9 @@ func (in *FooList) DeepCopy() *FooList {
func (in *FooList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -91,8 +92,12 @@ func (in *FooSpec) DeepCopyInto(out *FooSpec) {
*out = *in
if in.Replicas != nil {
in, out := &in.Replicas, &out.Replicas
*out = new(int32)
**out = **in
if *in == nil {
*out = nil
} else {
*out = new(int32)
**out = **in
}
}
return
}
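The zz_generated.deepcopy.go hunks show why DeepCopy exists: objects handed out by shared informer caches must never be mutated in place. A minimal sketch of the intended usage, assuming the `Spec.Replicas *int32` field visible in the generated code above (the `bumpReplicas` helper is illustrative):

```go
package main

import (
	"fmt"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
)

// bumpReplicas copies a cached Foo before modifying it, so the informer
// cache is never written to.
func bumpReplicas(cached *samplev1alpha1.Foo) *samplev1alpha1.Foo {
	foo := cached.DeepCopy()
	if foo.Spec.Replicas == nil {
		foo.Spec.Replicas = new(int32)
	}
	*foo.Spec.Replicas++
	return foo
}

func main() {
	replicas := int32(1)
	cached := &samplev1alpha1.Foo{}
	cached.Name = "example-foo"
	cached.Spec.Replicas = &replicas

	updated := bumpReplicas(cached)
	fmt.Println(*cached.Spec.Replicas, *updated.Spec.Replicas) // 1 2
}
```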

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,26 +14,25 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
"fmt"
"net/http"
glog "github.com/golang/glog"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
SamplecontrollerV1alpha1() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface
// Deprecated: please explicitly pick a version if possible.
Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface
}
// Clientset contains the clients for groups.
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
samplecontrollerV1alpha1 *samplecontrollerv1alpha1.SamplecontrollerV1alpha1Client
@@ -44,6 +43,12 @@ func (c *Clientset) SamplecontrollerV1alpha1() samplecontrollerv1alpha1.Sampleco
return c.samplecontrollerV1alpha1
}
// Deprecated: Samplecontroller retrieves the default version of SamplecontrollerClient.
// Please explicitly pick a version.
func (c *Clientset) Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface {
return c.samplecontrollerV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
@@ -53,48 +58,21 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
}
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.UserAgent == "" {
configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
}
// share the transport between all clients
httpClient, err := rest.HTTPClientFor(&configShallowCopy)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&configShallowCopy, httpClient)
}
// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
if configShallowCopy.Burst <= 0 {
return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
}
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.samplecontrollerV1alpha1, err = samplecontrollerv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
cs.samplecontrollerV1alpha1, err = samplecontrollerv1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
glog.Errorf("failed to create the DiscoveryClient: %v", err)
return nil, err
}
return &cs, nil
@@ -103,11 +81,11 @@ func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset,
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
cs, err := NewForConfig(c)
if err != nil {
panic(err)
}
return cs
var cs Clientset
cs.samplecontrollerV1alpha1 = samplecontrollerv1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
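The clientset.go hunk above contrasts the older NewForConfig with the newer NewForConfig/NewForConfigAndClient split and its rate-limiter check. A hedged sketch of constructing the clientset with a shared HTTP client, assuming the newer generated/ package layout (the `newClients` helper is illustrative):

```go
package main

import (
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"

	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
)

func newClients(kubeconfig string) (*clientset.Clientset, error) {
	cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
	if err != nil {
		return nil, err
	}
	// If QPS is set, Burst must be > 0 or the constructor returns an error.
	cfg.QPS = 20
	cfg.Burst = 40

	// Share one transport across clientsets built from the same config.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		return nil, err
	}
	return clientset.NewForConfigAndClient(cfg, httpClient)
}
```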

View File

@@ -0,0 +1,18 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This package has the automatically generated clientset.
package versioned

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@@ -24,9 +22,9 @@ import (
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
@@ -41,20 +39,11 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
}
}
cs := &Clientset{tracker: o}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
fakePtr := testing.Fake{}
fakePtr.AddReactor("*", "*", testing.ObjectReaction(o))
fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil))
return cs
return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}}
}
// Clientset implements clientset.Interface. Meant to be embedded into a
@@ -63,23 +52,20 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
tracker testing.ObjectTracker
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
func (c *Clientset) Tracker() testing.ObjectTracker {
return c.tracker
}
var (
_ clientset.Interface = &Clientset{}
_ testing.FakeClient = &Clientset{}
)
var _ clientset.Interface = &Clientset{}
// SamplecontrollerV1alpha1 retrieves the SamplecontrollerV1alpha1Client
func (c *Clientset) SamplecontrollerV1alpha1() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface {
return &fakesamplecontrollerv1alpha1.FakeSamplecontrollerV1alpha1{Fake: &c.Fake}
}
// Samplecontroller retrieves the SamplecontrollerV1alpha1Client
func (c *Clientset) Samplecontroller() samplecontrollerv1alpha1.SamplecontrollerV1alpha1Interface {
return &fakesamplecontrollerv1alpha1.FakeSamplecontrollerV1alpha1{Fake: &c.Fake}
}
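The fake clientset shown above is what controller unit tests build on. A minimal test sketch, assuming the context-aware typed-client signatures and the generated/ package layout used by newer sample-controller revisions, and the standard metav1.ObjectMeta embedding on Foo:

```go
package controller

import (
	"context"
	"testing"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	"k8s.io/sample-controller/pkg/generated/clientset/versioned/fake"
)

func TestFakeClientSeesSeededFoo(t *testing.T) {
	seed := &samplev1alpha1.Foo{
		ObjectMeta: metav1.ObjectMeta{Name: "example-foo", Namespace: "default"},
	}
	// The fake clientset serves the seeded objects from an in-memory tracker.
	client := fake.NewSimpleClientset(seed)

	got, err := client.SamplecontrollerV1alpha1().Foos("default").
		Get(context.TODO(), "example-foo", metav1.GetOptions{})
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if got.Name != seed.Name {
		t.Fatalf("got %q, want %q", got.Name, seed.Name)
	}
}
```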

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@@ -23,34 +21,33 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
samplecontrollerv1alpha1.AddToScheme,
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func AddToScheme(scheme *runtime.Scheme) {
samplecontrollerv1alpha1.AddToScheme(scheme)
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(scheme))
}

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
@@ -23,34 +21,33 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
samplecontrollerv1alpha1.AddToScheme,
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(Scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func AddToScheme(scheme *runtime.Scheme) {
samplecontrollerv1alpha1.AddToScheme(scheme)
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}
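The scheme package diffed above exposes the registered Scheme and its Codecs. As a sketch of what that buys a consumer (assuming the generated/ import path), the codec factory can decode a serialized Foo manifest into a typed object:

```go
package main

import (
	"fmt"

	"k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
)

func main() {
	manifest := []byte(`{
  "apiVersion": "samplecontroller.k8s.io/v1alpha1",
  "kind": "Foo",
  "metadata": {"name": "example-foo"}
}`)

	// The deserializer recognizes the Foo kind because init() registered the
	// samplecontroller group into the package-level Scheme.
	obj, gvk, err := scheme.Codecs.UniversalDeserializer().Decode(manifest, nil, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %T as %s\n", obj, gvk)
}
```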

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,5 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,15 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
"context"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
@@ -35,12 +32,12 @@ type FakeFoos struct {
ns string
}
var foosResource = v1alpha1.SchemeGroupVersion.WithResource("foos")
var foosResource = schema.GroupVersionResource{Group: "samplecontroller.k8s.io", Version: "v1alpha1", Resource: "foos"}
var foosKind = v1alpha1.SchemeGroupVersion.WithKind("Foo")
var foosKind = schema.GroupVersionKind{Group: "samplecontroller.k8s.io", Version: "v1alpha1", Kind: "Foo"}
// Get takes name of the foo, and returns the corresponding foo object, and an error if there is any.
func (c *FakeFoos) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Foo, err error) {
func (c *FakeFoos) Get(name string, options v1.GetOptions) (result *v1alpha1.Foo, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(foosResource, c.ns, name), &v1alpha1.Foo{})
@@ -51,7 +48,7 @@ func (c *FakeFoos) Get(ctx context.Context, name string, options v1.GetOptions)
}
// List takes label and field selectors, and returns the list of Foos that match those selectors.
func (c *FakeFoos) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
func (c *FakeFoos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(foosResource, foosKind, c.ns, opts), &v1alpha1.FooList{})
@@ -63,7 +60,7 @@ func (c *FakeFoos) List(ctx context.Context, opts v1.ListOptions) (result *v1alp
if label == nil {
label = labels.Everything()
}
list := &v1alpha1.FooList{ListMeta: obj.(*v1alpha1.FooList).ListMeta}
list := &v1alpha1.FooList{}
for _, item := range obj.(*v1alpha1.FooList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
@@ -73,14 +70,14 @@ func (c *FakeFoos) List(ctx context.Context, opts v1.ListOptions) (result *v1alp
}
// Watch returns a watch.Interface that watches the requested foos.
func (c *FakeFoos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
func (c *FakeFoos) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(foosResource, c.ns, opts))
}
// Create takes the representation of a foo and creates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *FakeFoos) Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (result *v1alpha1.Foo, err error) {
func (c *FakeFoos) Create(foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(foosResource, c.ns, foo), &v1alpha1.Foo{})
@@ -91,7 +88,7 @@ func (c *FakeFoos) Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.Create
}
// Update takes the representation of a foo and updates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *FakeFoos) Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
func (c *FakeFoos) Update(foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(foosResource, c.ns, foo), &v1alpha1.Foo{})
@@ -101,38 +98,26 @@ func (c *FakeFoos) Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.Update
return obj.(*v1alpha1.Foo), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeFoos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(foosResource, "status", c.ns, foo), &v1alpha1.Foo{})
if obj == nil {
return nil, err
}
return obj.(*v1alpha1.Foo), err
}
// Delete takes name of the foo and deletes it. Returns an error if one occurs.
func (c *FakeFoos) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
func (c *FakeFoos) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteActionWithOptions(foosResource, c.ns, name, opts), &v1alpha1.Foo{})
Invokes(testing.NewDeleteAction(foosResource, c.ns, name), &v1alpha1.Foo{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeFoos) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(foosResource, c.ns, listOpts)
func (c *FakeFoos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(foosResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1alpha1.FooList{})
return err
}
// Patch applies the patch and returns the patched foo.
func (c *FakeFoos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error) {
func (c *FakeFoos) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, pt, data, subresources...), &v1alpha1.Foo{})
Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, data, subresources...), &v1alpha1.Foo{})
if obj == nil {
return nil, err

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,14 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
v1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
v1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
)
type FakeSamplecontrollerV1alpha1 struct {

View File

@@ -0,0 +1,155 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
scheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
)
// FoosGetter has a method to return a FooInterface.
// A group's client should implement this interface.
type FoosGetter interface {
Foos(namespace string) FooInterface
}
// FooInterface has methods to work with Foo resources.
type FooInterface interface {
Create(*v1alpha1.Foo) (*v1alpha1.Foo, error)
Update(*v1alpha1.Foo) (*v1alpha1.Foo, error)
Delete(name string, options *v1.DeleteOptions) error
DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
Get(name string, options v1.GetOptions) (*v1alpha1.Foo, error)
List(opts v1.ListOptions) (*v1alpha1.FooList, error)
Watch(opts v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error)
FooExpansion
}
// foos implements FooInterface
type foos struct {
client rest.Interface
ns string
}
// newFoos returns a Foos
func newFoos(c *SamplecontrollerV1alpha1Client, namespace string) *foos {
return &foos{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the foo, and returns the corresponding foo object, and an error if there is any.
func (c *foos) Get(name string, options v1.GetOptions) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Get().
Namespace(c.ns).
Resource("foos").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Foos that match those selectors.
func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
result = &v1alpha1.FooList{}
err = c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested foos.
func (c *foos) Watch(opts v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Create takes the representation of a foo and creates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *foos) Create(foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Post().
Namespace(c.ns).
Resource("foos").
Body(foo).
Do().
Into(result)
return
}
// Update takes the representation of a foo and updates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *foos) Update(foo *v1alpha1.Foo) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Put().
Namespace(c.ns).
Resource("foos").
Name(foo.Name).
Body(foo).
Do().
Into(result)
return
}
// Delete takes name of the foo and deletes it. Returns an error if one occurs.
func (c *foos) Delete(name string, options *v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("foos").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *foos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("foos").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched foo.
func (c *foos) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("foos").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,8 +14,6 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type FooExpansion interface{}

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,16 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"net/http"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
"k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
"k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
)
type SamplecontrollerV1alpha1Interface interface {
@@ -41,28 +38,12 @@ func (c *SamplecontrollerV1alpha1Client) Foos(namespace string) FooInterface {
}
// NewForConfig creates a new SamplecontrollerV1alpha1Client for the given config.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*SamplecontrollerV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
httpClient, err := rest.HTTPClientFor(&config)
if err != nil {
return nil, err
}
return NewForConfigAndClient(&config, httpClient)
}
// NewForConfigAndClient creates a new SamplecontrollerV1alpha1Client for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SamplecontrollerV1alpha1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientForConfigAndClient(&config, h)
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
@@ -88,7 +69,7 @@ func setConfigDefaults(config *rest.Config) error {
gv := v1alpha1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()

View File

@@ -0,0 +1,130 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
package externalversions
import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
samplecontroller "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller"
reflect "reflect"
sync "sync"
time "time"
)
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewFilteredSharedInformerFactory(client, defaultResync, v1.NamespaceAll, nil)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return &sharedInformerFactory{
client: client,
namespace: namespace,
tweakListOptions: tweakListOptions,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
}
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits until the caches of all started informers have synced.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InternalInformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
informer = newFunc(f.client, f.defaultResync)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Samplecontroller() samplecontroller.Interface
}
func (f *sharedInformerFactory) Samplecontroller() samplecontroller.Interface {
return samplecontroller.New(f, f.namespace, f.tweakListOptions)
}
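The factory above is consumed the same way in either version: build it, register handlers, call the non-blocking Start, then wait for the caches. A minimal sketch, assuming the newer generated/ package layout shown elsewhere in this compare and a kubeconfig at the default location:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
	informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	exampleClient, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	factory := informers.NewSharedInformerFactory(exampleClient, 30*time.Second)
	fooInformer := factory.Samplecontroller().V1alpha1().Foos().Informer()

	fooInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { fmt.Println("Foo added") },
	})

	// Start is non-blocking; each informer runs in its own goroutine.
	factory.Start(ctx.Done())
	if !cache.WaitForCacheSync(ctx.Done(), fooInformer.HasSynced) {
		panic("cache failed to sync")
	}
}
```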

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,13 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
// This file was automatically generated by informer-gen
package externalversions
import (
"fmt"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,20 +14,18 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
// This file was automatically generated by informer-gen
package internalinterfaces
import (
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
time "time"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -36,5 +34,4 @@ type SharedInformerFactory interface {
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
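TweakListOptionsFunc, defined just above, is how every lister and watcher created by a filtered factory ends up sharing one server-side filter. A hedged sketch using NewFilteredSharedInformerFactory (the `newFilteredFactory` helper, namespace, and label selector are illustrative; generated/ import paths assumed):

```go
package main

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
	informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
)

func newFilteredFactory(client clientset.Interface) informers.SharedInformerFactory {
	tweak := func(opts *metav1.ListOptions) {
		opts.LabelSelector = "app=sample"
	}
	// Only objects in "team-a" matching app=sample will be listed and watched.
	return informers.NewFilteredSharedInformerFactory(client, 30*time.Second, "team-a", tweak)
}
```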

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,13 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
// This file was automatically generated by informer-gen
package samplecontroller
import (
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1"
)
// Interface provides access to each of this group's versions.

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,22 +14,20 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
// This file was automatically generated by informer-gen
package v1alpha1
import (
"context"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
samplecontroller_v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
time "time"
)
// FooInformer provides access to a shared informer and lister for
@@ -62,16 +60,16 @@ func NewFilteredFooInformer(client versioned.Interface, namespace string, resync
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SamplecontrollerV1alpha1().Foos(namespace).List(context.TODO(), options)
return client.SamplecontrollerV1alpha1().Foos(namespace).List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.SamplecontrollerV1alpha1().Foos(namespace).Watch(context.TODO(), options)
return client.SamplecontrollerV1alpha1().Foos(namespace).Watch(options)
},
},
&samplecontrollerv1alpha1.Foo{},
&samplecontroller_v1alpha1.Foo{},
resyncPeriod,
indexers,
)
@@ -82,7 +80,7 @@ func (f *fooInformer) defaultInformer(client versioned.Interface, resyncPeriod t
}
func (f *fooInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&samplecontrollerv1alpha1.Foo{}, f.defaultInformer)
return f.factory.InformerFor(&samplecontroller_v1alpha1.Foo{}, f.defaultInformer)
}
func (f *fooInformer) Lister() v1alpha1.FooLister {

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,12 +14,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
// This file was automatically generated by informer-gen
package v1alpha1
import (
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
// This file was automatically generated by lister-gen
package v1alpha1

View File

@@ -1,5 +1,5 @@
/*
Copyright The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
// This file was automatically generated by lister-gen
package v1alpha1
@@ -26,10 +26,8 @@ import (
)
// FooLister helps list Foos.
// All objects returned here must be treated as read-only.
type FooLister interface {
// List lists all Foos in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.Foo, err error)
// Foos returns an object that can list and get Foos.
Foos(namespace string) FooNamespaceLister
@@ -60,13 +58,10 @@ func (s *fooLister) Foos(namespace string) FooNamespaceLister {
}
// FooNamespaceLister helps list and get Foos.
// All objects returned here must be treated as read-only.
type FooNamespaceLister interface {
// List lists all Foos in the indexer for a given namespace.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*v1alpha1.Foo, err error)
// Get retrieves the Foo from the indexer for a given namespace and name.
// Objects returned here must be treated as read-only.
Get(name string) (*v1alpha1.Foo, error)
FooNamespaceListerExpansion
}
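The lister interfaces above answer reads from the informer's local index rather than the API server. A short sketch of the two typical controller-side lookups (the helper names are illustrative; generated/ import paths assumed):

```go
package controller

import (
	"k8s.io/apimachinery/pkg/labels"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	listers "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
)

// allFoosInNamespace returns the cached Foos in ns; returned objects are
// read-only and must be DeepCopy'd before mutation.
func allFoosInNamespace(lister listers.FooLister, ns string) ([]*samplev1alpha1.Foo, error) {
	return lister.Foos(ns).List(labels.Everything())
}

// fooByName is the usual namespace/name lookup done while syncing a key.
func fooByName(lister listers.FooLister, ns, name string) (*samplev1alpha1.Foo, error) {
	return lister.Foos(ns).Get(name)
}
```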

View File

@@ -1,195 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"context"
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
scheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
)
// FoosGetter has a method to return a FooInterface.
// A group's client should implement this interface.
type FoosGetter interface {
Foos(namespace string) FooInterface
}
// FooInterface has methods to work with Foo resources.
type FooInterface interface {
Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (*v1alpha1.Foo, error)
Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error)
UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (*v1alpha1.Foo, error)
Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.Foo, error)
List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.FooList, error)
Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error)
FooExpansion
}
// foos implements FooInterface
type foos struct {
client rest.Interface
ns string
}
// newFoos returns a Foos
func newFoos(c *SamplecontrollerV1alpha1Client, namespace string) *foos {
return &foos{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the foo, and returns the corresponding foo object, and an error if there is any.
func (c *foos) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Get().
Namespace(c.ns).
Resource("foos").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do(ctx).
Into(result)
return
}
// List takes label and field selectors, and returns the list of Foos that match those selectors.
func (c *foos) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.FooList{}
err = c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do(ctx).
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested foos.
func (c *foos) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch(ctx)
}
// Create takes the representation of a foo and creates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *foos) Create(ctx context.Context, foo *v1alpha1.Foo, opts v1.CreateOptions) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Post().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Body(foo).
Do(ctx).
Into(result)
return
}
// Update takes the representation of a foo and updates it. Returns the server's representation of the foo, and an error, if there is any.
func (c *foos) Update(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Put().
Namespace(c.ns).
Resource("foos").
Name(foo.Name).
VersionedParams(&opts, scheme.ParameterCodec).
Body(foo).
Do(ctx).
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *foos) UpdateStatus(ctx context.Context, foo *v1alpha1.Foo, opts v1.UpdateOptions) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Put().
Namespace(c.ns).
Resource("foos").
Name(foo.Name).
SubResource("status").
VersionedParams(&opts, scheme.ParameterCodec).
Body(foo).
Do(ctx).
Into(result)
return
}
// Delete takes name of the foo and deletes it. Returns an error if one occurs.
func (c *foos) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("foos").
Name(name).
Body(&opts).
Do(ctx).
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *foos) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
var timeout time.Duration
if listOpts.TimeoutSeconds != nil {
timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("foos").
VersionedParams(&listOpts, scheme.ParameterCodec).
Timeout(timeout).
Body(&opts).
Do(ctx).
Error()
}
// Patch applies the patch and returns the patched foo.
func (c *foos) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.Foo, err error) {
result = &v1alpha1.Foo{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("foos").
Name(name).
SubResource(subresources...).
VersionedParams(&opts, scheme.ParameterCodec).
Body(data).
Do(ctx).
Into(result)
return
}
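The context-aware typed client shown in full above is driven like this. A minimal sketch, assuming the generated/ package layout, the `Spec.Replicas *int32` field evidenced by the deepcopy code, and a kubeconfig at the default location:

```go
package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"

	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	ctx := context.Background()
	replicas := int32(1)
	foo := &samplev1alpha1.Foo{
		ObjectMeta: metav1.ObjectMeta{Name: "example-foo"},
		Spec:       samplev1alpha1.FooSpec{Replicas: &replicas},
	}

	// Every call now takes a context plus explicit options.
	created, err := client.SamplecontrollerV1alpha1().Foos("default").
		Create(ctx, foo, metav1.CreateOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("created", created.Name)

	got, err := client.SamplecontrollerV1alpha1().Foos("default").
		Get(ctx, "example-foo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("spec replicas:", *got.Spec.Replicas)
}
```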

View File

@@ -1,251 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
samplecontroller "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
// wg tracks how many goroutines were started.
wg sync.WaitGroup
// shuttingDown is true when Shutdown has been called. It may still be running
// because it needs to wait for goroutines.
shuttingDown bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
if f.shuttingDown {
return
}
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
f.wg.Add(1)
// We need a new variable in each loop iteration,
// otherwise the goroutine would use the loop variable
// and that keeps changing.
informer := informer
go func() {
defer f.wg.Done()
informer.Run(stopCh)
}()
f.startedInformers[informerType] = true
}
}
}
func (f *sharedInformerFactory) Shutdown() {
f.lock.Lock()
f.shuttingDown = true
f.lock.Unlock()
// Will return immediately if there is nothing to wait for.
f.wg.Wait()
}
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
//
// It is typically used like this:
//
// ctx, cancel := context.WithCancel(context.Background())
// defer cancel()
// factory := NewSharedInformerFactory(client, resyncPeriod)
// defer factory.Shutdown() // Returns immediately if nothing was started.
// genericInformer := factory.ForResource(resource)
// typedInformer := factory.SomeAPIGroup().V1().SomeType()
// factory.Start(ctx.Done()) // Start processing these informers.
// synced := factory.WaitForCacheSync(ctx.Done())
// for v, ok := range synced {
// if !ok {
// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
// return
// }
// }
//
// // Informers can also be created after Start, but then
// // Start must be called again:
// anotherGenericInformer := factory.ForResource(resource)
// factory.Start(ctx.Done())
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
// Start initializes all requested informers. They are handled in goroutines
// which run until the stop channel gets closed.
Start(stopCh <-chan struct{})
// Shutdown marks a factory as shutting down. At that point no new
// informers can be started anymore and Start will return without
// doing anything.
//
// In addition, Shutdown blocks until all goroutines have terminated. For that
// to happen, the close channel(s) that they were started with must be closed,
// either before Shutdown gets called or while it is waiting.
//
// Shutdown may be called multiple times, even concurrently. All such calls will
// block until all goroutines have terminated.
Shutdown()
// WaitForCacheSync blocks until all started informers' caches were synced
// or the stop channel gets closed.
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
// ForResource gives generic access to a shared informer of the matching type.
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
// InformerFor returns the SharedIndexInformer for obj using an internal
// client.
InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
Samplecontroller() samplecontroller.Interface
}
func (f *sharedInformerFactory) Samplecontroller() samplecontroller.Interface {
return samplecontroller.New(f, f.namespace, f.tweakListOptions)
}
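
The functional options above compose at construction time. The sketch below wires a namespace restriction, a list filter, and a per-type resync period into one factory; the `v1alpha1` API package path and the concrete values are assumptions, not part of the listing.

```go
package demo

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1" // assumed API package path
	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
	"k8s.io/sample-controller/pkg/generated/informers/externalversions"
)

func newFactory(client clientset.Interface) externalversions.SharedInformerFactory {
	return externalversions.NewSharedInformerFactoryWithOptions(
		client,
		30*time.Second, // default resync for informers without a custom period
		externalversions.WithNamespace("demo"),
		externalversions.WithTweakListOptions(func(o *metav1.ListOptions) {
			o.LabelSelector = "app=demo"
		}),
		externalversions.WithCustomResyncConfig(map[metav1.Object]time.Duration{
			&samplev1alpha1.Foo{}: 10 * time.Minute,
		}),
	)
}
```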


@@ -17,28 +17,27 @@ limitations under the License.
 package signals
 import (
-"context"
 "os"
 "os/signal"
 )
 var onlyOneSignalHandler = make(chan struct{})
-// SetupSignalHandler registered for SIGTERM and SIGINT. A context is returned
-// which is cancelled on one of these signals. If a second signal is caught,
-// the program is terminated with exit code 1.
-func SetupSignalHandler() context.Context {
+// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned
+// which is closed on one of these signals. If a second signal is caught, the program
+// is terminated with exit code 1.
+func SetupSignalHandler() (stopCh <-chan struct{}) {
 close(onlyOneSignalHandler) // panics when called twice
+stop := make(chan struct{})
 c := make(chan os.Signal, 2)
-ctx, cancel := context.WithCancel(context.Background())
 signal.Notify(c, shutdownSignals...)
 go func() {
 <-c
-cancel()
+close(stop)
 <-c
 os.Exit(1) // second signal. Exit directly.
 }()
-return ctx
+return stop
 }
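
Assuming the stop-channel form of SetupSignalHandler shown above, the channel is typically handed straight to the informer factory from the earlier listing. A rough sketch, with import paths assumed:

```go
package demo

import (
	"log"
	"time"

	clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
	"k8s.io/sample-controller/pkg/generated/informers/externalversions"
	"k8s.io/sample-controller/pkg/signals" // assumed import path for the package above
)

func run(client clientset.Interface) {
	stopCh := signals.SetupSignalHandler() // closed on SIGTERM/SIGINT

	factory := externalversions.NewSharedInformerFactory(client, 30*time.Second)
	// Request the informers you need here (e.g. via factory.ForResource) before Start.
	factory.Start(stopCh)

	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			log.Fatalf("failed to sync informer cache for %v", typ)
		}
	}

	<-stopCh // block until the first termination signal
}
```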


@@ -1,4 +1,3 @@
//go:build !windows
// +build !windows
/*

5
vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored Normal file

@@ -0,0 +1,5 @@
*.sublime-*
.DS_Store
*.swp
*.swo
tags

7
vendor/github.com/PuerkitoBio/purell/.travis.yml generated vendored Normal file

@@ -0,0 +1,7 @@
language: go
go:
- 1.4
- 1.5
- 1.6
- tip

12
vendor/github.com/PuerkitoBio/purell/LICENSE generated vendored Normal file

@@ -0,0 +1,12 @@
Copyright (c) 2012, Martin Angers
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

185
vendor/github.com/PuerkitoBio/purell/README.md generated vendored Normal file

@@ -0,0 +1,185 @@
# Purell
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
[![build status](https://secure.travis-ci.org/PuerkitoBio/purell.png)](http://travis-ci.org/PuerkitoBio/purell)
## Install
`go get github.com/PuerkitoBio/purell`
## Changelog
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
* **v0.2.0** : Add benchmarks, Attempt IDN support.
* **v0.1.0** : Initial release.
## Examples
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
```go
package purell
import (
"fmt"
"net/url"
)
func ExampleNormalizeURLString() {
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
panic(err)
} else {
fmt.Print(normalized)
}
// Output: http://somewebsite.com:80/Amazing%3F/url/
}
func ExampleMustNormalizeURLString() {
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
FlagsUnsafeGreedy)
fmt.Print(normalized)
// Output: http://somewebsite.com/Amazing%FA/url
}
func ExampleNormalizeURL() {
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
panic(err)
} else {
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
fmt.Print(normalized)
}
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
}
```
## API
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
```go
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover test cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
```
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
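
For instance, a custom set that keeps the aggressive normalizations but opts back out of the https-to-http downgrade might be built like this (the URL and the expected output are illustrative):

```go
package main

import (
	"fmt"
	"log"

	"github.com/PuerkitoBio/purell"
)

func main() {
	// Everything in FlagsUnsafeGreedy except the https->http downgrade.
	flags := purell.FlagsUnsafeGreedy &^ purell.FlagForceHTTP

	normalized, err := purell.NormalizeURLString("HTTPS://www.Example.com/a/./b/../c", flags)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(normalized) // expected: https://example.com/a/c
}
```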
The [full godoc reference is available on gopkgdoc][godoc].
Some things to note:
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
- %24 -> $
- %26 -> &
- %2B-%3B -> +,-./0123456789:;
- %3D -> =
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
- %5F -> _
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
- %7E -> ~
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
### Safe vs Usually Safe vs Unsafe
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
Consider the following URL:
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
Normalizing with the `FlagsSafe` gives:
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
With the `FlagsUsuallySafeGreedy`:
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
And with `FlagsUnsafeGreedy`:
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
## TODOs
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
## Thanks / Contributions
@rogpeppe
@jehiah
@opennota
@pchristopher1275
@zenovich
## License
The [BSD 3-Clause license][bsd].
[bsd]: http://opensource.org/licenses/BSD-3-Clause
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
[iss7]: https://github.com/PuerkitoBio/purell/issues/7

375
vendor/github.com/PuerkitoBio/purell/purell.go generated vendored Normal file

@@ -0,0 +1,375 @@
/*
Package purell offers URL normalization as described on the wikipedia page:
http://en.wikipedia.org/wiki/URL_normalization
*/
package purell
import (
"bytes"
"fmt"
"net/url"
"regexp"
"sort"
"strconv"
"strings"
"github.com/PuerkitoBio/urlesc"
"golang.org/x/net/idna"
"golang.org/x/text/secure/precis"
"golang.org/x/text/unicode/norm"
)
// A set of normalization flags determines how a URL will
// be normalized.
type NormalizationFlags uint
const (
// Safe normalizations
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
FlagLowercaseHost // http://HOST -> http://host
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
FlagRemoveDefaultPort // http://host:80 -> http://host
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
// Usually safe normalizations
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
// Unsafe normalizations
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
FlagRemoveFragment // http://host/path#fragment -> http://host/path
FlagForceHTTP // https://host -> http://host
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
FlagRemoveWWW // http://www.host/ -> http://host/
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
// Normalizations not in the wikipedia article, required to cover test cases
// submitted by jehiah
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
// Convenience set of safe normalizations
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
// Convenience set of usually safe normalizations (includes FlagsSafe)
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
// Convenience set of all available flags
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
)
const (
defaultHttpPort = ":80"
defaultHttpsPort = ":443"
)
// Regular expressions used by the normalizations
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
var rxEmptyPort = regexp.MustCompile(`:+$`)
// Map of flags to implementation function.
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
// Since maps have undefined traversing order, make a slice of ordered keys
var flagsOrder = []NormalizationFlags{
FlagLowercaseScheme,
FlagLowercaseHost,
FlagRemoveDefaultPort,
FlagRemoveDirectoryIndex,
FlagRemoveDotSegments,
FlagRemoveFragment,
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
FlagRemoveDuplicateSlashes,
FlagRemoveWWW,
FlagAddWWW,
FlagSortQuery,
FlagDecodeDWORDHost,
FlagDecodeOctalHost,
FlagDecodeHexHost,
FlagRemoveUnnecessaryHostDots,
FlagRemoveEmptyPortSeparator,
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
FlagAddTrailingSlash,
}
// ... and then the map, where order is unimportant
var flags = map[NormalizationFlags]func(*url.URL){
FlagLowercaseScheme: lowercaseScheme,
FlagLowercaseHost: lowercaseHost,
FlagRemoveDefaultPort: removeDefaultPort,
FlagRemoveDirectoryIndex: removeDirectoryIndex,
FlagRemoveDotSegments: removeDotSegments,
FlagRemoveFragment: removeFragment,
FlagForceHTTP: forceHTTP,
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
FlagRemoveWWW: removeWWW,
FlagAddWWW: addWWW,
FlagSortQuery: sortQuery,
FlagDecodeDWORDHost: decodeDWORDHost,
FlagDecodeOctalHost: decodeOctalHost,
FlagDecodeHexHost: decodeHexHost,
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
FlagRemoveTrailingSlash: removeTrailingSlash,
FlagAddTrailingSlash: addTrailingSlash,
}
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
// It takes an URL string as input, as well as the normalization flags.
func MustNormalizeURLString(u string, f NormalizationFlags) string {
result, e := NormalizeURLString(u, f)
if e != nil {
panic(e)
}
return result
}
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
// It takes an URL string as input, as well as the normalization flags.
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
if parsed, e := url.Parse(u); e != nil {
return "", e
} else {
options := make([]precis.Option, 1, 3)
options[0] = precis.IgnoreCase
if f&FlagLowercaseHost == FlagLowercaseHost {
options = append(options, precis.FoldCase())
}
options = append(options, precis.Norm(norm.NFC))
profile := precis.NewFreeform(options...)
if parsed.Host, e = idna.ToASCII(profile.NewTransformer().String(parsed.Host)); e != nil {
return "", e
}
return NormalizeURL(parsed, f), nil
}
panic("Unreachable code.")
}
// NormalizeURL returns the normalized string.
// It takes a parsed URL object as input, as well as the normalization flags.
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
for _, k := range flagsOrder {
if f&k == k {
flags[k](u)
}
}
return urlesc.Escape(u)
}
func lowercaseScheme(u *url.URL) {
if len(u.Scheme) > 0 {
u.Scheme = strings.ToLower(u.Scheme)
}
}
func lowercaseHost(u *url.URL) {
if len(u.Host) > 0 {
u.Host = strings.ToLower(u.Host)
}
}
func removeDefaultPort(u *url.URL) {
if len(u.Host) > 0 {
scheme := strings.ToLower(u.Scheme)
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
return ""
}
return val
})
}
}
func removeTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if strings.HasSuffix(u.Path, "/") {
u.Path = u.Path[:l-1]
}
} else if l = len(u.Host); l > 0 {
if strings.HasSuffix(u.Host, "/") {
u.Host = u.Host[:l-1]
}
}
}
func addTrailingSlash(u *url.URL) {
if l := len(u.Path); l > 0 {
if !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
} else if l = len(u.Host); l > 0 {
if !strings.HasSuffix(u.Host, "/") {
u.Host += "/"
}
}
}
func removeDotSegments(u *url.URL) {
if len(u.Path) > 0 {
var dotFree []string
var lastIsDot bool
sections := strings.Split(u.Path, "/")
for _, s := range sections {
if s == ".." {
if len(dotFree) > 0 {
dotFree = dotFree[:len(dotFree)-1]
}
} else if s != "." {
dotFree = append(dotFree, s)
}
lastIsDot = (s == "." || s == "..")
}
// Special case if host does not end with / and new path does not begin with /
u.Path = strings.Join(dotFree, "/")
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
u.Path = "/" + u.Path
}
// Special case if the last segment was a dot, make sure the path ends with a slash
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
u.Path += "/"
}
}
}
func removeDirectoryIndex(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
}
}
func removeFragment(u *url.URL) {
u.Fragment = ""
}
func forceHTTP(u *url.URL) {
if strings.ToLower(u.Scheme) == "https" {
u.Scheme = "http"
}
}
func removeDuplicateSlashes(u *url.URL) {
if len(u.Path) > 0 {
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
}
}
func removeWWW(u *url.URL) {
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = u.Host[4:]
}
}
func addWWW(u *url.URL) {
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
u.Host = "www." + u.Host
}
}
func sortQuery(u *url.URL) {
q := u.Query()
if len(q) > 0 {
arKeys := make([]string, len(q))
i := 0
for k, _ := range q {
arKeys[i] = k
i++
}
sort.Strings(arKeys)
buf := new(bytes.Buffer)
for _, k := range arKeys {
sort.Strings(q[k])
for _, v := range q[k] {
if buf.Len() > 0 {
buf.WriteRune('&')
}
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
}
}
// Rebuild the raw query string
u.RawQuery = buf.String()
}
}
func decodeDWORDHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
var parts [4]int64
dword, _ := strconv.ParseInt(matches[1], 10, 0)
for i, shift := range []uint{24, 16, 8, 0} {
parts[i] = dword >> shift & 0xFF
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
}
}
}
func decodeOctalHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
var parts [4]int64
for i := 1; i <= 4; i++ {
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
}
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
}
}
}
func decodeHexHost(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
// Conversion is safe because of regex validation
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
// Set host as DWORD (base 10) encoded host
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
// The rest is the same as decoding a DWORD host
decodeDWORDHost(u)
}
}
}
func removeUnncessaryHostDots(u *url.URL) {
if len(u.Host) > 0 {
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
// Trim the leading and trailing dots
u.Host = strings.Trim(matches[1], ".")
if len(matches) > 2 {
u.Host += matches[2]
}
}
}
}
func removeEmptyPortSeparator(u *url.URL) {
if len(u.Host) > 0 {
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
}
}

11
vendor/github.com/PuerkitoBio/urlesc/.travis.yml generated vendored Normal file

@@ -0,0 +1,11 @@
language: go
go:
- 1.4
- tip
install:
- go build .
script:
- go test -v

27
vendor/github.com/PuerkitoBio/urlesc/LICENSE generated vendored Normal file

@@ -0,0 +1,27 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

16
vendor/github.com/PuerkitoBio/urlesc/README.md generated vendored Normal file

@@ -0,0 +1,16 @@
urlesc [![Build Status](https://travis-ci.org/PuerkitoBio/urlesc.png?branch=master)](https://travis-ci.org/PuerkitoBio/urlesc) [![GoDoc](http://godoc.org/github.com/PuerkitoBio/urlesc?status.svg)](http://godoc.org/github.com/PuerkitoBio/urlesc)
======
Package urlesc implements query escaping as per RFC 3986.
It contains some parts of the net/url package, modified so as to allow
some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
## Install
go get github.com/PuerkitoBio/urlesc
## License
Go license (BSD-3-Clause)

180
vendor/github.com/PuerkitoBio/urlesc/urlesc.go generated vendored Normal file

@@ -0,0 +1,180 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package urlesc implements query escaping as per RFC 3986.
// It contains some parts of the net/url package, modified so as to allow
// some reserved characters incorrectly escaped by net/url.
// See https://github.com/golang/go/issues/5684
package urlesc
import (
"bytes"
"net/url"
"strings"
)
type encoding int
const (
encodePath encoding = 1 + iota
encodeUserPassword
encodeQueryComponent
encodeFragment
)
// Return true if the specified character should be escaped when
// appearing in a URL string, according to RFC 3986.
func shouldEscape(c byte, mode encoding) bool {
// §2.3 Unreserved characters (alphanum)
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
return false
}
switch c {
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
return false
// §2.2 Reserved characters (reserved)
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
// Different sections of the URL allow a few of
// the reserved characters to appear unescaped.
switch mode {
case encodePath: // §3.3
// The RFC allows sub-delims and : @.
// '/', '[' and ']' can be used to assign meaning to individual path
// segments. This package only manipulates the path as a whole,
// so we allow those as well. That leaves only ? and # to escape.
return c == '?' || c == '#'
case encodeUserPassword: // §3.2.1
// The RFC allows : and sub-delims in
// userinfo. The parsing of userinfo treats ':' as special so we must escape
// all the gen-delims.
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
case encodeQueryComponent: // §3.4
// The RFC allows / and ?.
return c != '/' && c != '?'
case encodeFragment: // §4.1
// The RFC text is silent but the grammar allows
// everything, so escape nothing but #
return c == '#'
}
}
// Everything else must be escaped.
return true
}
// QueryEscape escapes the string so it can be safely placed
// inside a URL query.
func QueryEscape(s string) string {
return escape(s, encodeQueryComponent)
}
func escape(s string, mode encoding) string {
spaceCount, hexCount := 0, 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c, mode) {
if c == ' ' && mode == encodeQueryComponent {
spaceCount++
} else {
hexCount++
}
}
}
if spaceCount == 0 && hexCount == 0 {
return s
}
t := make([]byte, len(s)+2*hexCount)
j := 0
for i := 0; i < len(s); i++ {
switch c := s[i]; {
case c == ' ' && mode == encodeQueryComponent:
t[j] = '+'
j++
case shouldEscape(c, mode):
t[j] = '%'
t[j+1] = "0123456789ABCDEF"[c>>4]
t[j+2] = "0123456789ABCDEF"[c&15]
j += 3
default:
t[j] = s[i]
j++
}
}
return string(t)
}
var uiReplacer = strings.NewReplacer(
"%21", "!",
"%27", "'",
"%28", "(",
"%29", ")",
"%2A", "*",
)
// unescapeUserinfo unescapes some characters that need not be escaped as per RFC 3986.
func unescapeUserinfo(s string) string {
return uiReplacer.Replace(s)
}
// Escape reassembles the URL into a valid URL string.
// The general form of the result is one of:
//
// scheme:opaque
// scheme://userinfo@host/path?query#fragment
//
// If u.Opaque is non-empty, String uses the first form;
// otherwise it uses the second form.
//
// In the second form, the following rules apply:
// - if u.Scheme is empty, scheme: is omitted.
// - if u.User is nil, userinfo@ is omitted.
// - if u.Host is empty, host/ is omitted.
// - if u.Scheme and u.Host are empty and u.User is nil,
// the entire scheme://userinfo@host/ is omitted.
// - if u.Host is non-empty and u.Path begins with a /,
// the form host/path does not add its own /.
// - if u.RawQuery is empty, ?query is omitted.
// - if u.Fragment is empty, #fragment is omitted.
func Escape(u *url.URL) string {
var buf bytes.Buffer
if u.Scheme != "" {
buf.WriteString(u.Scheme)
buf.WriteByte(':')
}
if u.Opaque != "" {
buf.WriteString(u.Opaque)
} else {
if u.Scheme != "" || u.Host != "" || u.User != nil {
buf.WriteString("//")
if ui := u.User; ui != nil {
buf.WriteString(unescapeUserinfo(ui.String()))
buf.WriteByte('@')
}
if h := u.Host; h != "" {
buf.WriteString(h)
}
}
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
buf.WriteByte('/')
}
buf.WriteString(escape(u.Path, encodePath))
}
if u.RawQuery != "" {
buf.WriteByte('?')
buf.WriteString(u.RawQuery)
}
if u.Fragment != "" {
buf.WriteByte('#')
buf.WriteString(escape(u.Fragment, encodeFragment))
}
return buf.String()
}

15
vendor/github.com/davecgh/go-spew/LICENSE generated vendored Normal file

@@ -0,0 +1,15 @@
ISC License
Copyright (c) 2012-2016 Dave Collins <dave@davec.name>
Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

152
vendor/github.com/davecgh/go-spew/spew/bypass.go generated vendored Normal file

@@ -0,0 +1,152 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is not running on Google App Engine, compiled by GopherJS, and
// "-tags safe" is not added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build !js,!appengine,!safe,!disableunsafe
package spew
import (
"reflect"
"unsafe"
)
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = false
// ptrSize is the size of a pointer on the current arch.
ptrSize = unsafe.Sizeof((*byte)(nil))
)
var (
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
// internal reflect.Value fields. These values are valid before golang
// commit ecccf07e7f9d which changed the format. They are also valid
// after commit 82f48826c6c7 which changed the format again to mirror
// the original format. Code in the init function updates these offsets
// as necessary.
offsetPtr = uintptr(ptrSize)
offsetScalar = uintptr(0)
offsetFlag = uintptr(ptrSize * 2)
// flagKindWidth and flagKindShift indicate various bits that the
// reflect package uses internally to track kind information.
//
// flagRO indicates whether or not the value field of a reflect.Value is
// read-only.
//
// flagIndir indicates whether the value field of a reflect.Value is
// the actual data or a pointer to the data.
//
// These values are valid before golang commit 90a7c3c86944 which
// changed their positions. Code in the init function updates these
// flags as necessary.
flagKindWidth = uintptr(5)
flagKindShift = uintptr(flagKindWidth - 1)
flagRO = uintptr(1 << 0)
flagIndir = uintptr(1 << 1)
)
func init() {
// Older versions of reflect.Value stored small integers directly in the
// ptr field (which is named val in the older versions). Versions
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
// scalar for this purpose which unfortunately came before the flag
// field, so the offset of the flag field is different for those
// versions.
//
// This code constructs a new reflect.Value from a known small integer
// and checks if the size of the reflect.Value struct indicates it has
// the scalar field. When it does, the offsets are updated accordingly.
vv := reflect.ValueOf(0xf00)
if unsafe.Sizeof(vv) == (ptrSize * 4) {
offsetScalar = ptrSize * 2
offsetFlag = ptrSize * 3
}
// Commit 90a7c3c86944 changed the flag positions such that the low
// order bits are the kind. This code extracts the kind from the flags
// field and ensures it's the correct type. When it's not, the flag
// order has been changed to the newer format, so the flags are updated
// accordingly.
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
upfv := *(*uintptr)(upf)
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
flagKindShift = 0
flagRO = 1 << 5
flagIndir = 1 << 6
// Commit adf9b30e5594 modified the flags to separate the
// flagRO flag into two bits which specifies whether or not the
// field is embedded. This causes flagIndir to move over a bit
// and means that flagRO is the combination of either of the
// original flagRO bit and the new bit.
//
// This code detects the change by extracting what used to be
// the indirect bit to ensure it's set. When it's not, the flag
// order has been changed to the newer format, so the flags are
// updated accordingly.
if upfv&flagIndir == 0 {
flagRO = 3 << 5
flagIndir = 1 << 7
}
}
}
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
// the typical safety restrictions preventing access to unaddressable and
// unexported data. It works by digging the raw pointer to the underlying
// value out of the protected value and generating a new unprotected (unsafe)
// reflect.Value to it.
//
// This allows us to check for implementations of the Stringer and error
// interfaces to be used for pretty printing ordinarily unaddressable and
// inaccessible values such as unexported struct fields.
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
indirects := 1
vt := v.Type()
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
if rvf&flagIndir != 0 {
vt = reflect.PtrTo(v.Type())
indirects++
} else if offsetScalar != 0 {
// The value is in the scalar field when it's not one of the
// reference types.
switch vt.Kind() {
case reflect.Uintptr:
case reflect.Chan:
case reflect.Func:
case reflect.Map:
case reflect.Ptr:
case reflect.UnsafePointer:
default:
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
offsetScalar)
}
}
pv := reflect.NewAt(vt, upv)
rv = pv
for i := 0; i < indirects; i++ {
rv = rv.Elem()
}
return rv
}

38
vendor/github.com/davecgh/go-spew/spew/bypasssafe.go generated vendored Normal file

@@ -0,0 +1,38 @@
// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
//
// Permission to use, copy, modify, and distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
// NOTE: Due to the following build constraints, this file will only be compiled
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
// +build js appengine safe disableunsafe
package spew
import "reflect"
const (
// UnsafeDisabled is a build-time constant which specifies whether or
// not access to the unsafe package is available.
UnsafeDisabled = true
)
// unsafeReflectValue typically converts the passed reflect.Value into one
// that bypasses the typical safety restrictions preventing access to
// unaddressable and unexported data. However, doing this relies on access to
// the unsafe package. This is a stub version which simply returns the passed
// reflect.Value when the unsafe package is not available.
func unsafeReflectValue(v reflect.Value) reflect.Value {
return v
}

341
vendor/github.com/davecgh/go-spew/spew/common.go generated vendored Normal file

@@ -0,0 +1,341 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"reflect"
"sort"
"strconv"
)
// Some constants in the form of bytes to avoid string overhead. This mirrors
// the technique used in the fmt package.
var (
panicBytes = []byte("(PANIC=")
plusBytes = []byte("+")
iBytes = []byte("i")
trueBytes = []byte("true")
falseBytes = []byte("false")
interfaceBytes = []byte("(interface {})")
commaNewlineBytes = []byte(",\n")
newlineBytes = []byte("\n")
openBraceBytes = []byte("{")
openBraceNewlineBytes = []byte("{\n")
closeBraceBytes = []byte("}")
asteriskBytes = []byte("*")
colonBytes = []byte(":")
colonSpaceBytes = []byte(": ")
openParenBytes = []byte("(")
closeParenBytes = []byte(")")
spaceBytes = []byte(" ")
pointerChainBytes = []byte("->")
nilAngleBytes = []byte("<nil>")
maxNewlineBytes = []byte("<max depth reached>\n")
maxShortBytes = []byte("<max>")
circularBytes = []byte("<already shown>")
circularShortBytes = []byte("<shown>")
invalidAngleBytes = []byte("<invalid>")
openBracketBytes = []byte("[")
closeBracketBytes = []byte("]")
percentBytes = []byte("%")
precisionBytes = []byte(".")
openAngleBytes = []byte("<")
closeAngleBytes = []byte(">")
openMapBytes = []byte("map[")
closeMapBytes = []byte("]")
lenEqualsBytes = []byte("len=")
capEqualsBytes = []byte("cap=")
)
// hexDigits is used to map a decimal value to a hex digit.
var hexDigits = "0123456789abcdef"
// catchPanic handles any panics that might occur during the handleMethods
// calls.
func catchPanic(w io.Writer, v reflect.Value) {
if err := recover(); err != nil {
w.Write(panicBytes)
fmt.Fprintf(w, "%v", err)
w.Write(closeParenBytes)
}
}
// handleMethods attempts to call the Error and String methods on the underlying
// type the passed reflect.Value represents and outputs the result to Writer w.
//
// It handles panics in any called methods by catching and displaying the error
// as the formatted value.
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
// We need an interface to check if the type implements the error or
// Stringer interface. However, the reflect package won't give us an
// interface on certain things like unexported struct fields in order
// to enforce visibility rules. We use unsafe, when it's available,
// to bypass these restrictions since this package does not mutate the
// values.
if !v.CanInterface() {
if UnsafeDisabled {
return false
}
v = unsafeReflectValue(v)
}
// Choose whether or not to do error and Stringer interface lookups against
// the base type or a pointer to the base type depending on settings.
// Technically calling one of these methods with a pointer receiver can
// mutate the value; however, types which choose to satisfy an error or
// Stringer interface with a pointer receiver should not be mutating their
// state inside these interface methods.
if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
v = unsafeReflectValue(v)
}
if v.CanAddr() {
v = v.Addr()
}
// Is it an error or Stringer?
switch iface := v.Interface().(type) {
case error:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.Error()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.Error()))
return true
case fmt.Stringer:
defer catchPanic(w, v)
if cs.ContinueOnMethod {
w.Write(openParenBytes)
w.Write([]byte(iface.String()))
w.Write(closeParenBytes)
w.Write(spaceBytes)
return false
}
w.Write([]byte(iface.String()))
return true
}
return false
}
// printBool outputs a boolean value as true or false to Writer w.
func printBool(w io.Writer, val bool) {
if val {
w.Write(trueBytes)
} else {
w.Write(falseBytes)
}
}
// printInt outputs a signed integer value to Writer w.
func printInt(w io.Writer, val int64, base int) {
w.Write([]byte(strconv.FormatInt(val, base)))
}
// printUint outputs an unsigned integer value to Writer w.
func printUint(w io.Writer, val uint64, base int) {
w.Write([]byte(strconv.FormatUint(val, base)))
}
// printFloat outputs a floating point value using the specified precision,
// which is expected to be 32 or 64bit, to Writer w.
func printFloat(w io.Writer, val float64, precision int) {
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
}
// printComplex outputs a complex value using the specified float precision
// for the real and imaginary parts to Writer w.
func printComplex(w io.Writer, c complex128, floatPrecision int) {
r := real(c)
w.Write(openParenBytes)
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
i := imag(c)
if i >= 0 {
w.Write(plusBytes)
}
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
w.Write(iBytes)
w.Write(closeParenBytes)
}
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
// prefix to Writer w.
func printHexPtr(w io.Writer, p uintptr) {
// Null pointer.
num := uint64(p)
if num == 0 {
w.Write(nilAngleBytes)
return
}
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
buf := make([]byte, 18)
// It's simpler to construct the hex string right to left.
base := uint64(16)
i := len(buf) - 1
for num >= base {
buf[i] = hexDigits[num%base]
num /= base
i--
}
buf[i] = hexDigits[num]
// Add '0x' prefix.
i--
buf[i] = 'x'
i--
buf[i] = '0'
// Strip unused leading bytes.
buf = buf[i:]
w.Write(buf)
}
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
// elements to be sorted.
type valuesSorter struct {
values []reflect.Value
strings []string // either nil or same len as values
cs *ConfigState
}
// newValuesSorter initializes a valuesSorter instance, which holds a set of
// surrogate keys on which the data should be sorted. It uses flags in
// ConfigState to decide if and how to populate those surrogate keys.
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
vs := &valuesSorter{values: values, cs: cs}
if canSortSimply(vs.values[0].Kind()) {
return vs
}
if !cs.DisableMethods {
vs.strings = make([]string, len(values))
for i := range vs.values {
b := bytes.Buffer{}
if !handleMethods(cs, &b, vs.values[i]) {
vs.strings = nil
break
}
vs.strings[i] = b.String()
}
}
if vs.strings == nil && cs.SpewKeys {
vs.strings = make([]string, len(values))
for i := range vs.values {
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
}
}
return vs
}
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
// directly, or whether it should be considered for sorting by surrogate keys
// (if the ConfigState allows it).
func canSortSimply(kind reflect.Kind) bool {
// This switch parallels valueSortLess, except for the default case.
switch kind {
case reflect.Bool:
return true
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return true
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return true
case reflect.Float32, reflect.Float64:
return true
case reflect.String:
return true
case reflect.Uintptr:
return true
case reflect.Array:
return true
}
return false
}
// Len returns the number of values in the slice. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Len() int {
return len(s.values)
}
// Swap swaps the values at the passed indices. It is part of the
// sort.Interface implementation.
func (s *valuesSorter) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
if s.strings != nil {
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
}
}
// valueSortLess returns whether the first value should sort before the second
// value. It is used by valueSorter.Less as part of the sort.Interface
// implementation.
func valueSortLess(a, b reflect.Value) bool {
switch a.Kind() {
case reflect.Bool:
return !a.Bool() && b.Bool()
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
return a.Int() < b.Int()
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
return a.Uint() < b.Uint()
case reflect.Float32, reflect.Float64:
return a.Float() < b.Float()
case reflect.String:
return a.String() < b.String()
case reflect.Uintptr:
return a.Uint() < b.Uint()
case reflect.Array:
// Compare the contents of both arrays.
l := a.Len()
for i := 0; i < l; i++ {
av := a.Index(i)
bv := b.Index(i)
if av.Interface() == bv.Interface() {
continue
}
return valueSortLess(av, bv)
}
}
return a.String() < b.String()
}
// Less returns whether the value at index i should sort before the
// value at index j. It is part of the sort.Interface implementation.
func (s *valuesSorter) Less(i, j int) bool {
if s.strings == nil {
return valueSortLess(s.values[i], s.values[j])
}
return s.strings[i] < s.strings[j]
}
// sortValues is a sort function that handles both native types and any type that
// can be converted to error or Stringer. Other inputs are sorted according to
// their Value.String() value to ensure display stability.
func sortValues(values []reflect.Value, cs *ConfigState) {
if len(values) == 0 {
return
}
sort.Sort(newValuesSorter(values, cs))
}

306
vendor/github.com/davecgh/go-spew/spew/config.go generated vendored Normal file

@@ -0,0 +1,306 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"io"
"os"
)
// ConfigState houses the configuration options used by spew to format and
// display values. There is a global instance, Config, that is used to control
// all top-level Formatter and Dump functionality. Each ConfigState instance
// provides methods equivalent to the top-level functions.
//
// The zero value for ConfigState provides no indentation. You would typically
// want to set it to a space or a tab.
//
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
// with default settings. See the documentation of NewDefaultConfig for default
// values.
type ConfigState struct {
// Indent specifies the string to use for each indentation level. The
// global config instance that all top-level functions use set this to a
// single space by default. If you would like more indentation, you might
// set this to a tab with "\t" or perhaps two spaces with " ".
Indent string
// MaxDepth controls the maximum number of levels to descend into nested
// data structures. The default, 0, means there is no limit.
//
// NOTE: Circular data structures are properly detected, so it is not
// necessary to set this value unless you specifically want to limit deeply
// nested data structures.
MaxDepth int
// DisableMethods specifies whether or not error and Stringer interfaces are
// invoked for types that implement them.
DisableMethods bool
// DisablePointerMethods specifies whether or not to check for and invoke
// error and Stringer interfaces on types which only accept a pointer
// receiver when the current type is not a pointer.
//
// NOTE: This might be an unsafe action since calling one of these methods
// with a pointer receiver could technically mutate the value, however,
// in practice, types which choose to satisfy an error or Stringer
// interface with a pointer receiver should not be mutating their state
// inside these interface methods. As a result, this option relies on
// access to the unsafe package, so it will not have any effect when
// running in environments without access to the unsafe package such as
// Google App Engine or with the "safe" build tag specified.
DisablePointerMethods bool
// DisablePointerAddresses specifies whether to disable the printing of
// pointer addresses. This is useful when diffing data structures in tests.
DisablePointerAddresses bool
// DisableCapacities specifies whether to disable the printing of capacities
// for arrays, slices, maps and channels. This is useful when diffing
// data structures in tests.
DisableCapacities bool
// ContinueOnMethod specifies whether or not recursion should continue once
// a custom error or Stringer interface is invoked. The default, false,
// means it will print the results of invoking the custom error or Stringer
// interface and return immediately instead of continuing to recurse into
// the internals of the data type.
//
// NOTE: This flag does not have any effect if method invocation is disabled
// via the DisableMethods or DisablePointerMethods options.
ContinueOnMethod bool
// SortKeys specifies map keys should be sorted before being printed. Use
// this to have a more deterministic, diffable output. Note that only
// native types (bool, int, uint, floats, uintptr and string) and types
// that support the error or Stringer interfaces (if methods are
// enabled) are supported, with other types sorted according to the
// reflect.Value.String() output which guarantees display stability.
SortKeys bool
// SpewKeys specifies that, as a last resort attempt, map keys should
// be spewed to strings and sorted by those strings. This is only
// considered if SortKeys is true.
SpewKeys bool
}
// Config is the active configuration of the top-level functions.
// The configuration can be changed by modifying the contents of spew.Config.
var Config = ConfigState{Indent: " "}
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the formatted string as a value that satisfies error. See NewFormatter
// for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, c.convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, c.convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, c.convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, c.convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
return fmt.Print(c.convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, c.convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
return fmt.Println(c.convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprint(a ...interface{}) string {
return fmt.Sprint(c.convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a Formatter interface returned by c.NewFormatter. It returns
// the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, c.convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a Formatter interface returned by c.NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
func (c *ConfigState) Sprintln(a ...interface{}) string {
return fmt.Sprintln(c.convertArgs(a)...)
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
c.Printf, c.Println, or c.Fprintf.
*/
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(c, v)
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
fdump(c, w, a...)
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func (c *ConfigState) Dump(a ...interface{}) {
fdump(c, os.Stdout, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func (c *ConfigState) Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(c, &buf, a...)
return buf.String()
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a spew Formatter interface using
// the ConfigState associated with s.
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = newFormatter(c, arg)
}
return formatters
}
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}
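
As a quick illustration of the ConfigState options documented above, the following sketch builds a dedicated configuration with deterministic, diff-friendly output and contrasts it with the global spew.Config; the `server` type and its values are illustrative assumptions, not part of the vendored package.

```go
package main

import "github.com/davecgh/go-spew/spew"

type server struct {
	Addr    string
	Retries int
	Tags    map[string]bool
}

func main() {
	// A dedicated configuration: tab indentation, sorted map keys for
	// deterministic output, and no pointer addresses (useful in test diffs).
	cs := spew.ConfigState{
		Indent:                  "\t",
		SortKeys:                true,
		DisablePointerAddresses: true,
	}
	cs.Dump(server{
		Addr:    ":8080",
		Retries: 3,
		Tags:    map[string]bool{"beta": true, "alpha": false},
	})

	// The top-level functions use the global spew.Config instead.
	spew.Config.MaxDepth = 2
	spew.Dump(server{Addr: ":9090"})
}
```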

211
vendor/github.com/davecgh/go-spew/spew/doc.go generated vendored Normal file
View File

@@ -0,0 +1,211 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/*
Package spew implements a deep pretty printer for Go data structures to aid in
debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
via the spew.Config global.
It is also possible to create a ConfigState instance that provides methods
equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
spew.Dump(myVar1, myVar2, ...)
You may also call spew.Fdump if you would prefer to output to an arbitrary
io.Writer. For example, to dump to standard error:
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
(main.Foo) {
unexportedField: (*main.Bar)(0xf84002e210)({
flag: (main.Flag) flagTwo,
data: (uintptr) <nil>
}),
ExportedField: (map[interface {}]interface {}) (len=1) {
(string) (len=3) "one": (bool) true
}
}
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
formatter is useful for inline printing of smaller data types similar to the
standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
functions have syntax you are most likely already familiar with:
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Println(myVar, myVar2)
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
See the Index for the full list of convenience functions.
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
See the Printf example for details on the setup of variables being shown
here.
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information
inline with the output. Since spew is intended to provide deep pretty printing
capabilities on structures, it intentionally does not return any errors.
*/
package spew
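
To make the two approaches described above concrete, here is a short, hedged sketch exercising both the Dump style and the Formatter-backed wrappers; the `pair` type is an illustrative assumption.

```go
package main

import (
	"os"

	"github.com/davecgh/go-spew/spew"
)

type pair struct {
	Key string
	Val *int
}

func main() {
	n := 42
	p := pair{Key: "answer", Val: &n}

	// Dump style: newlines, indentation, full types, and pointer addresses.
	spew.Dump(p)
	spew.Fdump(os.Stderr, p)
	os.Stdout.WriteString(spew.Sdump(p))

	// Formatter style: inline output via the %v/%+v/%#v/%#+v verbs.
	spew.Printf("compact: %v  addresses: %+v  types: %#v\n", p, p, p)
}
```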

509
vendor/github.com/davecgh/go-spew/spew/dump.go generated vendored Normal file
View File

@@ -0,0 +1,509 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"encoding/hex"
"fmt"
"io"
"os"
"reflect"
"regexp"
"strconv"
"strings"
)
var (
// uint8Type is a reflect.Type representing a uint8. It is used to
// convert cgo types to uint8 slices for hexdumping.
uint8Type = reflect.TypeOf(uint8(0))
// cCharRE is a regular expression that matches a cgo char.
// It is used to detect character arrays to hexdump them.
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
// char. It is used to detect unsigned character arrays to hexdump
// them.
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
// It is used to detect uint8_t arrays to hexdump them.
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
)
// dumpState contains information about the state of a dump operation.
type dumpState struct {
w io.Writer
depth int
pointers map[uintptr]int
ignoreNextType bool
ignoreNextIndent bool
cs *ConfigState
}
// indent performs indentation according to the depth level and cs.Indent
// option.
func (d *dumpState) indent() {
if d.ignoreNextIndent {
d.ignoreNextIndent = false
return
}
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
}
// unpackValue returns values inside of non-nil interfaces when possible.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface && !v.IsNil() {
v = v.Elem()
}
return v
}
// dumpPtr handles formatting of pointers by indirecting them as necessary.
func (d *dumpState) dumpPtr(v reflect.Value) {
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range d.pointers {
if depth >= d.depth {
delete(d.pointers, k)
}
}
// Keep list of all dereferenced pointers to show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
cycleFound = true
indirects--
break
}
d.pointers[addr] = d.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type information.
d.w.Write(openParenBytes)
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
d.w.Write([]byte(ve.Type().String()))
d.w.Write(closeParenBytes)
// Display pointer information.
if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 {
d.w.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
d.w.Write(pointerChainBytes)
}
printHexPtr(d.w, addr)
}
d.w.Write(closeParenBytes)
}
// Display dereferenced value.
d.w.Write(openParenBytes)
switch {
case nilFound == true:
d.w.Write(nilAngleBytes)
case cycleFound == true:
d.w.Write(circularBytes)
default:
d.ignoreNextType = true
d.dump(ve)
}
d.w.Write(closeParenBytes)
}
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
// reflection) arrays and slices are dumped in hexdump -C fashion.
func (d *dumpState) dumpSlice(v reflect.Value) {
// Determine whether this type should be hex dumped or not. Also,
// for types which should be hexdumped, try to use the underlying data
// first, then fall back to trying to convert them to a uint8 slice.
var buf []uint8
doConvert := false
doHexDump := false
numEntries := v.Len()
if numEntries > 0 {
vt := v.Index(0).Type()
vts := vt.String()
switch {
// C types that need to be converted.
case cCharRE.MatchString(vts):
fallthrough
case cUnsignedCharRE.MatchString(vts):
fallthrough
case cUint8tCharRE.MatchString(vts):
doConvert = true
// Try to use existing uint8 slices and fall back to converting
// and copying if that fails.
case vt.Kind() == reflect.Uint8:
// We need an addressable interface to convert the type
// to a byte slice. However, the reflect package won't
// give us an interface on certain things like
// unexported struct fields in order to enforce
// visibility rules. We use unsafe, when available, to
// bypass these restrictions since this package does not
// mutate the values.
vs := v
if !vs.CanInterface() || !vs.CanAddr() {
vs = unsafeReflectValue(vs)
}
if !UnsafeDisabled {
vs = vs.Slice(0, numEntries)
// Use the existing uint8 slice if it can be
// type asserted.
iface := vs.Interface()
if slice, ok := iface.([]uint8); ok {
buf = slice
doHexDump = true
break
}
}
// The underlying data needs to be converted if it can't
// be type asserted to a uint8 slice.
doConvert = true
}
// Copy and convert the underlying type if needed.
if doConvert && vt.ConvertibleTo(uint8Type) {
// Convert and copy each element into a uint8 byte
// slice.
buf = make([]uint8, numEntries)
for i := 0; i < numEntries; i++ {
vv := v.Index(i)
buf[i] = uint8(vv.Convert(uint8Type).Uint())
}
doHexDump = true
}
}
// Hexdump the entire slice as needed.
if doHexDump {
indent := strings.Repeat(d.cs.Indent, d.depth)
str := indent + hex.Dump(buf)
str = strings.Replace(str, "\n", "\n"+indent, -1)
str = strings.TrimRight(str, d.cs.Indent)
d.w.Write([]byte(str))
return
}
// Recursively call dump for each item.
for i := 0; i < numEntries; i++ {
d.dump(d.unpackValue(v.Index(i)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
// dump is the main workhorse for dumping a value. It uses the passed reflect
// value to figure out what kind of object we are dealing with and formats it
// appropriately. It is a recursive function, however circular data structures
// are detected and handled properly.
func (d *dumpState) dump(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
d.w.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
d.indent()
d.dumpPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !d.ignoreNextType {
d.indent()
d.w.Write(openParenBytes)
d.w.Write([]byte(v.Type().String()))
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
d.ignoreNextType = false
// Display length and capacity if the built-in len and cap functions
// work with the value's kind and the len/cap itself is non-zero.
valueLen, valueCap := 0, 0
switch v.Kind() {
case reflect.Array, reflect.Slice, reflect.Chan:
valueLen, valueCap = v.Len(), v.Cap()
case reflect.Map, reflect.String:
valueLen = v.Len()
}
if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 {
d.w.Write(openParenBytes)
if valueLen != 0 {
d.w.Write(lenEqualsBytes)
printInt(d.w, int64(valueLen), 10)
}
if !d.cs.DisableCapacities && valueCap != 0 {
if valueLen != 0 {
d.w.Write(spaceBytes)
}
d.w.Write(capEqualsBytes)
printInt(d.w, int64(valueCap), 10)
}
d.w.Write(closeParenBytes)
d.w.Write(spaceBytes)
}
// Call Stringer/error interfaces if they exist and the handle methods flag
// is enabled
if !d.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(d.cs, d.w, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(d.w, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(d.w, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(d.w, v.Uint(), 10)
case reflect.Float32:
printFloat(d.w, v.Float(), 32)
case reflect.Float64:
printFloat(d.w, v.Float(), 64)
case reflect.Complex64:
printComplex(d.w, v.Complex(), 32)
case reflect.Complex128:
printComplex(d.w, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
d.dumpSlice(v)
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.String:
d.w.Write([]byte(strconv.Quote(v.String())))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
d.w.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
d.w.Write(nilAngleBytes)
break
}
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
numEntries := v.Len()
keys := v.MapKeys()
if d.cs.SortKeys {
sortValues(keys, d.cs)
}
for i, key := range keys {
d.dump(d.unpackValue(key))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.MapIndex(key)))
if i < (numEntries - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Struct:
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
d.indent()
d.w.Write(maxNewlineBytes)
} else {
vt := v.Type()
numFields := v.NumField()
for i := 0; i < numFields; i++ {
d.indent()
vtf := vt.Field(i)
d.w.Write([]byte(vtf.Name))
d.w.Write(colonSpaceBytes)
d.ignoreNextIndent = true
d.dump(d.unpackValue(v.Field(i)))
if i < (numFields - 1) {
d.w.Write(commaNewlineBytes)
} else {
d.w.Write(newlineBytes)
}
}
}
d.depth--
d.indent()
d.w.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(d.w, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(d.w, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it in case any new
// types are added.
default:
if v.CanInterface() {
fmt.Fprintf(d.w, "%v", v.Interface())
} else {
fmt.Fprintf(d.w, "%v", v.String())
}
}
}
// fdump is a helper function to consolidate the logic from the various public
// methods which take varying writers and config states.
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
for _, arg := range a {
if arg == nil {
w.Write(interfaceBytes)
w.Write(spaceBytes)
w.Write(nilAngleBytes)
w.Write(newlineBytes)
continue
}
d := dumpState{w: w, cs: cs}
d.pointers = make(map[uintptr]int)
d.dump(reflect.ValueOf(arg))
d.w.Write(newlineBytes)
}
}
// Fdump formats and displays the passed arguments to io.Writer w. It formats
// exactly the same as Dump.
func Fdump(w io.Writer, a ...interface{}) {
fdump(&Config, w, a...)
}
// Sdump returns a string with the passed arguments formatted exactly the same
// as Dump.
func Sdump(a ...interface{}) string {
var buf bytes.Buffer
fdump(&Config, &buf, a...)
return buf.String()
}
/*
Dump displays the passed parameters to standard out with newlines, customizable
indentation, and additional debug information such as complete types and all
pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
get the formatted result as a string.
*/
func Dump(a ...interface{}) {
fdump(&Config, os.Stdout, a...)
}
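
Two behaviors implemented above are easiest to see by running them: the hexdump -C style rendering of byte slices in dumpSlice and the circular-reference detection in dumpPtr. A minimal sketch, with an illustrative `node` type:

```go
package main

import "github.com/davecgh/go-spew/spew"

// node points back at itself so the dump exercises the circular-reference
// detection in dumpPtr rather than recursing forever.
type node struct {
	ID   uint8
	Next *node
}

func main() {
	// Byte slices are rendered hexdump -C style: offsets, hex bytes, ASCII.
	spew.Dump([]byte("hello, spew"))

	// Circular structures are detected and flagged instead of looping.
	n := &node{ID: 1}
	n.Next = n
	spew.Dump(n)
}
```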

419
vendor/github.com/davecgh/go-spew/spew/format.go generated vendored Normal file
View File

@@ -0,0 +1,419 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"bytes"
"fmt"
"reflect"
"strconv"
"strings"
)
// supportedFlags is a list of all the character flags supported by fmt package.
const supportedFlags = "0-+# "
// formatState implements the fmt.Formatter interface and contains information
// about the state of a formatting operation. The NewFormatter function can
// be used to get a new Formatter which can be used directly as arguments
// in standard fmt package printing calls.
type formatState struct {
value interface{}
fs fmt.State
depth int
pointers map[uintptr]int
ignoreNextType bool
cs *ConfigState
}
// buildDefaultFormat recreates the original format string without precision
// and width information to pass in to fmt.Sprintf in the case of an
// unrecognized type. Unless new types are added to the language, this
// function won't ever be called.
func (f *formatState) buildDefaultFormat() (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
buf.WriteRune('v')
format = buf.String()
return format
}
// constructOrigFormat recreates the original format string including precision
// and width information to pass along to the standard fmt package. This allows
// automatic deferral of all format strings this package doesn't support.
func (f *formatState) constructOrigFormat(verb rune) (format string) {
buf := bytes.NewBuffer(percentBytes)
for _, flag := range supportedFlags {
if f.fs.Flag(int(flag)) {
buf.WriteRune(flag)
}
}
if width, ok := f.fs.Width(); ok {
buf.WriteString(strconv.Itoa(width))
}
if precision, ok := f.fs.Precision(); ok {
buf.Write(precisionBytes)
buf.WriteString(strconv.Itoa(precision))
}
buf.WriteRune(verb)
format = buf.String()
return format
}
// unpackValue returns values inside of non-nil interfaces when possible and
// ensures that types for values which have been unpacked from an interface
// are displayed when the show types flag is also set.
// This is useful for data types like structs, arrays, slices, and maps which
// can contain varying types packed inside an interface.
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
if v.Kind() == reflect.Interface {
f.ignoreNextType = false
if !v.IsNil() {
v = v.Elem()
}
}
return v
}
// formatPtr handles formatting of pointers by indirecting them as necessary.
func (f *formatState) formatPtr(v reflect.Value) {
// Display nil if top level pointer is nil.
showTypes := f.fs.Flag('#')
if v.IsNil() && (!showTypes || f.ignoreNextType) {
f.fs.Write(nilAngleBytes)
return
}
// Remove pointers at or below the current depth from map used to detect
// circular refs.
for k, depth := range f.pointers {
if depth >= f.depth {
delete(f.pointers, k)
}
}
// Keep list of all dereferenced pointers to possibly show later.
pointerChain := make([]uintptr, 0)
// Figure out how many levels of indirection there are by dereferencing
// pointers and unpacking interfaces down the chain while detecting circular
// references.
nilFound := false
cycleFound := false
indirects := 0
ve := v
for ve.Kind() == reflect.Ptr {
if ve.IsNil() {
nilFound = true
break
}
indirects++
addr := ve.Pointer()
pointerChain = append(pointerChain, addr)
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
cycleFound = true
indirects--
break
}
f.pointers[addr] = f.depth
ve = ve.Elem()
if ve.Kind() == reflect.Interface {
if ve.IsNil() {
nilFound = true
break
}
ve = ve.Elem()
}
}
// Display type or indirection level depending on flags.
if showTypes && !f.ignoreNextType {
f.fs.Write(openParenBytes)
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
f.fs.Write([]byte(ve.Type().String()))
f.fs.Write(closeParenBytes)
} else {
if nilFound || cycleFound {
indirects += strings.Count(ve.Type().String(), "*")
}
f.fs.Write(openAngleBytes)
f.fs.Write([]byte(strings.Repeat("*", indirects)))
f.fs.Write(closeAngleBytes)
}
// Display pointer information depending on flags.
if f.fs.Flag('+') && (len(pointerChain) > 0) {
f.fs.Write(openParenBytes)
for i, addr := range pointerChain {
if i > 0 {
f.fs.Write(pointerChainBytes)
}
printHexPtr(f.fs, addr)
}
f.fs.Write(closeParenBytes)
}
// Display dereferenced value.
switch {
case nilFound == true:
f.fs.Write(nilAngleBytes)
case cycleFound == true:
f.fs.Write(circularShortBytes)
default:
f.ignoreNextType = true
f.format(ve)
}
}
// format is the main workhorse for providing the Formatter interface. It
// uses the passed reflect value to figure out what kind of object we are
// dealing with and formats it appropriately. It is a recursive function,
// however circular data structures are detected and handled properly.
func (f *formatState) format(v reflect.Value) {
// Handle invalid reflect values immediately.
kind := v.Kind()
if kind == reflect.Invalid {
f.fs.Write(invalidAngleBytes)
return
}
// Handle pointers specially.
if kind == reflect.Ptr {
f.formatPtr(v)
return
}
// Print type information unless already handled elsewhere.
if !f.ignoreNextType && f.fs.Flag('#') {
f.fs.Write(openParenBytes)
f.fs.Write([]byte(v.Type().String()))
f.fs.Write(closeParenBytes)
}
f.ignoreNextType = false
// Call Stringer/error interfaces if they exist and the handle methods
// flag is enabled.
if !f.cs.DisableMethods {
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
if handled := handleMethods(f.cs, f.fs, v); handled {
return
}
}
}
switch kind {
case reflect.Invalid:
// Do nothing. We should never get here since invalid has already
// been handled above.
case reflect.Bool:
printBool(f.fs, v.Bool())
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
printInt(f.fs, v.Int(), 10)
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
printUint(f.fs, v.Uint(), 10)
case reflect.Float32:
printFloat(f.fs, v.Float(), 32)
case reflect.Float64:
printFloat(f.fs, v.Float(), 64)
case reflect.Complex64:
printComplex(f.fs, v.Complex(), 32)
case reflect.Complex128:
printComplex(f.fs, v.Complex(), 64)
case reflect.Slice:
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
fallthrough
case reflect.Array:
f.fs.Write(openBracketBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
numEntries := v.Len()
for i := 0; i < numEntries; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(v.Index(i)))
}
}
f.depth--
f.fs.Write(closeBracketBytes)
case reflect.String:
f.fs.Write([]byte(v.String()))
case reflect.Interface:
// The only time we should get here is for nil interfaces due to
// unpackValue calls.
if v.IsNil() {
f.fs.Write(nilAngleBytes)
}
case reflect.Ptr:
// Do nothing. We should never get here since pointers have already
// been handled above.
case reflect.Map:
// nil maps should be indicated as different than empty maps
if v.IsNil() {
f.fs.Write(nilAngleBytes)
break
}
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
keys := v.MapKeys()
if f.cs.SortKeys {
sortValues(keys, f.cs)
}
for i, key := range keys {
if i > 0 {
f.fs.Write(spaceBytes)
}
f.ignoreNextType = true
f.format(f.unpackValue(key))
f.fs.Write(colonBytes)
f.ignoreNextType = true
f.format(f.unpackValue(v.MapIndex(key)))
}
}
f.depth--
f.fs.Write(closeMapBytes)
case reflect.Struct:
numFields := v.NumField()
f.fs.Write(openBraceBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
f.fs.Write(maxShortBytes)
} else {
vt := v.Type()
for i := 0; i < numFields; i++ {
if i > 0 {
f.fs.Write(spaceBytes)
}
vtf := vt.Field(i)
if f.fs.Flag('+') || f.fs.Flag('#') {
f.fs.Write([]byte(vtf.Name))
f.fs.Write(colonBytes)
}
f.format(f.unpackValue(v.Field(i)))
}
}
f.depth--
f.fs.Write(closeBraceBytes)
case reflect.Uintptr:
printHexPtr(f.fs, uintptr(v.Uint()))
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
printHexPtr(f.fs, v.Pointer())
// There were not any other types at the time this code was written, but
// fall back to letting the default fmt package handle it if any get added.
default:
format := f.buildDefaultFormat()
if v.CanInterface() {
fmt.Fprintf(f.fs, format, v.Interface())
} else {
fmt.Fprintf(f.fs, format, v.String())
}
}
}
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
// details.
func (f *formatState) Format(fs fmt.State, verb rune) {
f.fs = fs
// Use standard formatting for verbs that are not v.
if verb != 'v' {
format := f.constructOrigFormat(verb)
fmt.Fprintf(fs, format, f.value)
return
}
if f.value == nil {
if fs.Flag('#') {
fs.Write(interfaceBytes)
}
fs.Write(nilAngleBytes)
return
}
f.format(reflect.ValueOf(f.value))
}
// newFormatter is a helper function to consolidate the logic from the various
// public methods which take varying config states.
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
fs := &formatState{value: v, cs: cs}
fs.pointers = make(map[uintptr]int)
return fs
}
/*
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
interface. As a result, it integrates cleanly with standard fmt package
printing functions. The formatter is useful for inline printing of smaller data
types similar to the standard %v format specifier.
The custom formatter only responds to the %v (most compact), %+v (adds pointer
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
combinations. Any other verbs such as %x and %q will be sent to the
standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
Typically this function shouldn't be called directly. It is much easier to make
use of the custom formatter by calling one of the convenience functions such as
Printf, Println, or Fprintf.
*/
func NewFormatter(v interface{}) fmt.Formatter {
return newFormatter(&Config, v)
}
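
Because the value returned by NewFormatter satisfies fmt.Formatter, it can be handed directly to the standard fmt functions; a small sketch with an illustrative `point` type:

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type point struct{ X, Y int }

func main() {
	p := &point{X: 1, Y: 2}

	// The wrapped value responds to the %v verb family with spew formatting;
	// other verbs such as %x and %q are passed through to fmt unchanged.
	fmt.Printf("%v\n", spew.NewFormatter(p))
	fmt.Printf("%+v\n", spew.NewFormatter(p))
	fmt.Printf("%#+v\n", spew.NewFormatter(p))
}
```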

148
vendor/github.com/davecgh/go-spew/spew/spew.go generated vendored Normal file
View File

@@ -0,0 +1,148 @@
/*
* Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
package spew
import (
"fmt"
"io"
)
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the formatted string as a value that satisfies error. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Errorf(format string, a ...interface{}) (err error) {
return fmt.Errorf(format, convertArgs(a)...)
}
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprint(w, convertArgs(a)...)
}
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
return fmt.Fprintf(w, format, convertArgs(a)...)
}
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
return fmt.Fprintln(w, convertArgs(a)...)
}
// Print is a wrapper for fmt.Print that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
func Print(a ...interface{}) (n int, err error) {
return fmt.Print(convertArgs(a)...)
}
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Printf(format string, a ...interface{}) (n int, err error) {
return fmt.Printf(format, convertArgs(a)...)
}
// Println is a wrapper for fmt.Println that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the number of bytes written and any write error encountered. See
// NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
func Println(a ...interface{}) (n int, err error) {
return fmt.Println(convertArgs(a)...)
}
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprint(a ...interface{}) string {
return fmt.Sprint(convertArgs(a)...)
}
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
// passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintf(format string, a ...interface{}) string {
return fmt.Sprintf(format, convertArgs(a)...)
}
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
// were passed with a default Formatter interface returned by NewFormatter. It
// returns the resulting string. See NewFormatter for formatting details.
//
// This function is shorthand for the following syntax:
//
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
func Sprintln(a ...interface{}) string {
return fmt.Sprintln(convertArgs(a)...)
}
// convertArgs accepts a slice of arguments and returns a slice of the same
// length with each argument converted to a default spew Formatter interface.
func convertArgs(args []interface{}) (formatters []interface{}) {
formatters = make([]interface{}, len(args))
for index, arg := range args {
formatters[index] = NewFormatter(arg)
}
return formatters
}
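
The package-level wrappers above are handy for building errors and strings whose arguments get deep formatting; a brief sketch (the `request` type is an illustrative assumption):

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

type request struct {
	Method string
	Header map[string][]string
}

func main() {
	req := request{
		Method: "GET",
		Header: map[string][]string{"Accept": {"application/json"}},
	}

	// Each argument is wrapped with the default Formatter before being
	// handed to the corresponding fmt function.
	err := spew.Errorf("rejected request: %+v", req)
	fmt.Println(err)
	fmt.Println(spew.Sprintf("summary: %v", req))
}
```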

70
vendor/github.com/emicklei/go-restful/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,70 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
restful.html
*.out
tmp.prof
go-restful.test
examples/restful-basic-authentication
examples/restful-encoding-filter
examples/restful-filters
examples/restful-hello-world
examples/restful-resource-functions
examples/restful-serve-static
examples/restful-user-service
*.DS_Store
examples/restful-user-resource
examples/restful-multi-containers
examples/restful-form-handling
examples/restful-CORS-filter
examples/restful-options-filter
examples/restful-curly-router
examples/restful-cpuprofiler-service
examples/restful-pre-post-filters
curly.prof
examples/restful-NCSA-logging
examples/restful-html-template
s.html
restful-path-tail

6
vendor/github.com/emicklei/go-restful/.travis.yml generated vendored Normal file
View File

@@ -0,0 +1,6 @@
language: go
go:
- 1.x
script: go test -v

223
vendor/github.com/emicklei/go-restful/CHANGES.md generated vendored Normal file
View File

@@ -0,0 +1,223 @@
Change history of go-restful
=
2017-02-16
- solved issue #304, make operation names unique
2017-01-30
[IMPORTANT] For swagger users, change your import statement to:
swagger "github.com/emicklei/go-restful-swagger12"
- moved swagger 1.2 code to go-restful-swagger12
- created TAG 2.0.0
2017-01-27
- remove defer request body close
- expose Dispatch for testing filters and Route functions
- swagger response model cannot be array
- created TAG 1.0.0
2016-12-22
- (API change) Remove code related to caching request content. Removes SetCacheReadEntity(doCache bool)
2016-11-26
- Default change! now use CurlyRouter (was RouterJSR311)
- Default change! no more caching of request content
- Default change! do not recover from panics
2016-09-22
- fix the DefaultRequestContentType feature
2016-02-14
- take the quality factor of the Accept header media type into account when deciding the content type of the response
- add constructors for custom entity accessors for xml and json
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency
2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)
2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.
2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI
2015-03-20
- add configurable logging
2015-03-18
- if not specified, the Operation is derived from the Route function
2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route
2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)
2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI
2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI
2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code
2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation
2014-04-16
- (api add) expose constructor of Request for testing.
2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).
2014-07-03
- (api add) CORS can be configured with a list of allowed domains
2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)
2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath
2014-02-17
- (api change) renamed parameter constants (go-lint checks)
2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier
2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)
2013-11-13
- (api add) Response knows how many bytes are written to the response body.
2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes source code information.
2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)
2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use or allow regular expressions in paths
2013-08-05
- add OPTIONS support
- add CORS support
2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead
2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString
2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response
Important API changes:
- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.
2013-07-06
- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.
2013-06-19
- (improve) DoNotRecover option, moved request body closer, improved ReadEntity
2013-06-03
- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go
2013-06-02
- (optimize) Cache the RegExp compilation of Paths.
2013-05-22
- (api add) Added support for request/response filter functions
2013-05-18
- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)
[2012-11-14 .. 2013-05-18>
- See https://github.com/emicklei/go-restful/commits
2012-11-14
- Initial commit

22
vendor/github.com/emicklei/go-restful/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,22 @@
Copyright (c) 2012,2013 Ernest Micklei
MIT License
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

7
vendor/github.com/emicklei/go-restful/Makefile generated vendored Normal file
View File

@@ -0,0 +1,7 @@
all: test
test:
go test -v .
ex:
cd examples && ls *.go | xargs go build -o /tmp/ignore

74
vendor/github.com/emicklei/go-restful/README.md generated vendored Normal file
View File

@@ -0,0 +1,74 @@
go-restful
==========
package for building REST-style Web Services using Google Go
[![Build Status](https://travis-ci.org/emicklei/go-restful.png)](https://travis-ci.org/emicklei/go-restful)
[![Go Report Card](https://goreportcard.com/badge/github.com/emicklei/go-restful)](https://goreportcard.com/report/github.com/emicklei/go-restful)
[![GoDoc](https://godoc.org/github.com/emicklei/go-restful?status.svg)](https://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:
- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI
### Example
```Go
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_XML, restful.MIME_JSON).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser).
Doc("get a user").
Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
Writes(User{}))
...
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
```
[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)
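To actually run this WebService, a minimal serving sketch could look like the following; it assumes the UserResource type and findUser handler from the snippet above and registers the service with the DefaultContainer:

```Go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	u := UserResource{} // the UserResource from the example above (assumed)

	ws := new(restful.WebService)
	ws.Path("/users").
		Consumes(restful.MIME_XML, restful.MIME_JSON).
		Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{user-id}").To(u.findUser))

	restful.Add(ws) // register with the DefaultContainer (backed by http.DefaultServeMux)
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```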
### Features
- Routes for request &#8594; function mapping with path parameter (e.g. {id}) support
- Configurable router:
- (default) Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*})
- Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Customizable encoding using EntityReaderWriter registration
- Filters for intercepting the request &#8594; response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip,deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see [go-restful-swagger12](https://github.com/emicklei/go-restful-swagger12),[go-restful-openapi](https://github.com/emicklei/go-restful-openapi))
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable gzip/deflate readers and writers using CompressorProvider registration
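As a rough sketch of wiring up two of these features on the default container (the calls are taken from the package documentation included further below):

```Go
// gzip/deflate compression of responses, negotiated via the Accept-Encoding header.
restful.DefaultContainer.EnableContentEncoding(true)

// automatic responses to OPTIONS requests via the pre-defined filter.
restful.Filter(restful.OPTIONSFilter())
```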
### Resources
- [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)
Type ```git shortlog -s``` for a full list of contributors.
© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome.

1
vendor/github.com/emicklei/go-restful/Srcfile generated vendored Normal file
View File

@@ -0,0 +1 @@
{"SkipDirs": ["examples"]}

10
vendor/github.com/emicklei/go-restful/bench_test.sh generated vendored Normal file
View File

@@ -0,0 +1,10 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out
go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly
#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof

123
vendor/github.com/emicklei/go-restful/compress.go generated vendored Normal file
View File

@@ -0,0 +1,123 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bufio"
"compress/gzip"
"compress/zlib"
"errors"
"io"
"net"
"net/http"
"strings"
)
// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false
// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
writer http.ResponseWriter
compressor io.WriteCloser
encoding string
}
// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
return c.writer.Header()
}
// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
c.writer.WriteHeader(status)
}
// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
if c.isCompressorClosed() {
return -1, errors.New("Compressing error: tried to write data using closed compressor")
}
return c.compressor.Write(bytes)
}
// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
return c.writer.(http.CloseNotifier).CloseNotify()
}
// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
if c.isCompressorClosed() {
return errors.New("Compressing error: tried to close already closed compressor")
}
c.compressor.Close()
if ENCODING_GZIP == c.encoding {
currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
}
if ENCODING_DEFLATE == c.encoding {
currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
}
// gc hint needed?
c.compressor = nil
return nil
}
func (c *CompressingResponseWriter) isCompressorClosed() bool {
return nil == c.compressor
}
// Hijack implements the Hijacker interface
// This is especially useful when combining Container.EnableContentEncoding
// with websockets (for instance gorilla/websocket)
func (c *CompressingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) {
hijacker, ok := c.writer.(http.Hijacker)
if !ok {
return nil, nil, errors.New("ResponseWriter doesn't support Hijacker interface")
}
return hijacker.Hijack()
}
// wantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
header := httpRequest.Header.Get(HEADER_AcceptEncoding)
gi := strings.Index(header, ENCODING_GZIP)
zi := strings.Index(header, ENCODING_DEFLATE)
// use in order of appearance
if gi == -1 {
return zi != -1, ENCODING_DEFLATE
} else if zi == -1 {
return gi != -1, ENCODING_GZIP
} else {
if gi < zi {
return true, ENCODING_GZIP
}
return true, ENCODING_DEFLATE
}
}
// NewCompressingResponseWriter creates a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
c := new(CompressingResponseWriter)
c.writer = httpWriter
var err error
if ENCODING_GZIP == encoding {
w := currentCompressorProvider.AcquireGzipWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_GZIP
} else if ENCODING_DEFLATE == encoding {
w := currentCompressorProvider.AcquireZlibWriter()
w.Reset(httpWriter)
c.compressor = w
c.encoding = ENCODING_DEFLATE
} else {
return nil, errors.New("Unknown encoding:" + encoding)
}
return c, err
}

View File

@@ -0,0 +1,103 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
gzipWriters chan *gzip.Writer
gzipReaders chan *gzip.Reader
zlibWriters chan *zlib.Writer
writersCapacity int
readersCapacity int
}
// NewBoundedCachedCompressors returns a new BoundedCachedCompressors with a pre-filled cache.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
b := &BoundedCachedCompressors{
gzipWriters: make(chan *gzip.Writer, writersCapacity),
gzipReaders: make(chan *gzip.Reader, readersCapacity),
zlibWriters: make(chan *zlib.Writer, writersCapacity),
writersCapacity: writersCapacity,
readersCapacity: readersCapacity,
}
for ix := 0; ix < writersCapacity; ix++ {
b.gzipWriters <- newGzipWriter()
b.zlibWriters <- newZlibWriter()
}
for ix := 0; ix < readersCapacity; ix++ {
b.gzipReaders <- newGzipReader()
}
return b
}
// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
var writer *gzip.Writer
select {
case writer, _ = <-b.gzipWriters:
default:
// return a new unmanaged one
writer = newGzipWriter()
}
return writer
}
// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
// forget the unmanaged ones
if len(b.gzipWriters) < b.writersCapacity {
b.gzipWriters <- w
}
}
// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
var reader *gzip.Reader
select {
case reader, _ = <-b.gzipReaders:
default:
// return a new unmanaged one
reader = newGzipReader()
}
return reader
}
// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
// forget the unmanaged ones
if len(b.gzipReaders) < b.readersCapacity {
b.gzipReaders <- r
}
}
// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
var writer *zlib.Writer
select {
case writer, _ = <-b.zlibWriters:
default:
// return a new unmanaged one
writer = newZlibWriter()
}
return writer
}
// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
// forget the unmanaged ones
if len(b.zlibWriters) < b.writersCapacity {
b.zlibWriters <- w
}
}

View File

@@ -0,0 +1,91 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"compress/gzip"
"compress/zlib"
"sync"
)
// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
type SyncPoolCompessors struct {
GzipWriterPool *sync.Pool
GzipReaderPool *sync.Pool
ZlibWriterPool *sync.Pool
}
// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
func NewSyncPoolCompessors() *SyncPoolCompessors {
return &SyncPoolCompessors{
GzipWriterPool: &sync.Pool{
New: func() interface{} { return newGzipWriter() },
},
GzipReaderPool: &sync.Pool{
New: func() interface{} { return newGzipReader() },
},
ZlibWriterPool: &sync.Pool{
New: func() interface{} { return newZlibWriter() },
},
}
}
func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
return s.GzipWriterPool.Get().(*gzip.Writer)
}
func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
s.GzipWriterPool.Put(w)
}
func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
return s.GzipReaderPool.Get().(*gzip.Reader)
}
func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
s.GzipReaderPool.Put(r)
}
func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
return s.ZlibWriterPool.Get().(*zlib.Writer)
}
func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
s.ZlibWriterPool.Put(w)
}
func newGzipWriter() *gzip.Writer {
// create with an empty bytes writer; it will be replaced before using the gzipWriter
writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}
func newGzipReader() *gzip.Reader {
// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
// we can safely use currentCompressorProvider because it is set on package initialization.
w := currentCompressorProvider.AcquireGzipWriter()
defer currentCompressorProvider.ReleaseGzipWriter(w)
b := new(bytes.Buffer)
w.Reset(b)
w.Flush()
w.Close()
reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
if err != nil {
panic(err.Error())
}
return reader
}
func newZlibWriter() *zlib.Writer {
writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
if err != nil {
panic(err.Error())
}
return writer
}

54
vendor/github.com/emicklei/go-restful/compressors.go generated vendored Normal file
View File

@@ -0,0 +1,54 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/gzip"
"compress/zlib"
)
// CompressorProvider describes a component that can provide compressors for the standard encodings.
type CompressorProvider interface {
// Returns a *gzip.Writer which needs to be released later.
// Before using it, call Reset().
AcquireGzipWriter() *gzip.Writer
// Releases an acquired *gzip.Writer.
ReleaseGzipWriter(w *gzip.Writer)
// Returns a *gzip.Reader which needs to be released later.
AcquireGzipReader() *gzip.Reader
// Releases an acquired *gzip.Reader.
ReleaseGzipReader(w *gzip.Reader)
// Returns a *zlib.Writer which needs to be released later.
// Before using it, call Reset().
AcquireZlibWriter() *zlib.Writer
// Releases an acquired *zlib.Writer.
ReleaseZlibWriter(w *zlib.Writer)
}
// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider
func init() {
currentCompressorProvider = NewSyncPoolCompessors()
}
// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
return currentCompressorProvider
}
// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
if p == nil {
panic("cannot set compressor provider to nil")
}
currentCompressorProvider = p
}
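
A short usage sketch, pairing this registration hook with the bounded cache shown earlier in this diff (the capacities are illustrative):

```Go
// Replace the default sync.Pool based provider with a preloaded,
// bounded cache of gzip/zlib writers and readers.
restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(20, 20))
```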

30
vendor/github.com/emicklei/go-restful/constants.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
HEADER_Allow = "Allow"
HEADER_Accept = "Accept"
HEADER_Origin = "Origin"
HEADER_ContentType = "Content-Type"
HEADER_LastModified = "Last-Modified"
HEADER_AcceptEncoding = "Accept-Encoding"
HEADER_ContentEncoding = "Content-Encoding"
HEADER_AccessControlExposeHeaders = "Access-Control-Expose-Headers"
HEADER_AccessControlRequestMethod = "Access-Control-Request-Method"
HEADER_AccessControlRequestHeaders = "Access-Control-Request-Headers"
HEADER_AccessControlAllowMethods = "Access-Control-Allow-Methods"
HEADER_AccessControlAllowOrigin = "Access-Control-Allow-Origin"
HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
HEADER_AccessControlAllowHeaders = "Access-Control-Allow-Headers"
HEADER_AccessControlMaxAge = "Access-Control-Max-Age"
ENCODING_GZIP = "gzip"
ENCODING_DEFLATE = "deflate"
)

366
vendor/github.com/emicklei/go-restful/container.go generated vendored Normal file
View File

@@ -0,0 +1,366 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"errors"
"fmt"
"net/http"
"os"
"runtime"
"strings"
"sync"
"github.com/emicklei/go-restful/log"
)
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
// The requests are further dispatched to routes of WebServices using a RouteSelector
type Container struct {
webServicesLock sync.RWMutex
webServices []*WebService
ServeMux *http.ServeMux
isRegisteredOnRoot bool
containerFilters []FilterFunction
doNotRecover bool // default is true
recoverHandleFunc RecoverHandleFunction
serviceErrorHandleFunc ServiceErrorHandleFunction
router RouteSelector // default is a CurlyRouter (RouterJSR311 is a slower alternative)
contentEncodingEnabled bool // default is false
}
// NewContainer creates a new Container using a new ServeMux and default router (CurlyRouter)
func NewContainer() *Container {
return &Container{
webServices: []*WebService{},
ServeMux: http.NewServeMux(),
isRegisteredOnRoot: false,
containerFilters: []FilterFunction{},
doNotRecover: true,
recoverHandleFunc: logStackOnRecover,
serviceErrorHandleFunc: writeServiceError,
router: CurlyRouter{},
contentEncodingEnabled: false}
}
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
// The first argument is what recover() returns. The second must be used to communicate an error response.
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
// RecoverHandler changes the default function (logStackOnRecover) to be called
// when a panic is detected. DoNotRecover must be set to false for this handler to be invoked.
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
c.recoverHandleFunc = handler
}
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
// The first argument is the service error, the second is the request that resulted in the error and
// the third must be used to communicate an error response.
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
// ServiceErrorHandler changes the default function (writeServiceError) to be called
// when a ServiceError is detected.
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
c.serviceErrorHandleFunc = handler
}
// DoNotRecover controls whether panics will be caught to return HTTP 500.
// If set to true, Route functions are responsible for handling any error situation.
// Default value is true.
func (c *Container) DoNotRecover(doNot bool) {
c.doNotRecover = doNot
}
// Router changes the default Router (currently CurlyRouter)
func (c *Container) Router(aRouter RouteSelector) {
c.router = aRouter
}
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
func (c *Container) EnableContentEncoding(enabled bool) {
c.contentEncodingEnabled = enabled
}
// Add a WebService to the Container. It will detect duplicate root paths and exit in that case.
func (c *Container) Add(service *WebService) *Container {
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// if rootPath was not set then lazy initialize it
if len(service.rootPath) == 0 {
service.Path("/")
}
// cannot have duplicate root paths
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
os.Exit(1)
}
}
// If not registered on root then add specific mapping
if !c.isRegisteredOnRoot {
c.isRegisteredOnRoot = c.addHandler(service, c.ServeMux)
}
c.webServices = append(c.webServices, service)
return c
}
// addHandler may set a new HandleFunc for the serveMux
// this function must run inside the critical region protected by the webServicesLock.
// returns true if the function was registered on root ("/")
func (c *Container) addHandler(service *WebService, serveMux *http.ServeMux) bool {
pattern := fixedPrefixPath(service.RootPath())
// check if root path registration is needed
if "/" == pattern || "" == pattern {
serveMux.HandleFunc("/", c.dispatch)
return true
}
// detect if registration already exists
alreadyMapped := false
for _, each := range c.webServices {
if each.RootPath() == service.RootPath() {
alreadyMapped = true
break
}
}
if !alreadyMapped {
serveMux.HandleFunc(pattern, c.dispatch)
if !strings.HasSuffix(pattern, "/") {
serveMux.HandleFunc(pattern+"/", c.dispatch)
}
}
return false
}
func (c *Container) Remove(ws *WebService) error {
if c.ServeMux == http.DefaultServeMux {
errMsg := fmt.Sprintf("[restful] cannot remove a WebService from a Container using the DefaultServeMux: ['%v']", ws)
log.Printf(errMsg)
return errors.New(errMsg)
}
c.webServicesLock.Lock()
defer c.webServicesLock.Unlock()
// build a new ServeMux and re-register all WebServices
newServeMux := http.NewServeMux()
newServices := []*WebService{}
newIsRegisteredOnRoot := false
for _, each := range c.webServices {
if each.rootPath != ws.rootPath {
// If not registered on root then add specific mapping
if !newIsRegisteredOnRoot {
newIsRegisteredOnRoot = c.addHandler(each, newServeMux)
}
newServices = append(newServices, each)
}
}
c.webServices, c.ServeMux, c.isRegisteredOnRoot = newServices, newServeMux, newIsRegisteredOnRoot
return nil
}
// logStackOnRecover is the default RecoverHandleFunction and is called
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
// Default implementation logs the stacktrace and writes the stacktrace on the response.
// This may be a security issue as it exposes source code information.
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
var buffer bytes.Buffer
buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
for i := 2; ; i += 1 {
_, file, line, ok := runtime.Caller(i)
if !ok {
break
}
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
}
log.Print(buffer.String())
httpWriter.WriteHeader(http.StatusInternalServerError)
httpWriter.Write(buffer.Bytes())
}
// writeServiceError is the default ServiceErrorHandleFunction and is called
// when a ServiceError is returned during route selection. Default implementation
// calls resp.WriteErrorString(err.Code, err.Message)
func writeServiceError(err ServiceError, req *Request, resp *Response) {
resp.WriteErrorString(err.Code, err.Message)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) Dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
if httpWriter == nil {
panic("httpWriter cannot be nil")
}
if httpRequest == nil {
panic("httpRequest cannot be nil")
}
c.dispatch(httpWriter, httpRequest)
}
// Dispatch the incoming Http Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
writer := httpWriter
// CompressingResponseWriter should be closed after all operations are done
defer func() {
if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
compressWriter.Close()
}
}()
// Install panic recovery unless told otherwise
if !c.doNotRecover { // catch all for 500 response
defer func() {
if r := recover(); r != nil {
c.recoverHandleFunc(r, writer)
return
}
}()
}
// Detect if compression is needed
// assume without compression, test for override
if c.contentEncodingEnabled {
doCompress, encoding := wantsCompressedResponse(httpRequest)
if doCompress {
var err error
writer, err = NewCompressingResponseWriter(httpWriter, encoding)
if err != nil {
log.Print("[restful] unable to install compressor: ", err)
httpWriter.WriteHeader(http.StatusInternalServerError)
return
}
}
}
// Find the best matching Route; err is non-nil if no match was found
var webService *WebService
var route *Route
var err error
func() {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
webService, route, err = c.router.SelectRoute(
c.webServices,
httpRequest)
}()
if err != nil {
// a non-200 response has already been written
// run container filters anyway ; they should not touch the response...
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
switch err.(type) {
case ServiceError:
ser := err.(ServiceError)
c.serviceErrorHandleFunc(ser, req, resp)
}
// TODO
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
return
}
wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
// pass through filters (if any)
if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
// compose filter chain
allFilters := []FilterFunction{}
allFilters = append(allFilters, c.containerFilters...)
allFilters = append(allFilters, webService.filters...)
allFilters = append(allFilters, route.Filters...)
chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
// handle request by route after passing all filters
route.Function(wrappedRequest, wrappedResponse)
}}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// no filters, handle request by route
route.Function(wrappedRequest, wrappedResponse)
}
}
// fixedPrefixPath returns the fixed part of the pathspec, i.e. everything before the first template var {}
func fixedPrefixPath(pathspec string) string {
varBegin := strings.Index(pathspec, "{")
if -1 == varBegin {
return pathspec
}
return pathspec[:varBegin]
}
// ServeHTTP implements net/http.Handler therefore a Container can be a Handler in a http.Server
func (c *Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
func (c *Container) Handle(pattern string, handler http.Handler) {
c.ServeMux.Handle(pattern, handler)
}
// HandleWithFilter registers the handler for the given pattern.
// Container's filter chain is applied for handler.
// If a handler already exists for pattern, HandleWithFilter panics.
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
if len(c.containerFilters) == 0 {
handler.ServeHTTP(httpResponse, httpRequest)
return
}
chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
handler.ServeHTTP(httpResponse, httpRequest)
}}
chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
}
c.Handle(pattern, http.HandlerFunc(f))
}
// Filter appends a container FilterFunction. These are called before dispatching
// a http.Request to a WebService from the container
func (c *Container) Filter(filter FilterFunction) {
c.containerFilters = append(c.containerFilters, filter)
}
// RegisteredWebServices returns the collections of added WebServices
func (c *Container) RegisteredWebServices() []*WebService {
c.webServicesLock.RLock()
defer c.webServicesLock.RUnlock()
result := make([]*WebService, len(c.webServices))
for ix := range c.webServices {
result[ix] = c.webServices[ix]
}
return result
}
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
func (c *Container) computeAllowedMethods(req *Request) []string {
// Go through all RegisteredWebServices() and all its Routes to collect the options
methods := []string{}
requestPath := req.Request.URL.Path
for _, ws := range c.RegisteredWebServices() {
matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
finalMatch := matches[len(matches)-1]
for _, rt := range ws.Routes() {
matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
if matches != nil {
lastMatch := matches[len(matches)-1]
if lastMatch == "" || lastMatch == "/" { // do not include if value is neither empty nor /.
methods = append(methods, rt.Method)
}
}
}
}
}
// methods = append(methods, "OPTIONS") not sure about this
return methods
}
// newBasicRequestResponse creates a pair of Request,Response from its http versions.
// It is basic because no parameter or (produces) content-type information is given.
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
resp := NewResponse(httpWriter)
resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
return NewRequest(httpRequest), resp
}
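
A brief sketch of running a separate Container alongside the DefaultContainer; ws is assumed to be a *restful.WebService built elsewhere, and globalLogging is the container-level filter sketched after filter.go below:

```Go
container := restful.NewContainer() // own http.ServeMux, CurlyRouter by default
container.Add(ws)
container.Filter(globalLogging) // a container-level FilterFunction (assumed)
container.EnableContentEncoding(true)

server := &http.Server{Addr: ":8081", Handler: container} // Container implements http.Handler
log.Fatal(server.ListenAndServe())
```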

202
vendor/github.com/emicklei/go-restful/cors_filter.go generated vendored Normal file
View File

@@ -0,0 +1,202 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"regexp"
"strconv"
"strings"
)
// CrossOriginResourceSharing is used to create a Container Filter that implements CORS.
// Cross-origin resource sharing (CORS) is a mechanism that allows JavaScript on a web page
// to make XMLHttpRequests to another domain, not the domain the JavaScript originated from.
//
// http://en.wikipedia.org/wiki/Cross-origin_resource_sharing
// http://enable-cors.org/server.html
// http://www.html5rocks.com/en/tutorials/cors/#toc-handling-a-not-so-simple-request
type CrossOriginResourceSharing struct {
ExposeHeaders []string // list of Header names
AllowedHeaders []string // list of Header names
AllowedDomains []string // list of allowed values for Http Origin. An allowed value can be a regular expression to support subdomain matching. If empty all are allowed.
AllowedMethods []string
MaxAge int // number of seconds before requiring new Options request
CookiesAllowed bool
Container *Container
allowedOriginPatterns []*regexp.Regexp // internal field for origin regexp check.
}
// Filter is a filter function that implements the CORS flow as documented on http://enable-cors.org/server.html
// and http://www.html5rocks.com/static/images/cors_server_flowchart.png
func (c CrossOriginResourceSharing) Filter(req *Request, resp *Response, chain *FilterChain) {
origin := req.Request.Header.Get(HEADER_Origin)
if len(origin) == 0 {
if trace {
traceLogger.Print("no Http header Origin set")
}
chain.ProcessFilter(req, resp)
return
}
if !c.isOriginAllowed(origin) { // check whether this origin is allowed
if trace {
traceLogger.Printf("HTTP Origin:%s is not part of %v, neither matches any part of %v", origin, c.AllowedDomains, c.allowedOriginPatterns)
}
chain.ProcessFilter(req, resp)
return
}
if req.Request.Method != "OPTIONS" {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
if acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod); acrm != "" {
c.doPreflightRequest(req, resp)
} else {
c.doActualRequest(req, resp)
chain.ProcessFilter(req, resp)
return
}
}
func (c CrossOriginResourceSharing) doActualRequest(req *Request, resp *Response) {
c.setOptionsHeaders(req, resp)
// continue processing the response
}
func (c *CrossOriginResourceSharing) doPreflightRequest(req *Request, resp *Response) {
if len(c.AllowedMethods) == 0 {
if c.Container == nil {
c.AllowedMethods = DefaultContainer.computeAllowedMethods(req)
} else {
c.AllowedMethods = c.Container.computeAllowedMethods(req)
}
}
acrm := req.Request.Header.Get(HEADER_AccessControlRequestMethod)
if !c.isValidAccessControlRequestMethod(acrm, c.AllowedMethods) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestMethod,
acrm,
c.AllowedMethods)
}
return
}
acrhs := req.Request.Header.Get(HEADER_AccessControlRequestHeaders)
if len(acrhs) > 0 {
for _, each := range strings.Split(acrhs, ",") {
if !c.isValidAccessControlRequestHeader(strings.Trim(each, " ")) {
if trace {
traceLogger.Printf("Http header %s:%s is not in %v",
HEADER_AccessControlRequestHeaders,
acrhs,
c.AllowedHeaders)
}
return
}
}
}
resp.AddHeader(HEADER_AccessControlAllowMethods, strings.Join(c.AllowedMethods, ","))
resp.AddHeader(HEADER_AccessControlAllowHeaders, acrhs)
c.setOptionsHeaders(req, resp)
// return http 200 response, no body
}
func (c CrossOriginResourceSharing) setOptionsHeaders(req *Request, resp *Response) {
c.checkAndSetExposeHeaders(resp)
c.setAllowOriginHeader(req, resp)
c.checkAndSetAllowCredentials(resp)
if c.MaxAge > 0 {
resp.AddHeader(HEADER_AccessControlMaxAge, strconv.Itoa(c.MaxAge))
}
}
func (c CrossOriginResourceSharing) isOriginAllowed(origin string) bool {
if len(origin) == 0 {
return false
}
if len(c.AllowedDomains) == 0 {
return true
}
allowed := false
for _, domain := range c.AllowedDomains {
if domain == origin {
allowed = true
break
}
}
if !allowed {
if len(c.allowedOriginPatterns) == 0 {
// compile allowed domains to allowed origin patterns
allowedOriginRegexps, err := compileRegexps(c.AllowedDomains)
if err != nil {
return false
}
c.allowedOriginPatterns = allowedOriginRegexps
}
for _, pattern := range c.allowedOriginPatterns {
if allowed = pattern.MatchString(origin); allowed {
break
}
}
}
return allowed
}
func (c CrossOriginResourceSharing) setAllowOriginHeader(req *Request, resp *Response) {
origin := req.Request.Header.Get(HEADER_Origin)
if c.isOriginAllowed(origin) {
resp.AddHeader(HEADER_AccessControlAllowOrigin, origin)
}
}
func (c CrossOriginResourceSharing) checkAndSetExposeHeaders(resp *Response) {
if len(c.ExposeHeaders) > 0 {
resp.AddHeader(HEADER_AccessControlExposeHeaders, strings.Join(c.ExposeHeaders, ","))
}
}
func (c CrossOriginResourceSharing) checkAndSetAllowCredentials(resp *Response) {
if c.CookiesAllowed {
resp.AddHeader(HEADER_AccessControlAllowCredentials, "true")
}
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestMethod(method string, allowedMethods []string) bool {
for _, each := range allowedMethods {
if each == method {
return true
}
}
return false
}
func (c CrossOriginResourceSharing) isValidAccessControlRequestHeader(header string) bool {
for _, each := range c.AllowedHeaders {
if strings.ToLower(each) == strings.ToLower(header) {
return true
}
}
return false
}
// Take a list of strings and compile them into a list of regular expressions.
func compileRegexps(regexpStrings []string) ([]*regexp.Regexp, error) {
regexps := []*regexp.Regexp{}
for _, regexpStr := range regexpStrings {
r, err := regexp.Compile(regexpStr)
if err != nil {
return regexps, err
}
regexps = append(regexps, r)
}
return regexps, nil
}
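
A configuration sketch for this filter on the default container; the field values below are illustrative, not recommendations:

```Go
cors := restful.CrossOriginResourceSharing{
	ExposeHeaders:  []string{"X-My-Header"},
	AllowedHeaders: []string{"Content-Type", "Accept"},
	AllowedDomains: []string{"example.com"}, // empty means all origins are allowed
	AllowedMethods: []string{"GET", "POST"},
	CookiesAllowed: false,
	Container:      restful.DefaultContainer,
}
restful.Filter(cors.Filter)
```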

2
vendor/github.com/emicklei/go-restful/coverage.sh generated vendored Normal file
View File

@@ -0,0 +1,2 @@
go test -coverprofile=coverage.out
go tool cover -html=coverage.out

164
vendor/github.com/emicklei/go-restful/curly.go generated vendored Normal file
View File

@@ -0,0 +1,164 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"net/http"
"regexp"
"sort"
"strings"
)
// CurlyRouter expects Routes with paths that contain zero or more parameters in curly brackets.
type CurlyRouter struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (c CurlyRouter) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selected *Route, err error) {
requestTokens := tokenizePath(httpRequest.URL.Path)
detectedService := c.detectWebService(requestTokens, webServices)
if detectedService == nil {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", httpRequest.URL.Path)
}
return nil, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
candidateRoutes := c.selectRoutes(detectedService, requestTokens)
if len(candidateRoutes) == 0 {
if trace {
traceLogger.Printf("no Route in WebService with path %s was found to match URL path:%s\n", detectedService.rootPath, httpRequest.URL.Path)
}
return detectedService, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
selectedRoute, err := c.detectRoute(candidateRoutes, httpRequest)
if selectedRoute == nil {
return detectedService, nil, err
}
return detectedService, selectedRoute, nil
}
// selectRoutes return a collection of Route from a WebService that matches the path tokens from the request.
func (c CurlyRouter) selectRoutes(ws *WebService, requestTokens []string) sortableCurlyRoutes {
candidates := sortableCurlyRoutes{}
for _, each := range ws.routes {
matches, paramCount, staticCount := c.matchesRouteByPathTokens(each.pathParts, requestTokens)
if matches {
candidates.add(curlyRoute{each, paramCount, staticCount}) // TODO make sure Routes() return pointers?
}
}
sort.Sort(sort.Reverse(candidates))
return candidates
}
// matchesRouteByPathTokens computes whether the route matches, how many parameters match, and how many static path elements there are.
func (c CurlyRouter) matchesRouteByPathTokens(routeTokens, requestTokens []string) (matches bool, paramCount int, staticCount int) {
if len(routeTokens) < len(requestTokens) {
// proceed in matching only if last routeToken is wildcard
count := len(routeTokens)
if count == 0 || !strings.HasSuffix(routeTokens[count-1], "*}") {
return false, 0, 0
}
// proceed
}
for i, routeToken := range routeTokens {
if i == len(requestTokens) {
// reached end of request path
return false, 0, 0
}
requestToken := requestTokens[i]
if strings.HasPrefix(routeToken, "{") {
paramCount++
if colon := strings.Index(routeToken, ":"); colon != -1 {
// match by regex
matchesToken, matchesRemainder := c.regularMatchesPathToken(routeToken, colon, requestToken)
if !matchesToken {
return false, 0, 0
}
if matchesRemainder {
break
}
}
} else { // no { prefix
if requestToken != routeToken {
return false, 0, 0
}
staticCount++
}
}
return true, paramCount, staticCount
}
// regularMatchesPathToken tests whether the regular expression part of routeToken matches the requestToken or all remaining tokens
// format routeToken is {someVar:someExpression}, e.g. {zipcode:[\d][\d][\d][\d][A-Z][A-Z]}
func (c CurlyRouter) regularMatchesPathToken(routeToken string, colon int, requestToken string) (matchesToken bool, matchesRemainder bool) {
regPart := routeToken[colon+1 : len(routeToken)-1]
if regPart == "*" {
if trace {
traceLogger.Printf("wildcard parameter detected in route token %s that matches %s\n", routeToken, requestToken)
}
return true, true
}
matched, err := regexp.MatchString(regPart, requestToken)
return (matched && err == nil), false
}
var jsr311Router = RouterJSR311{}
// detectRoute selects from a list of Route the first match by inspecting both the Accept and Content-Type
// headers of the Request. See also RouterJSR311 in jsr311.go
func (c CurlyRouter) detectRoute(candidateRoutes sortableCurlyRoutes, httpRequest *http.Request) (*Route, error) {
// tracing is done inside detectRoute
return jsr311Router.detectRoute(candidateRoutes.routes(), httpRequest)
}
// detectWebService returns the best matching webService given the list of path tokens.
// see also computeWebserviceScore
func (c CurlyRouter) detectWebService(requestTokens []string, webServices []*WebService) *WebService {
var best *WebService
score := -1
for _, each := range webServices {
matches, eachScore := c.computeWebserviceScore(requestTokens, each.pathExpr.tokens)
if matches && (eachScore > score) {
best = each
score = eachScore
}
}
return best
}
// computeWebserviceScore returns whether tokens match and
// the weighted score of the longest matching consecutive tokens from the beginning.
func (c CurlyRouter) computeWebserviceScore(requestTokens []string, tokens []string) (bool, int) {
if len(tokens) > len(requestTokens) {
return false, 0
}
score := 0
for i := 0; i < len(tokens); i++ {
each := requestTokens[i]
other := tokens[i]
if len(each) == 0 && len(other) == 0 {
score++
continue
}
if len(other) > 0 && strings.HasPrefix(other, "{") {
// no empty match
if len(each) == 0 {
return false, score
}
score += 1
} else {
// not a parameter
if each != other {
return false, score
}
score += (len(tokens) - i) * 10 //fuzzy
}
}
return true, score
}
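
For reference, the kinds of path tokens this router matches; the route definitions below are a sketch and the findPerson/serveStatic handlers are hypothetical:

```Go
// plain path parameter
ws.Route(ws.GET("/users/{user-id}").To(u.findUser))
// parameter restricted by a regular expression
ws.Route(ws.GET("/persons/{name:[A-Z][A-Z]}").To(u.findPerson)) // findPerson is hypothetical
// wildcard parameter matching the remaining tail of the path
ws.Route(ws.GET("/static/{subpath:*}").To(u.serveStatic)) // serveStatic is hypothetical
```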

52
vendor/github.com/emicklei/go-restful/curly_route.go generated vendored Normal file
View File

@@ -0,0 +1,52 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// curlyRoute exists for sorting Routes by the CurlyRouter based on number of parameters and number of static path elements.
type curlyRoute struct {
route Route
paramCount int
staticCount int
}
type sortableCurlyRoutes []curlyRoute
func (s *sortableCurlyRoutes) add(route curlyRoute) {
*s = append(*s, route)
}
func (s sortableCurlyRoutes) routes() (routes []Route) {
for _, each := range s {
routes = append(routes, each.route) // TODO change return type
}
return routes
}
func (s sortableCurlyRoutes) Len() int {
return len(s)
}
func (s sortableCurlyRoutes) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
func (s sortableCurlyRoutes) Less(i, j int) bool {
ci := s[i]
cj := s[j]
// primary key
if ci.staticCount < cj.staticCount {
return true
}
if ci.staticCount > cj.staticCount {
return false
}
// secondary key
if ci.paramCount < cj.paramCount {
return true
}
if ci.paramCount > cj.paramCount {
return false
}
return ci.route.Path < cj.route.Path
}

185
vendor/github.com/emicklei/go-restful/doc.go generated vendored Normal file
View File

@@ -0,0 +1,185 @@
/*
Package restful , a lean package for creating REST-style WebServices without magic.
WebServices and Routes
A WebService has a collection of Route objects that dispatch incoming Http Requests to function calls.
Typically, a WebService has a root path (e.g. /users) and defines common MIME types for its routes.
WebServices must be added to a container (see below) in order to handle Http requests from a server.
A Route is defined by an HTTP method, a URL path and (optionally) the MIME types it consumes (Content-Type) and produces (Accept).
This package has the logic to find the best matching Route and if found, call its Function.
ws := new(restful.WebService)
ws.
Path("/users").
Consumes(restful.MIME_JSON, restful.MIME_XML).
Produces(restful.MIME_JSON, restful.MIME_XML)
ws.Route(ws.GET("/{user-id}").To(u.findUser)) // u is a UserResource
...
// GET http://localhost:8080/users/1
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
id := request.PathParameter("user-id")
...
}
The (*Request, *Response) arguments provide functions for reading information from the request and writing information back to the response.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-user-resource.go with a full implementation.
Regular expression matching Routes
A Route parameter can be specified using the format "uri/{var[:regexp]}" or the special version "uri/{var:*}" for matching the tail of the path.
For example, /persons/{name:[A-Z][A-Z]} can be used to restrict values for the parameter "name" to only contain capital alphabetic characters.
Regular expressions must use the standard Go syntax as described in the regexp package. (https://code.google.com/p/re2/wiki/Syntax)
This feature requires the use of a CurlyRouter.
Containers
A Container holds a collection of WebServices, Filters and a http.ServeMux for multiplexing http requests.
Using the statements "restful.Add(...) and restful.Filter(...)" will register WebServices and Filters to the Default Container.
The Default container of go-restful uses the http.DefaultServeMux.
You can create your own Container and create a new http.Server for that particular container.
container := restful.NewContainer()
server := &http.Server{Addr: ":8081", Handler: container}
Filters
A filter dynamically intercepts requests and responses to transform or use the information contained in the requests or responses.
You can use filters to perform generic logging, measurement, authentication, redirect, set response headers etc.
In the restful package there are three hooks into the request,response flow where filters can be added.
Each filter must define a FilterFunction:
func (req *restful.Request, resp *restful.Response, chain *restful.FilterChain)
Use the following statement to pass the request,response pair to the next filter or RouteFunction
chain.ProcessFilter(req, resp)
Container Filters
These are processed before any registered WebService.
// install a (global) filter for the default container (processed before any webservice)
restful.Filter(globalLogging)
WebService Filters
These are processed before any Route of a WebService.
// install a webservice filter (processed before any route)
ws.Filter(webserviceLogging).Filter(measureTime)
Route Filters
These are processed before calling the function associated with the Route.
// install 2 chained route filters (processed before calling findUser)
ws.Route(ws.GET("/{user-id}").Filter(routeLogging).Filter(NewCountFilter().routeCounter).To(findUser))
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-filters.go with full implementations.
Response Encoding
Two encodings are supported: gzip and deflate. To enable this for all responses:
restful.DefaultContainer.EnableContentEncoding(true)
If a Http request includes the Accept-Encoding header then the response content will be compressed using the specified encoding.
Alternatively, you can create a Filter that performs the encoding and install it per WebService or Route.
See the example https://github.com/emicklei/go-restful/blob/master/examples/restful-encoding-filter.go
OPTIONS support
By installing a pre-defined container filter, your Webservice(s) can respond to the OPTIONS Http request.
Filter(OPTIONSFilter())
CORS
By installing the filter of a CrossOriginResourceSharing (CORS), your WebService(s) can handle CORS requests.
cors := CrossOriginResourceSharing{ExposeHeaders: []string{"X-My-Header"}, CookiesAllowed: false, Container: DefaultContainer}
Filter(cors.Filter)
Error Handling
Unexpected things happen. If a request cannot be processed because of a failure, your service needs to tell via the response what happened and why.
For this reason HTTP status codes exist and it is important to use the correct code in every exceptional situation.
400: Bad Request
If path or query parameters are not valid (content or type) then use http.StatusBadRequest.
404: Not Found
Despite a valid URI, the resource requested may not be available
500: Internal Server Error
If the application logic could not process the request (or write the response) then use http.StatusInternalServerError.
405: Method Not Allowed
The request has a valid URL but the method (GET,PUT,POST,...) is not allowed.
406: Not Acceptable
The request does not have or has an unknown Accept Header set for this operation.
415: Unsupported Media Type
The request does not have or has an unknown Content-Type Header set for this operation.
ServiceError
In addition to setting the correct (error) Http status code, you can choose to write a ServiceError message on the response.
Performance options
This package has several options that affect the performance of your service. It is important to understand them and how you can change it.
restful.DefaultContainer.DoNotRecover(false)
DoNotRecover controls whether panics will be caught to return HTTP 500.
If set to false, the container will recover from panics.
Default value is true
restful.SetCompressorProvider(NewBoundedCachedCompressors(20, 20))
If content encoding is enabled then the default strategy for getting new gzip/zlib writers and readers is to use a sync.Pool.
Because writers are expensive structures, performance improves further when using a preloaded cache. You can also inject your own implementation.
Troubleshooting
This package has the means to produce detail logging of the complete Http request matching process and filter invocation.
Enabling this feature requires you to set a restful.StdLogger implementation (e.g. a log.Logger) such as:
restful.TraceLogger(log.New(os.Stdout, "[restful] ", log.LstdFlags|log.Lshortfile))
Logging
The restful.SetLogger() method allows you to override the logger used by the package. By default restful
uses the standard library `log` package and logs to stdout. Different logging packages are supported as
long as they conform to `StdLogger` interface defined in the `log` sub-package, writing an adapter for your
preferred package is simple.
Resources
[project]: https://github.com/emicklei/go-restful
[examples]: https://github.com/emicklei/go-restful/blob/master/examples
[design]: http://ernestmicklei.com/2012/11/11/go-restful-api-design/
[showcases]: https://github.com/emicklei/mora, https://github.com/emicklei/landskape
(c) 2012-2015, http://ernestmicklei.com. MIT License
*/
package restful
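
Since the Error Handling section above lists the status codes without code, here is a small illustrative handler that reports a missing resource; the in-memory user lookup is hypothetical:

```Go
func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	usr, found := u.users[id] // hypothetical in-memory map of users
	if !found {
		response.WriteErrorString(http.StatusNotFound, "404: User could not be found.")
		return
	}
	response.WriteEntity(usr)
}
```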

View File

@@ -0,0 +1,163 @@
package restful
// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"encoding/json"
"encoding/xml"
"strings"
"sync"
)
// EntityReaderWriter can read and write values using an encoding such as JSON,XML.
type EntityReaderWriter interface {
// Read a serialized version of the value from the request.
// The Request may have a decompressing reader. Depends on Content-Encoding.
Read(req *Request, v interface{}) error
// Write a serialized version of the value on the response.
// The Response may have a compressing writer. Depends on Accept-Encoding.
// status should be a valid Http Status code
Write(resp *Response, status int, v interface{}) error
}
// entityAccessRegistry is a singleton
var entityAccessRegistry = &entityReaderWriters{
protection: new(sync.RWMutex),
accessors: map[string]EntityReaderWriter{},
}
// entityReaderWriters associates MIME to an EntityReaderWriter
type entityReaderWriters struct {
protection *sync.RWMutex
accessors map[string]EntityReaderWriter
}
func init() {
RegisterEntityAccessor(MIME_JSON, NewEntityAccessorJSON(MIME_JSON))
RegisterEntityAccessor(MIME_XML, NewEntityAccessorXML(MIME_XML))
}
// RegisterEntityAccessor adds or overrides the ReaderWriter for encoding content with this MIME type.
func RegisterEntityAccessor(mime string, erw EntityReaderWriter) {
entityAccessRegistry.protection.Lock()
defer entityAccessRegistry.protection.Unlock()
entityAccessRegistry.accessors[mime] = erw
}
// NewEntityAccessorJSON returns a new EntityReaderWriter for accessing JSON content.
// This package is already initialized with such an accessor using the MIME_JSON contentType.
func NewEntityAccessorJSON(contentType string) EntityReaderWriter {
return entityJSONAccess{ContentType: contentType}
}
// NewEntityAccessorXML returns a new EntityReaderWriter for accessing XML content.
// This package is already initialized with such an accessor using the MIME_XML contentType.
func NewEntityAccessorXML(contentType string) EntityReaderWriter {
return entityXMLAccess{ContentType: contentType}
}
// accessorAt returns the registered ReaderWriter for this MIME type.
func (r *entityReaderWriters) accessorAt(mime string) (EntityReaderWriter, bool) {
r.protection.RLock()
defer r.protection.RUnlock()
er, ok := r.accessors[mime]
if !ok {
// retry with reverse lookup
// more expensive but we are in an exceptional situation anyway
for k, v := range r.accessors {
if strings.Contains(mime, k) {
return v, true
}
}
}
return er, ok
}
// entityXMLAccess is a EntityReaderWriter for XML encoding
type entityXMLAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshalls the value from XML
func (e entityXMLAccess) Read(req *Request, v interface{}) error {
return xml.NewDecoder(req.Request.Body).Decode(v)
}
// Write marshals the value to XML and sets the Content-Type Header.
func (e entityXMLAccess) Write(resp *Response, status int, v interface{}) error {
return writeXML(resp, status, e.ContentType, v)
}
// writeXML marshals the value to XML and sets the Content-Type Header.
func writeXML(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := xml.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write([]byte(xml.Header))
if err != nil {
return err
}
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return xml.NewEncoder(resp).Encode(v)
}
// entityJSONAccess is a EntityReaderWriter for JSON encoding
type entityJSONAccess struct {
// This is used for setting the Content-Type header when writing
ContentType string
}
// Read unmarshalls the value from JSON
func (e entityJSONAccess) Read(req *Request, v interface{}) error {
decoder := json.NewDecoder(req.Request.Body)
decoder.UseNumber()
return decoder.Decode(v)
}
// Write marshals the value to JSON and sets the Content-Type Header.
func (e entityJSONAccess) Write(resp *Response, status int, v interface{}) error {
return writeJSON(resp, status, e.ContentType, v)
}
// writeJSON marshals the value to JSON and sets the Content-Type Header.
func writeJSON(resp *Response, status int, contentType string, v interface{}) error {
if v == nil {
resp.WriteHeader(status)
// do not write a nil representation
return nil
}
if resp.prettyPrint {
// pretty output must be created and written explicitly
output, err := json.MarshalIndent(v, " ", " ")
if err != nil {
return err
}
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
_, err = resp.Write(output)
return err
}
// not-so-pretty
resp.Header().Set(HEADER_ContentType, contentType)
resp.WriteHeader(status)
return json.NewEncoder(resp).Encode(v)
}
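
A registration sketch that reuses the built-in JSON accessor for an additional vendor MIME type; the MIME string below is illustrative:

```Go
const mimeVndJSON = "application/vnd.example.v1+json" // hypothetical vendor type

func init() {
	// requests/responses carrying this Content-Type/Accept are now handled as JSON
	restful.RegisterEntityAccessor(mimeVndJSON, restful.NewEntityAccessorJSON(mimeVndJSON))
}
```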

35
vendor/github.com/emicklei/go-restful/filter.go generated vendored Normal file
View File

@@ -0,0 +1,35 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// FilterChain is a request scoped object to process one or more filters before calling the target RouteFunction.
type FilterChain struct {
Filters []FilterFunction // ordered list of FilterFunction
Index int // index into filters that is currently in progress
Target RouteFunction // function to call after passing all filters
}
// ProcessFilter passes the request, response pair on to the next Filter, or to the Target RouteFunction once all Filters have been called.
// Each filter can decide to proceed to the next Filter or handle the Response itself.
func (f *FilterChain) ProcessFilter(request *Request, response *Response) {
if f.Index < len(f.Filters) {
f.Index++
f.Filters[f.Index-1](request, response, f)
} else {
f.Target(request, response)
}
}
// FilterFunction definitions must call ProcessFilter on the FilterChain to pass on the control and eventually call the RouteFunction
type FilterFunction func(*Request, *Response, *FilterChain)
// NoBrowserCacheFilter is a filter function to set HTTP headers that disable browser caching
// See examples/restful-no-cache-filter.go for usage
func NoBrowserCacheFilter(req *Request, resp *Response, chain *FilterChain) {
resp.Header().Set("Cache-Control", "no-cache, no-store, must-revalidate") // HTTP 1.1.
resp.Header().Set("Pragma", "no-cache") // HTTP 1.0.
resp.Header().Set("Expires", "0") // Proxies.
chain.ProcessFilter(req, resp)
}
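
A filter hands control onward by calling chain.ProcessFilter; not calling it short-circuits the request. A hedged sketch of a timing filter on the default container (the handler, path and port are illustrative, not part of the library):

```go
package main

import (
	"log"
	"net/http"
	"time"

	"github.com/emicklei/go-restful"
)

// timingFilter logs how long the remainder of the chain took.
func timingFilter(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
	start := time.Now()
	chain.ProcessFilter(req, resp) // hand control to the next filter or the route function
	log.Printf("%s %s took %v", req.Request.Method, req.Request.URL.Path, time.Since(start))
}

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/ping").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte("pong"))
	}))
	restful.Add(ws)
	restful.Filter(timingFilter)                 // container-wide filter
	restful.Filter(restful.NoBrowserCacheFilter) // the filter defined above
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```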

248
vendor/github.com/emicklei/go-restful/jsr311.go generated vendored Normal file

@@ -0,0 +1,248 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"fmt"
"net/http"
"sort"
)
// RouterJSR311 implements the flow for matching Requests to Routes (and consequently Resource Functions)
// as specified by the JSR311 http://jsr311.java.net/nonav/releases/1.1/spec/spec.html.
// RouterJSR311 implements the Router interface.
// The concept of locators is not implemented.
type RouterJSR311 struct{}
// SelectRoute is part of the Router interface and returns the best match
// for the WebService and its Route for the given Request.
func (r RouterJSR311) SelectRoute(
webServices []*WebService,
httpRequest *http.Request) (selectedService *WebService, selectedRoute *Route, err error) {
// Identify the root resource class (WebService)
dispatcher, finalMatch, err := r.detectDispatcher(httpRequest.URL.Path, webServices)
if err != nil {
return nil, nil, NewError(http.StatusNotFound, "")
}
// Obtain the set of candidate methods (Routes)
routes := r.selectRoutes(dispatcher, finalMatch)
if len(routes) == 0 {
return dispatcher, nil, NewError(http.StatusNotFound, "404: Page Not Found")
}
// Identify the method (Route) that will handle the request
route, ok := r.detectRoute(routes, httpRequest)
return dispatcher, route, ok
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
func (r RouterJSR311) detectRoute(routes []Route, httpRequest *http.Request) (*Route, error) {
// http method
methodOk := []Route{}
for _, each := range routes {
if httpRequest.Method == each.Method {
methodOk = append(methodOk, each)
}
}
if len(methodOk) == 0 {
if trace {
traceLogger.Printf("no Route found (in %d routes) that matches HTTP method %s\n", len(routes), httpRequest.Method)
}
return nil, NewError(http.StatusMethodNotAllowed, "405: Method Not Allowed")
}
inputMediaOk := methodOk
// content-type
contentType := httpRequest.Header.Get(HEADER_ContentType)
inputMediaOk = []Route{}
for _, each := range methodOk {
if each.matchesContentType(contentType) {
inputMediaOk = append(inputMediaOk, each)
}
}
if len(inputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Content-Type: %s\n", len(methodOk), contentType)
}
return nil, NewError(http.StatusUnsupportedMediaType, "415: Unsupported Media Type")
}
// accept
outputMediaOk := []Route{}
accept := httpRequest.Header.Get(HEADER_Accept)
if len(accept) == 0 {
accept = "*/*"
}
for _, each := range inputMediaOk {
if each.matchesAccept(accept) {
outputMediaOk = append(outputMediaOk, each)
}
}
if len(outputMediaOk) == 0 {
if trace {
traceLogger.Printf("no Route found (from %d) that matches HTTP Accept: %s\n", len(inputMediaOk), accept)
}
return nil, NewError(http.StatusNotAcceptable, "406: Not Acceptable")
}
// return r.bestMatchByMedia(outputMediaOk, contentType, accept), nil
return &outputMediaOk[0], nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2
// n/m > n/* > */*
func (r RouterJSR311) bestMatchByMedia(routes []Route, contentType string, accept string) *Route {
// TODO
return &routes[0]
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 2)
func (r RouterJSR311) selectRoutes(dispatcher *WebService, pathRemainder string) []Route {
filtered := &sortableRouteCandidates{}
for _, each := range dispatcher.Routes() {
pathExpr := each.pathExpr
matches := pathExpr.Matcher.FindStringSubmatch(pathRemainder)
if matches != nil {
lastMatch := matches[len(matches)-1]
if len(lastMatch) == 0 || lastMatch == "/" { // only include routes whose path remainder is empty or "/"
filtered.candidates = append(filtered.candidates,
routeCandidate{each, len(matches) - 1, pathExpr.LiteralCount, pathExpr.VarCount})
}
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("WebService on path %s has no routes that match URL path remainder:%s\n", dispatcher.rootPath, pathRemainder)
}
return []Route{}
}
sort.Sort(sort.Reverse(filtered))
// select other routes from the candidates whose expression also matches the path remainder
matchingRoutes := []Route{filtered.candidates[0].route}
for c := 1; c < len(filtered.candidates); c++ {
each := filtered.candidates[c]
if each.route.pathExpr.Matcher.MatchString(pathRemainder) {
matchingRoutes = append(matchingRoutes, each.route)
}
}
return matchingRoutes
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-360003.7.2 (step 1)
func (r RouterJSR311) detectDispatcher(requestPath string, dispatchers []*WebService) (*WebService, string, error) {
filtered := &sortableDispatcherCandidates{}
for _, each := range dispatchers {
matches := each.pathExpr.Matcher.FindStringSubmatch(requestPath)
if matches != nil {
filtered.candidates = append(filtered.candidates,
dispatcherCandidate{each, matches[len(matches)-1], len(matches), each.pathExpr.LiteralCount, each.pathExpr.VarCount})
}
}
if len(filtered.candidates) == 0 {
if trace {
traceLogger.Printf("no WebService was found to match URL path:%s\n", requestPath)
}
return nil, "", errors.New("not found")
}
sort.Sort(sort.Reverse(filtered))
return filtered.candidates[0].dispatcher, filtered.candidates[0].finalMatch, nil
}
// Types and functions to support the sorting of Routes
type routeCandidate struct {
route Route
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
func (r routeCandidate) expressionToMatch() string {
return r.route.pathExpr.Source
}
func (r routeCandidate) String() string {
return fmt.Sprintf("(m=%d,l=%d,n=%d)", r.matchesCount, r.literalCount, r.nonDefaultCount)
}
type sortableRouteCandidates struct {
candidates []routeCandidate
}
func (rcs *sortableRouteCandidates) Len() int {
return len(rcs.candidates)
}
func (rcs *sortableRouteCandidates) Swap(i, j int) {
rcs.candidates[i], rcs.candidates[j] = rcs.candidates[j], rcs.candidates[i]
}
func (rcs *sortableRouteCandidates) Less(i, j int) bool {
ci := rcs.candidates[i]
cj := rcs.candidates[j]
// primary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// secondary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// tertiary key
if ci.nonDefaultCount < cj.nonDefaultCount {
return true
}
if ci.nonDefaultCount > cj.nonDefaultCount {
return false
}
// quaternary key ("source" is interpreted as Path)
return ci.route.Path < cj.route.Path
}
// Types and functions to support the sorting of Dispatchers
type dispatcherCandidate struct {
dispatcher *WebService
finalMatch string
matchesCount int // the number of capturing groups
literalCount int // the number of literal characters (means those not resulting from template variable substitution)
nonDefaultCount int // the number of capturing groups with non-default regular expressions (i.e. not ([^ /]+?))
}
type sortableDispatcherCandidates struct {
candidates []dispatcherCandidate
}
func (dc *sortableDispatcherCandidates) Len() int {
return len(dc.candidates)
}
func (dc *sortableDispatcherCandidates) Swap(i, j int) {
dc.candidates[i], dc.candidates[j] = dc.candidates[j], dc.candidates[i]
}
func (dc *sortableDispatcherCandidates) Less(i, j int) bool {
ci := dc.candidates[i]
cj := dc.candidates[j]
// primary key
if ci.matchesCount < cj.matchesCount {
return true
}
if ci.matchesCount > cj.matchesCount {
return false
}
// secondary key
if ci.literalCount < cj.literalCount {
return true
}
if ci.literalCount > cj.literalCount {
return false
}
// tertiary key
return ci.nonDefaultCount < cj.nonDefaultCount
}
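
To dispatch with this JSR311 selector instead of the default router, it can be installed on a container. A hedged sketch; the NewContainer constructor and the Container.Router setter are assumed from go-restful's public API and are not shown in this diff, and the handler, path and port are illustrative:

```go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	ws := new(restful.WebService)
	ws.Path("/users")
	ws.Route(ws.GET("/{id}").To(func(req *restful.Request, resp *restful.Response) {
		resp.Write([]byte(req.PathParameter("id")))
	}))

	container := restful.NewContainer()
	container.Router(restful.RouterJSR311{}) // swap in the JSR311 selector described above
	container.Add(ws)
	log.Fatal(http.ListenAndServe(":8080", container))
}
```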

34
vendor/github.com/emicklei/go-restful/log/log.go generated vendored Normal file

@@ -0,0 +1,34 @@
package log
import (
stdlog "log"
"os"
)
// StdLogger corresponds to a minimal subset of the interface satisfied by stdlib log.Logger
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
}
var Logger StdLogger
func init() {
// default Logger
SetLogger(stdlog.New(os.Stderr, "[restful] ", stdlog.LstdFlags|stdlog.Lshortfile))
}
// SetLogger sets the logger for this package
func SetLogger(customLogger StdLogger) {
Logger = customLogger
}
// Print delegates to the Logger
func Print(v ...interface{}) {
Logger.Print(v...)
}
// Printf delegates to the Logger
func Printf(format string, v ...interface{}) {
Logger.Printf(format, v...)
}
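
Any value with Print and Printf methods satisfies StdLogger, so a standard *log.Logger can be swapped in. A minimal sketch (the prefix and output stream are illustrative):

```go
package main

import (
	stdlog "log"
	"os"

	restfullog "github.com/emicklei/go-restful/log"
)

func main() {
	// any *log.Logger satisfies StdLogger (Print, Printf)
	restfullog.SetLogger(stdlog.New(os.Stdout, "[api] ", stdlog.LstdFlags))
	restfullog.Printf("logger replaced at %s", "startup")
}
```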

32
vendor/github.com/emicklei/go-restful/logger.go generated vendored Normal file

@@ -0,0 +1,32 @@
package restful
// Copyright 2014 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"github.com/emicklei/go-restful/log"
)
var trace bool = false
var traceLogger log.StdLogger
func init() {
traceLogger = log.Logger // use the package logger by default
}
// TraceLogger enables detailed logging of HTTP request matching and filter invocation. By default no trace logger is set.
// You may call EnableTracing() directly to enable trace logging on the package-wide logger.
func TraceLogger(logger log.StdLogger) {
traceLogger = logger
EnableTracing(logger != nil)
}
// SetLogger exposes the setter for the global logger on the top-level package
func SetLogger(customLogger log.StdLogger) {
log.SetLogger(customLogger)
}
// EnableTracing can be used to turn trace logging on and off.
func EnableTracing(enabled bool) {
trace = enabled
}
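
Trace output only appears once a logger is supplied or EnableTracing(true) is called. A minimal sketch (the prefix is illustrative):

```go
package main

import (
	stdlog "log"
	"os"

	"github.com/emicklei/go-restful"
)

func main() {
	// send request-matching and filter traces to stderr; this also enables tracing
	restful.TraceLogger(stdlog.New(os.Stderr, "[restful-trace] ", stdlog.LstdFlags))
	// tracing can later be switched off again without removing the logger
	restful.EnableTracing(false)
}
```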

45
vendor/github.com/emicklei/go-restful/mime.go generated vendored Normal file

@@ -0,0 +1,45 @@
package restful
import (
"strconv"
"strings"
)
type mime struct {
media string
quality float64
}
// insertMime adds a mime to a list and keeps it sorted by quality.
func insertMime(l []mime, e mime) []mime {
for i, each := range l {
// if the new mime has a higher quality than the current element, insert it before that element
if e.quality > each.quality {
left := append([]mime{}, l[0:i]...)
return append(append(left, e), l[i:]...)
}
}
return append(l, e)
}
// sortedMimes returns a list of mime sorted (desc) by its specified quality.
func sortedMimes(accept string) (sorted []mime) {
for _, each := range strings.Split(accept, ",") {
typeAndQuality := strings.Split(strings.Trim(each, " "), ";")
if len(typeAndQuality) == 1 {
sorted = insertMime(sorted, mime{typeAndQuality[0], 1.0})
} else {
// take factor
parts := strings.Split(typeAndQuality[1], "=")
if len(parts) == 2 {
f, err := strconv.ParseFloat(parts[1], 64)
if err != nil {
traceLogger.Printf("unable to parse quality in %s, %v", each, err)
} else {
sorted = insertMime(sorted, mime{typeAndQuality[0], f})
}
}
}
}
return
}
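
sortedMimes is what the response side uses to honour quality factors, so an Accept header like `application/xml;q=0.5, application/json` prefers JSON. The function is unexported; the sketch below reproduces the same parsing rule with only the standard library to show the expected ordering (illustrative, not the package's own code):

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

type acceptEntry struct {
	media   string
	quality float64
}

func main() {
	accept := "application/xml;q=0.5, application/json, */*;q=0.1"
	var entries []acceptEntry
	for _, part := range strings.Split(accept, ",") {
		fields := strings.Split(strings.TrimSpace(part), ";")
		q := 1.0 // missing quality factor means 1.0
		if len(fields) == 2 {
			if kv := strings.Split(fields[1], "="); len(kv) == 2 {
				if f, err := strconv.ParseFloat(kv[1], 64); err == nil {
					q = f
				}
			}
		}
		entries = append(entries, acceptEntry{fields[0], q})
	}
	// sort descending by quality, keeping the header order for equal qualities
	sort.SliceStable(entries, func(i, j int) bool { return entries[i].quality > entries[j].quality })
	fmt.Println(entries) // [{application/json 1} {application/xml 0.5} {*/* 0.1}]
}
```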

26
vendor/github.com/emicklei/go-restful/options_filter.go generated vendored Normal file

@@ -0,0 +1,26 @@
package restful
import "strings"
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// As for any filter, you can also install it for a particular WebService within a Container.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func (c *Container) OPTIONSFilter(req *Request, resp *Response, chain *FilterChain) {
if "OPTIONS" != req.Request.Method {
chain.ProcessFilter(req, resp)
return
}
resp.AddHeader(HEADER_Allow, strings.Join(c.computeAllowedMethods(req), ","))
}
// OPTIONSFilter is a filter function that inspects the Http Request for the OPTIONS method
// and provides the response with a set of allowed methods for the request URL Path.
// Note: this filter is not needed when using CrossOriginResourceSharing (for CORS).
func OPTIONSFilter() FilterFunction {
return DefaultContainer.OPTIONSFilter
}
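
Installing the filter on the default container is a one-liner. A minimal sketch (the port is illustrative):

```go
package main

import (
	"log"
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	// answer OPTIONS requests with an Allow header for every route on the default container
	restful.Filter(restful.OPTIONSFilter())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```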

114
vendor/github.com/emicklei/go-restful/parameter.go generated vendored Normal file

@@ -0,0 +1,114 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
const (
// PathParameterKind = indicator of Request parameter type "path"
PathParameterKind = iota
// QueryParameterKind = indicator of Request parameter type "query"
QueryParameterKind
// BodyParameterKind = indicator of Request parameter type "body"
BodyParameterKind
// HeaderParameterKind = indicator of Request parameter type "header"
HeaderParameterKind
// FormParameterKind = indicator of Request parameter type "form"
FormParameterKind
)
// Parameter is for documenting a parameter used in an HTTP Request
// ParameterData kinds are Path, Query and Body
type Parameter struct {
data *ParameterData
}
// ParameterData represents the state of a Parameter.
// It is made public to make it accessible to e.g. the Swagger package.
type ParameterData struct {
Name, Description, DataType, DataFormat string
Kind int
Required bool
AllowableValues map[string]string
AllowMultiple bool
DefaultValue string
}
// Data returns the state of the Parameter
func (p *Parameter) Data() ParameterData {
return *p.data
}
// Kind returns the parameter type indicator (see const for valid values)
func (p *Parameter) Kind() int {
return p.data.Kind
}
func (p *Parameter) bePath() *Parameter {
p.data.Kind = PathParameterKind
return p
}
func (p *Parameter) beQuery() *Parameter {
p.data.Kind = QueryParameterKind
return p
}
func (p *Parameter) beBody() *Parameter {
p.data.Kind = BodyParameterKind
return p
}
func (p *Parameter) beHeader() *Parameter {
p.data.Kind = HeaderParameterKind
return p
}
func (p *Parameter) beForm() *Parameter {
p.data.Kind = FormParameterKind
return p
}
// Required sets the required field and returns the receiver
func (p *Parameter) Required(required bool) *Parameter {
p.data.Required = required
return p
}
// AllowMultiple sets the allowMultiple field and returns the receiver
func (p *Parameter) AllowMultiple(multiple bool) *Parameter {
p.data.AllowMultiple = multiple
return p
}
// AllowableValues sets the allowableValues field and returns the receiver
func (p *Parameter) AllowableValues(values map[string]string) *Parameter {
p.data.AllowableValues = values
return p
}
// DataType sets the dataType field and returns the receiver
func (p *Parameter) DataType(typeName string) *Parameter {
p.data.DataType = typeName
return p
}
// DataFormat sets the dataFormat field for Swagger UI
func (p *Parameter) DataFormat(formatName string) *Parameter {
p.data.DataFormat = formatName
return p
}
// DefaultValue sets the default value field and returns the receiver
func (p *Parameter) DefaultValue(stringRepresentation string) *Parameter {
p.data.DefaultValue = stringRepresentation
return p
}
// Description sets the description value field and returns the receiver
func (p *Parameter) Description(doc string) *Parameter {
p.data.Description = doc
return p
}
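
Parameters are normally created through WebService helpers such as QueryParameter and then attached to a route; the fluent setters above fill in ParameterData. A hedged sketch (the handler and route path are illustrative; server wiring is omitted):

```go
package main

import "github.com/emicklei/go-restful"

// search is an illustrative RouteFunction.
func search(req *restful.Request, resp *restful.Response) {
	resp.Write([]byte(req.QueryParameter("q")))
}

func main() {
	ws := new(restful.WebService)
	// a documented, required query parameter built through the fluent setters above
	ws.Route(ws.GET("/search").
		Param(ws.QueryParameter("q", "free text query").Required(true).DataType("string")).
		To(search))
	restful.Add(ws)
}
```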

69
vendor/github.com/emicklei/go-restful/path_expression.go generated vendored Normal file

@@ -0,0 +1,69 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"fmt"
"regexp"
"strings"
)
// PathExpression holds a compiled path expression (RegExp) needed to match against
// Http request paths and to extract path parameter values.
type pathExpression struct {
LiteralCount int // the number of literal characters (means those not resulting from template variable substitution)
VarCount int // the number of named parameters (enclosed by {}) in the path
Matcher *regexp.Regexp
Source string // Path as defined by the RouteBuilder
tokens []string
}
// newPathExpression creates a pathExpression from the input URL path.
// Returns an error if the path is invalid.
func newPathExpression(path string) (*pathExpression, error) {
expression, literalCount, varCount, tokens := templateToRegularExpression(path)
compiled, err := regexp.Compile(expression)
if err != nil {
return nil, err
}
return &pathExpression{literalCount, varCount, compiled, expression, tokens}, nil
}
// http://jsr311.java.net/nonav/releases/1.1/spec/spec3.html#x3-370003.7.3
func templateToRegularExpression(template string) (expression string, literalCount int, varCount int, tokens []string) {
var buffer bytes.Buffer
buffer.WriteString("^")
//tokens = strings.Split(template, "/")
tokens = tokenizePath(template)
for _, each := range tokens {
if each == "" {
continue
}
buffer.WriteString("/")
if strings.HasPrefix(each, "{") {
// check for regular expression in variable
colon := strings.Index(each, ":")
if colon != -1 {
// extract expression
paramExpr := strings.TrimSpace(each[colon+1 : len(each)-1])
if paramExpr == "*" { // special case
buffer.WriteString("(.*)")
} else {
buffer.WriteString(fmt.Sprintf("(%s)", paramExpr)) // between colon and closing moustache
}
} else {
// plain var
buffer.WriteString("([^/]+?)")
}
varCount += 1
} else {
literalCount += len(each)
encoded := each // TODO URI encode
buffer.WriteString(regexp.QuoteMeta(encoded))
}
}
return strings.TrimRight(buffer.String(), "/") + "(/.*)?$", literalCount, varCount, tokens
}
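
templateToRegularExpression is unexported, but its output follows directly from the rules above: "/users/{id}" becomes ^/users/([^/]+?)(/.*)?$ with literalCount 5 and varCount 1. A standalone sketch matching against that hand-derived pattern (the regex is copied from the rules above, not produced by the package):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// regex derived by hand from templateToRegularExpression for "/users/{id}"
	pattern := regexp.MustCompile(`^/users/([^/]+?)(/.*)?$`)
	fmt.Println(pattern.FindStringSubmatch("/users/42"))        // [/users/42 42 ]
	fmt.Println(pattern.FindStringSubmatch("/users/42/orders")) // [/users/42/orders 42 /orders]
	fmt.Println(pattern.MatchString("/groups/42"))              // false
}
```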

113
vendor/github.com/emicklei/go-restful/request.go generated vendored Normal file

@@ -0,0 +1,113 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"compress/zlib"
"net/http"
)
var defaultRequestContentType string
// Request is a wrapper for a http Request that provides convenience methods
type Request struct {
Request *http.Request
pathParameters map[string]string
attributes map[string]interface{} // for storing request-scoped values
selectedRoutePath string // root path + route path that matched the request, e.g. /meetings/{id}/attendees
}
func NewRequest(httpRequest *http.Request) *Request {
return &Request{
Request: httpRequest,
pathParameters: map[string]string{},
attributes: map[string]interface{}{},
} // empty parameters, attributes
}
// DefaultRequestContentType sets the fall-back content type: if Content-Type is missing or */* is given then this type is used,
// otherwise an "Unable to unmarshal content of type:" response is returned.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultRequestContentType(restful.MIME_JSON)
func DefaultRequestContentType(mime string) {
defaultRequestContentType = mime
}
// PathParameter accesses the Path parameter value by its name
func (r *Request) PathParameter(name string) string {
return r.pathParameters[name]
}
// PathParameters accesses the Path parameter values
func (r *Request) PathParameters() map[string]string {
return r.pathParameters
}
// QueryParameter returns the (first) Query parameter value by its name
func (r *Request) QueryParameter(name string) string {
return r.Request.FormValue(name)
}
// BodyParameter parses the body of the request (typically a POST or PUT; it is parsed once) and returns the value of the given name or an error.
func (r *Request) BodyParameter(name string) (string, error) {
err := r.Request.ParseForm()
if err != nil {
return "", err
}
return r.Request.PostFormValue(name), nil
}
// HeaderParameter returns the HTTP Header value of a Header name or empty if missing
func (r *Request) HeaderParameter(name string) string {
return r.Request.Header.Get(name)
}
// ReadEntity checks the Content-Type and Content-Encoding headers and reads the request body into the entityPointer.
func (r *Request) ReadEntity(entityPointer interface{}) (err error) {
contentType := r.Request.Header.Get(HEADER_ContentType)
contentEncoding := r.Request.Header.Get(HEADER_ContentEncoding)
// check if the request body needs decompression
if ENCODING_GZIP == contentEncoding {
gzipReader := currentCompressorProvider.AcquireGzipReader()
defer currentCompressorProvider.ReleaseGzipReader(gzipReader)
gzipReader.Reset(r.Request.Body)
r.Request.Body = gzipReader
} else if ENCODING_DEFLATE == contentEncoding {
zlibReader, err := zlib.NewReader(r.Request.Body)
if err != nil {
return err
}
r.Request.Body = zlibReader
}
// lookup the EntityReader, use defaultRequestContentType if needed and provided
entityReader, ok := entityAccessRegistry.accessorAt(contentType)
if !ok {
if len(defaultRequestContentType) != 0 {
entityReader, ok = entityAccessRegistry.accessorAt(defaultRequestContentType)
}
if !ok {
return NewError(http.StatusBadRequest, "Unable to unmarshal content of type:"+contentType)
}
}
return entityReader.Read(r, entityPointer)
}
// SetAttribute adds or replaces the attribute with the given value.
func (r *Request) SetAttribute(name string, value interface{}) {
r.attributes[name] = value
}
// Attribute returns the value associated to the given name. Returns nil if absent.
func (r Request) Attribute(name string) interface{} {
return r.attributes[name]
}
// SelectedRoutePath root path + route path that matched the request, e.g. /meetings/{id}/attendees
func (r Request) SelectedRoutePath() string {
return r.selectedRoutePath
}
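
A typical route function combines the helpers above: path parameters, ReadEntity (which transparently handles gzip/deflate request bodies), and request attributes. A hedged sketch (the user type, paths and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

// user is an illustrative payload type.
type user struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// updateUser reads the JSON (or XML) body into u using the Request helpers above.
func updateUser(req *restful.Request, resp *restful.Response) {
	u := user{ID: req.PathParameter("id")}
	if err := req.ReadEntity(&u); err != nil {
		resp.WriteError(http.StatusBadRequest, err)
		return
	}
	req.SetAttribute("audit.user", u.ID) // request-scoped value readable by later filters
	resp.WriteEntity(u)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/users").Consumes(restful.MIME_JSON, restful.MIME_XML).Produces(restful.MIME_JSON)
	ws.Route(ws.PUT("/{id}").To(updateUser))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```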

236
vendor/github.com/emicklei/go-restful/response.go generated vendored Normal file

@@ -0,0 +1,236 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"errors"
"net/http"
)
// DefaultResponseMimeType is DEPRECATED, use DefaultResponseContentType(mime)
var DefaultResponseMimeType string
// PrettyPrintResponses controls the indentation feature of XML and JSON serialization
var PrettyPrintResponses = true
// Response is a wrapper on the actual http ResponseWriter
// It provides several convenience methods to prepare and write response content.
type Response struct {
http.ResponseWriter
requestAccept string // mime-type what the Http Request says it wants to receive
routeProduces []string // mime-types what the Route says it can produce
statusCode int // HTTP status code that has been written explicitly (if zero then net/http has written 200)
contentLength int // number of bytes written for the response body
prettyPrint bool // controls the indentation feature of XML and JSON serialization. It is initialized using var PrettyPrintResponses.
err error // err property is kept when WriteError is called
}
// NewResponse creates a new response based on a http ResponseWriter.
func NewResponse(httpWriter http.ResponseWriter) *Response {
return &Response{httpWriter, "", []string{}, http.StatusOK, 0, PrettyPrintResponses, nil} // empty content-types
}
// DefaultResponseContentType sets a default.
// If Accept header matching fails, fall back to this type.
// Valid values are restful.MIME_JSON and restful.MIME_XML
// Example:
// restful.DefaultResponseContentType(restful.MIME_JSON)
func DefaultResponseContentType(mime string) {
DefaultResponseMimeType = mime
}
// InternalServerError writes the StatusInternalServerError header.
// DEPRECATED, use WriteErrorString(http.StatusInternalServerError,reason)
func (r Response) InternalServerError() Response {
r.WriteHeader(http.StatusInternalServerError)
return r
}
// PrettyPrint changes whether this response must produce pretty (line-by-line, indented) JSON or XML output.
func (r *Response) PrettyPrint(bePretty bool) {
r.prettyPrint = bePretty
}
// AddHeader is a shortcut for .Header().Add(header,value)
func (r Response) AddHeader(header string, value string) Response {
r.Header().Add(header, value)
return r
}
// SetRequestAccepts tells the response what Mime-type(s) the HTTP request said it wants to accept. Exposed for testing.
func (r *Response) SetRequestAccepts(mime string) {
r.requestAccept = mime
}
// EntityWriter returns the registered EntityWriter that the entity (requested resource)
// can write according to what the request wants (Accept) and what the Route can produce or what the restful defaults say.
// If called before WriteEntity and WriteHeader then a false return value can be used to write a 406: Not Acceptable.
func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
sorted := sortedMimes(r.requestAccept)
for _, eachAccept := range sorted {
for _, eachProduce := range r.routeProduces {
if eachProduce == eachAccept.media {
if w, ok := entityAccessRegistry.accessorAt(eachAccept.media); ok {
return w, true
}
}
}
if eachAccept.media == "*/*" {
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
}
}
// if requestAccept is empty
writer, ok := entityAccessRegistry.accessorAt(r.requestAccept)
if !ok {
// if not registered then fallback to the defaults (if set)
if DefaultResponseMimeType == MIME_JSON {
return entityAccessRegistry.accessorAt(MIME_JSON)
}
if DefaultResponseMimeType == MIME_XML {
return entityAccessRegistry.accessorAt(MIME_XML)
}
// Fallback to whatever the route says it can produce.
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
for _, each := range r.routeProduces {
if w, ok := entityAccessRegistry.accessorAt(each); ok {
return w, true
}
}
if trace {
traceLogger.Printf("no registered EntityReaderWriter found for %s", r.requestAccept)
}
}
return writer, ok
}
// WriteEntity calls WriteHeaderAndEntity with Http Status OK (200)
func (r *Response) WriteEntity(value interface{}) error {
return r.WriteHeaderAndEntity(http.StatusOK, value)
}
// WriteHeaderAndEntity marshals the value using the representation denoted by the Accept Header and the registered EntityWriters.
// If no Accept header is specified (or */*) then respond with the Content-Type as specified by the first in the Route.Produces.
// If an Accept header is specified then respond with the Content-Type as specified by the first in the Route.Produces that is matched with the Accept header.
// If the value is nil then no response is sent except for the Http status. You may want to call WriteHeader(http.StatusNotFound) instead.
// If there is no writer available that can represent the value in the requested MIME type then Http Status NotAcceptable is written.
// Current implementation ignores any q-parameters in the Accept Header.
// Returns an error if the value could not be written on the response.
func (r *Response) WriteHeaderAndEntity(status int, value interface{}) error {
writer, ok := r.EntityWriter()
if !ok {
r.WriteHeader(http.StatusNotAcceptable)
return nil
}
return writer.Write(r, status, value)
}
// WriteAsXml is a convenience method for writing a value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsXml(value interface{}) error {
return writeXML(r, http.StatusOK, MIME_XML, value)
}
// WriteHeaderAndXml is a convenience method for writing a status and value in xml (requires Xml tags on the value)
// It uses the standard encoding/xml package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndXml(status int, value interface{}) error {
return writeXML(r, status, MIME_XML, value)
}
// WriteAsJson is a convenience method for writing a value in json.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteAsJson(value interface{}) error {
return writeJSON(r, http.StatusOK, MIME_JSON, value)
}
// WriteJson is a convenience method for writing a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteJson(value interface{}, contentType string) error {
return writeJSON(r, http.StatusOK, contentType, value)
}
// WriteHeaderAndJson is a convenience method for writing the status and a value in Json with a given Content-Type.
// It uses the standard encoding/json package for marshalling the value ; not using a registered EntityReaderWriter.
func (r *Response) WriteHeaderAndJson(status int, value interface{}, contentType string) error {
return writeJSON(r, status, contentType, value)
}
// WriteError writes the http status and the error string on the response.
func (r *Response) WriteError(httpStatus int, err error) error {
r.err = err
return r.WriteErrorString(httpStatus, err.Error())
}
// WriteServiceError is a convenience method for responding with a status and a ServiceError
func (r *Response) WriteServiceError(httpStatus int, err ServiceError) error {
r.err = err
return r.WriteHeaderAndEntity(httpStatus, err)
}
// WriteErrorString is a convenience method for an error status with the actual error
func (r *Response) WriteErrorString(httpStatus int, errorReason string) error {
if r.err == nil {
// if not called from WriteError
r.err = errors.New(errorReason)
}
r.WriteHeader(httpStatus)
if _, err := r.Write([]byte(errorReason)); err != nil {
return err
}
return nil
}
// Flush implements http.Flusher interface, which sends any buffered data to the client.
func (r *Response) Flush() {
if f, ok := r.ResponseWriter.(http.Flusher); ok {
f.Flush()
} else if trace {
traceLogger.Printf("ResponseWriter %v doesn't support Flush", r)
}
}
// WriteHeader is overridden to remember the Status Code that has been written.
// Changes to the Header of the response have no effect after this.
func (r *Response) WriteHeader(httpStatus int) {
r.statusCode = httpStatus
r.ResponseWriter.WriteHeader(httpStatus)
}
// StatusCode returns the code that has been written using WriteHeader.
func (r Response) StatusCode() int {
if 0 == r.statusCode {
// no status code has been written yet; assume OK
return http.StatusOK
}
return r.statusCode
}
// Write writes the data to the connection as part of an HTTP reply.
// Write is part of http.ResponseWriter interface.
func (r *Response) Write(bytes []byte) (int, error) {
written, err := r.ResponseWriter.Write(bytes)
r.contentLength += written
return written, err
}
// ContentLength returns the number of bytes written for the response content.
// Note that this value is only correct if all data is written through the Response using its Write* methods.
// Data written directly using the underlying http.ResponseWriter is not accounted for.
func (r Response) ContentLength() int {
return r.contentLength
}
// CloseNotify is part of http.CloseNotifier interface
func (r Response) CloseNotify() <-chan bool {
return r.ResponseWriter.(http.CloseNotifier).CloseNotify()
}
// Error returns the err created by WriteError
func (r Response) Error() error {
return r.err
}
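
The Write* helpers differ mainly in whether content negotiation via EntityWriter is involved. A hedged sketch contrasting them (paths and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

// getThing contrasts the Response helpers above.
func getThing(req *restful.Request, resp *restful.Response) {
	id := req.PathParameter("id")
	if id == "" {
		// plain text error body; the reason is also kept for resp.Error()
		resp.WriteErrorString(http.StatusNotFound, "404: thing not found")
		return
	}
	// negotiated write: EntityWriter picks a writer from the Accept header and Route.Produces
	resp.WriteHeaderAndEntity(http.StatusOK, map[string]string{"id": id})
	// non-negotiated alternatives: resp.WriteAsJson(...), resp.WriteHeaderAndXml(...)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/things").Produces(restful.MIME_JSON, restful.MIME_XML)
	ws.Route(ws.GET("/{id}").To(getThing))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```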

186
vendor/github.com/emicklei/go-restful/route.go generated vendored Normal file

@@ -0,0 +1,186 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"bytes"
"net/http"
"strings"
)
// RouteFunction declares the signature of a function that can be bound to a Route.
type RouteFunction func(*Request, *Response)
// Route binds an HTTP Method, Path, Consumes combination to a RouteFunction.
type Route struct {
Method string
Produces []string
Consumes []string
Path string // webservice root path + described path
Function RouteFunction
Filters []FilterFunction
// cached values for dispatching
relativePath string
pathParts []string
pathExpr *pathExpression // cached compilation of relativePath as RegExp
// documentation
Doc string
Notes string
Operation string
ParameterDocs []*Parameter
ResponseErrors map[int]ResponseError
ReadSample, WriteSample interface{} // structs that model an example request or response payload
// Extra information used to store custom information about the route.
Metadata map[string]interface{}
}
// Initialize for Route
func (r *Route) postBuild() {
r.pathParts = tokenizePath(r.Path)
}
// Create Request and Response from their http versions
func (r *Route) wrapRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
params := r.extractParameters(httpRequest.URL.Path)
wrappedRequest := NewRequest(httpRequest)
wrappedRequest.pathParameters = params
wrappedRequest.selectedRoutePath = r.Path
wrappedResponse := NewResponse(httpWriter)
wrappedResponse.requestAccept = httpRequest.Header.Get(HEADER_Accept)
wrappedResponse.routeProduces = r.Produces
return wrappedRequest, wrappedResponse
}
// dispatchWithFilters calls the function after passing the request through the route's own filters
func (r *Route) dispatchWithFilters(wrappedRequest *Request, wrappedResponse *Response) {
if len(r.Filters) > 0 {
chain := FilterChain{Filters: r.Filters, Target: r.Function}
chain.ProcessFilter(wrappedRequest, wrappedResponse)
} else {
// unfiltered
r.Function(wrappedRequest, wrappedResponse)
}
}
// Return whether the mimeType matches what this Route can produce.
func (r Route) matchesAccept(mimeTypesWithQuality string) bool {
parts := strings.Split(mimeTypesWithQuality, ",")
for _, each := range parts {
var withoutQuality string
if strings.Contains(each, ";") {
withoutQuality = strings.Split(each, ";")[0]
} else {
withoutQuality = each
}
// trim before compare
withoutQuality = strings.Trim(withoutQuality, " ")
if withoutQuality == "*/*" {
return true
}
for _, producibleType := range r.Produces {
if producibleType == "*/*" || producibleType == withoutQuality {
return true
}
}
}
return false
}
// Return whether this Route can consume content with a type specified by mimeTypes (can be empty).
func (r Route) matchesContentType(mimeTypes string) bool {
if len(r.Consumes) == 0 {
// did not specify what it can consume ; any media type (“*/*”) is assumed
return true
}
if len(mimeTypes) == 0 {
// idempotent methods with (most-likely or guaranteed) empty content match missing Content-Type
m := r.Method
if m == "GET" || m == "HEAD" || m == "OPTIONS" || m == "DELETE" || m == "TRACE" {
return true
}
// proceed with default
mimeTypes = MIME_OCTET
}
parts := strings.Split(mimeTypes, ",")
for _, each := range parts {
var contentType string
if strings.Contains(each, ";") {
contentType = strings.Split(each, ";")[0]
} else {
contentType = each
}
// trim before compare
contentType = strings.Trim(contentType, " ")
for _, consumeableType := range r.Consumes {
if consumeableType == "*/*" || consumeableType == contentType {
return true
}
}
}
return false
}
// Extract the parameters from the request url path
func (r Route) extractParameters(urlPath string) map[string]string {
urlParts := tokenizePath(urlPath)
pathParameters := map[string]string{}
for i, key := range r.pathParts {
var value string
if i >= len(urlParts) {
value = ""
} else {
value = urlParts[i]
}
if strings.HasPrefix(key, "{") { // path-parameter
if colon := strings.Index(key, ":"); colon != -1 {
// extract by regex
regPart := key[colon+1 : len(key)-1]
keyPart := key[1:colon]
if regPart == "*" {
pathParameters[keyPart] = untokenizePath(i, urlParts)
break
} else {
pathParameters[keyPart] = value
}
} else {
// without enclosing {}
pathParameters[key[1:len(key)-1]] = value
}
}
}
return pathParameters
}
// Untokenize back into an URL path using the slash separator
func untokenizePath(offset int, parts []string) string {
var buffer bytes.Buffer
for p := offset; p < len(parts); p++ {
buffer.WriteString(parts[p])
// do not end
if p < len(parts)-1 {
buffer.WriteString("/")
}
}
return buffer.String()
}
// Tokenize an URL path using the slash separator ; the result does not have empty tokens
func tokenizePath(path string) []string {
if "/" == path {
return []string{}
}
return strings.Split(strings.Trim(path, "/"), "/")
}
// for debugging
func (r Route) String() string {
return r.Method + " " + r.Path
}
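
extractParameters supports plain variables, regex-constrained ones, and the {name:*} wildcard that swallows the remaining path. A hedged sketch exercising the wildcard form (handler, path and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

func echoPath(req *restful.Request, resp *restful.Response) {
	resp.Write([]byte(req.PathParameter("rest")))
}

func main() {
	ws := new(restful.WebService)
	// {rest:*} is the wildcard form handled by extractParameters above:
	// the remainder of the URL path ends up in the "rest" parameter.
	ws.Route(ws.GET("/static/{rest:*}").To(echoPath))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```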

293
vendor/github.com/emicklei/go-restful/route_builder.go generated vendored Normal file

@@ -0,0 +1,293 @@
package restful
// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.
import (
"fmt"
"os"
"reflect"
"runtime"
"strings"
"sync/atomic"
"github.com/emicklei/go-restful/log"
)
// RouteBuilder is a helper to construct Routes.
type RouteBuilder struct {
rootPath string
currentPath string
produces []string
consumes []string
httpMethod string // required
function RouteFunction // required
filters []FilterFunction
typeNameHandleFunc TypeNameHandleFunction // required
// documentation
doc string
notes string
operation string
readSample, writeSample interface{}
parameters []*Parameter
errorMap map[int]ResponseError
metadata map[string]interface{}
}
// Do evaluates each argument with the RouteBuilder itself.
// This allows you to follow DRY principles without breaking the fluent programming style.
// Example:
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
//
// func Returns500(b *RouteBuilder) {
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
// }
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
for _, each := range oneArgBlocks {
each(b)
}
return b
}
// To binds the route to a function.
// If this route is matched with the incoming Http Request then call this function with the *Request,*Response pair. Required.
func (b *RouteBuilder) To(function RouteFunction) *RouteBuilder {
b.function = function
return b
}
// Method specifies what HTTP method to match. Required.
func (b *RouteBuilder) Method(method string) *RouteBuilder {
b.httpMethod = method
return b
}
// Produces specifies what MIME types can be produced ; the matched one will appear in the Content-Type Http header.
func (b *RouteBuilder) Produces(mimeTypes ...string) *RouteBuilder {
b.produces = mimeTypes
return b
}
// Consumes specifies what MIME types can be consumed ; the request Content-Type HTTP header must match one of these
func (b *RouteBuilder) Consumes(mimeTypes ...string) *RouteBuilder {
b.consumes = mimeTypes
return b
}
// Path specifies the relative (w.r.t WebService root path) URL path to match. Default is "/".
func (b *RouteBuilder) Path(subPath string) *RouteBuilder {
b.currentPath = subPath
return b
}
// Doc tells what this route is all about. Optional.
func (b *RouteBuilder) Doc(documentation string) *RouteBuilder {
b.doc = documentation
return b
}
// A verbose explanation of the operation behavior. Optional.
func (b *RouteBuilder) Notes(notes string) *RouteBuilder {
b.notes = notes
return b
}
// Reads tells what resource type will be read from the request payload. Optional.
// A parameter of type "body" is added, required is set to true and the dataType is set to the qualified name of the sample's type.
func (b *RouteBuilder) Reads(sample interface{}) *RouteBuilder {
fn := b.typeNameHandleFunc
if fn == nil {
fn = reflectTypeName
}
typeAsName := fn(sample)
b.readSample = sample
bodyParameter := &Parameter{&ParameterData{Name: "body"}}
bodyParameter.beBody()
bodyParameter.Required(true)
bodyParameter.DataType(typeAsName)
b.Param(bodyParameter)
return b
}
// ParameterNamed returns a Parameter already known to the RouteBuilder. Returns nil if none is found.
// Use this to modify or extend information for the Parameter (through its Data()).
func (b RouteBuilder) ParameterNamed(name string) (p *Parameter) {
for _, each := range b.parameters {
if each.Data().Name == name {
return each
}
}
return p
}
// Writes tells what resource type will be written as the response payload. Optional.
func (b *RouteBuilder) Writes(sample interface{}) *RouteBuilder {
b.writeSample = sample
return b
}
// Param allows you to document the parameters of the Route. It adds a new Parameter (does not check for duplicates).
func (b *RouteBuilder) Param(parameter *Parameter) *RouteBuilder {
if b.parameters == nil {
b.parameters = []*Parameter{}
}
b.parameters = append(b.parameters, parameter)
return b
}
// Operation allows you to document what the actual method/function call is of the Route.
// Unless called, the operation name is derived from the RouteFunction set using To(..).
func (b *RouteBuilder) Operation(name string) *RouteBuilder {
b.operation = name
return b
}
// ReturnsError is deprecated, use Returns instead.
func (b *RouteBuilder) ReturnsError(code int, message string, model interface{}) *RouteBuilder {
log.Print("ReturnsError is deprecated, use Returns instead.")
return b.Returns(code, message, model)
}
// Returns allows you to document what responses (errors or regular) can be expected.
// The model parameter is optional ; either pass a struct instance or use nil if not applicable.
func (b *RouteBuilder) Returns(code int, message string, model interface{}) *RouteBuilder {
err := ResponseError{
Code: code,
Message: message,
Model: model,
IsDefault: false,
}
// lazy init because there is no NewRouteBuilder (yet)
if b.errorMap == nil {
b.errorMap = map[int]ResponseError{}
}
b.errorMap[code] = err
return b
}
// DefaultReturns is a special Returns call that sets the default of the response ; the code is zero.
func (b *RouteBuilder) DefaultReturns(message string, model interface{}) *RouteBuilder {
b.Returns(0, message, model)
// Modify the ResponseError just added/updated
re := b.errorMap[0]
// errorMap is initialized
b.errorMap[0] = ResponseError{
Code: re.Code,
Message: re.Message,
Model: re.Model,
IsDefault: true,
}
return b
}
// Metadata adds or updates a key=value pair to the metadata map.
func (b *RouteBuilder) Metadata(key string, value interface{}) *RouteBuilder {
if b.metadata == nil {
b.metadata = map[string]interface{}{}
}
b.metadata[key] = value
return b
}
// ResponseError represents a response; not necessarily an error.
type ResponseError struct {
Code int
Message string
Model interface{}
IsDefault bool
}
func (b *RouteBuilder) servicePath(path string) *RouteBuilder {
b.rootPath = path
return b
}
// Filter appends a FilterFunction to the end of filters for this Route to build.
func (b *RouteBuilder) Filter(filter FilterFunction) *RouteBuilder {
b.filters = append(b.filters, filter)
return b
}
// copyDefaults applies the WebService defaults:
// if no specific Produces then set to rootProduces,
// if no specific Consumes then set to rootConsumes.
func (b *RouteBuilder) copyDefaults(rootProduces, rootConsumes []string) {
if len(b.produces) == 0 {
b.produces = rootProduces
}
if len(b.consumes) == 0 {
b.consumes = rootConsumes
}
}
// typeNameHandler sets the function that will convert types to strings in the parameter
// and model definitions.
func (b *RouteBuilder) typeNameHandler(handler TypeNameHandleFunction) *RouteBuilder {
b.typeNameHandleFunc = handler
return b
}
// Build creates a new Route using the specification details collected by the RouteBuilder
func (b *RouteBuilder) Build() Route {
pathExpr, err := newPathExpression(b.currentPath)
if err != nil {
log.Printf("[restful] Invalid path:%s because:%v", b.currentPath, err)
os.Exit(1)
}
if b.function == nil {
log.Printf("[restful] No function specified for route:" + b.currentPath)
os.Exit(1)
}
operationName := b.operation
if len(operationName) == 0 && b.function != nil {
// extract from definition
operationName = nameOfFunction(b.function)
}
route := Route{
Method: b.httpMethod,
Path: concatPath(b.rootPath, b.currentPath),
Produces: b.produces,
Consumes: b.consumes,
Function: b.function,
Filters: b.filters,
relativePath: b.currentPath,
pathExpr: pathExpr,
Doc: b.doc,
Notes: b.notes,
Operation: operationName,
ParameterDocs: b.parameters,
ResponseErrors: b.errorMap,
ReadSample: b.readSample,
WriteSample: b.writeSample,
Metadata: b.metadata}
route.postBuild()
return route
}
func concatPath(path1, path2 string) string {
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
}
var anonymousFuncCount int32
// nameOfFunction returns the short name of the function f for documentation.
// It uses a runtime feature for debugging ; its value may change for later Go versions.
func nameOfFunction(f interface{}) string {
fun := runtime.FuncForPC(reflect.ValueOf(f).Pointer())
tokenized := strings.Split(fun.Name(), ".")
last := tokenized[len(tokenized)-1]
last = strings.TrimSuffix(last, ")·fm") // < Go 1.5
last = strings.TrimSuffix(last, ")-fm") // Go 1.5
last = strings.TrimSuffix(last, "·fm") // < Go 1.5
last = strings.TrimSuffix(last, "-fm") // Go 1.5
if last == "func1" { // this could mean conflicts in API docs
val := atomic.AddInt32(&anonymousFuncCount, 1)
last = "func" + fmt.Sprintf("%d", val)
atomic.StoreInt32(&anonymousFuncCount, val)
}
return last
}
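
Everything the builder collects (docs, parameters, response codes, metadata) ends up on the Route returned by Build. A hedged sketch of a fully documented route (the book type, paths and port are illustrative):

```go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

// book is an illustrative sample type for Reads/Writes documentation.
type book struct {
	Title string `json:"title"`
}

func createBook(req *restful.Request, resp *restful.Response) {
	b := book{}
	if err := req.ReadEntity(&b); err != nil {
		resp.WriteError(http.StatusBadRequest, err)
		return
	}
	resp.WriteHeaderAndEntity(http.StatusCreated, b)
}

func main() {
	ws := new(restful.WebService)
	ws.Path("/books").Consumes(restful.MIME_JSON).Produces(restful.MIME_JSON)
	ws.Route(ws.POST("").To(createBook).
		Doc("create a book").
		Reads(book{}). // adds the required "body" parameter, as shown above
		Writes(book{}).
		Returns(http.StatusCreated, "created", book{}).
		Metadata("owner", "library-team"))
	restful.Add(ws)
	http.ListenAndServe(":8080", nil)
}
```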

Some files were not shown because too many files have changed in this diff.