Compare commits

...

599 Commits

Author SHA1 Message Date
Kubernetes Publisher
0a5e536253 Fix Godeps.json to point to kubernetes-1.10.3 tags 2018-05-21 15:34:53 +00:00
Kubernetes Publisher
c8c875a7f5 Merge pull request #63627 from roycaihw/release-1.10
Automatic merge from submit-queue.

Manual cherrypick of kube-openapi changes for release-1.10

**What this PR does / why we need it**:
Cherry-picks kubernetes/kube-openapi#64 and kubernetes/kube-openapi#67
Fixes bugs that make the apiserver panic when aggregating valid but not well-formed OpenAPI specs (with empty `Paths`/`Definitions`)

**Release note**:

```release-note
Fixes bugs that make the apiserver panic when aggregating valid but not well-formed OpenAPI specs
```

/cc @MaciekPytel
/sig api-machinery
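
For context, a minimal sketch of the defensive merging these fixes imply, using the go-openapi `spec` types that kube-openapi builds on. `mergeSpecs` is a hypothetical helper for illustration, not the actual aggregator code:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/spec"
)

// mergeSpecs copies paths and definitions from src into dst, treating a
// nil/empty Paths or Definitions section as "valid but empty" rather than
// dereferencing it; that missing guard is what made the aggregator panic.
func mergeSpecs(dst, src *spec.Swagger) {
	if src == nil {
		return
	}
	if src.Paths != nil {
		if dst.Paths == nil {
			dst.Paths = &spec.Paths{}
		}
		if dst.Paths.Paths == nil {
			dst.Paths.Paths = map[string]spec.PathItem{}
		}
		for path, item := range src.Paths.Paths {
			dst.Paths.Paths[path] = item
		}
	}
	// Ranging over a nil Definitions map is safe and copies nothing.
	for name, schema := range src.Definitions {
		if dst.Definitions == nil {
			dst.Definitions = spec.Definitions{}
		}
		dst.Definitions[name] = schema
	}
}

func main() {
	dst := &spec.Swagger{}
	src := &spec.Swagger{} // empty Paths/Definitions: must not panic
	mergeSpecs(dst, src)
	fmt.Println("merged without panic")
}
```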

Kubernetes-commit: 42b63c8b19d1ad96399ec3f5a409da67e2fd19bd
2018-05-15 19:51:01 +00:00
Haowei Cai
7db0ad7e48 generated
Kubernetes-commit: 56d903a426f6cdaf420a507f0c36d45058a5bcc0
2018-05-09 14:46:14 -07:00
Kubernetes Publisher
e17af2c44c sync: update godeps 2018-04-17 15:32:41 +00:00
Kubernetes Publisher
ef4b908702 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-09 11:27:42 +00:00
Kubernetes Publisher
703bf442f7 sync: update required packages 2018-04-09 04:00:04 +00:00
Kubernetes Publisher
f2f0db5efa sync: update godeps 2018-04-09 04:00:04 +00:00
Kubernetes Publisher
2e8165f1de sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-09 03:59:46 +00:00
Kubernetes Publisher
101d823aa3 sync: update required packages 2018-04-09 00:00:37 +00:00
Kubernetes Publisher
26467c4f71 sync: update godeps 2018-04-09 00:00:36 +00:00
Kubernetes Publisher
b54ca2b2ed sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-09 00:00:08 +00:00
Kubernetes Publisher
a4f6488158 sync: update required packages 2018-04-08 20:00:43 +00:00
Kubernetes Publisher
83b9b3e620 sync: update godeps 2018-04-08 20:00:43 +00:00
Kubernetes Publisher
8306279e8a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 20:00:25 +00:00
Kubernetes Publisher
b46e928ce5 sync: update required packages 2018-04-08 15:59:26 +00:00
Kubernetes Publisher
54ce7464fe sync: update godeps 2018-04-08 15:59:26 +00:00
Kubernetes Publisher
138ff46c05 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 15:59:08 +00:00
Kubernetes Publisher
051dbf71d6 sync: update required packages 2018-04-08 12:01:38 +00:00
Kubernetes Publisher
9174ee00a1 sync: update godeps 2018-04-08 12:01:38 +00:00
Kubernetes Publisher
0623d8089f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 12:01:15 +00:00
Kubernetes Publisher
73b2d4258d sync: update required packages 2018-04-08 08:00:54 +00:00
Kubernetes Publisher
26dce95f6a sync: update godeps 2018-04-08 08:00:54 +00:00
Kubernetes Publisher
c6f22201c6 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 08:00:36 +00:00
Kubernetes Publisher
6ec6fd1983 sync: update required packages 2018-04-08 03:59:23 +00:00
Kubernetes Publisher
3dce427ec1 sync: update godeps 2018-04-08 03:59:23 +00:00
Kubernetes Publisher
8096957653 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-08 03:59:05 +00:00
Kubernetes Publisher
e06c41c6d3 sync: update required packages 2018-04-07 23:59:22 +00:00
Kubernetes Publisher
91909f643d sync: update godeps 2018-04-07 23:59:22 +00:00
Kubernetes Publisher
6d640c488c sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 23:59:02 +00:00
Kubernetes Publisher
3f9b55f6f6 sync: update required packages 2018-04-07 19:59:46 +00:00
Kubernetes Publisher
82e90ed6be sync: update godeps 2018-04-07 19:59:45 +00:00
Kubernetes Publisher
1cc3a97967 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 19:59:28 +00:00
Kubernetes Publisher
a31424472d sync: update required packages 2018-04-07 15:59:56 +00:00
Kubernetes Publisher
2f989798b8 sync: update godeps 2018-04-07 15:59:55 +00:00
Kubernetes Publisher
786e66ef24 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 15:59:37 +00:00
Kubernetes Publisher
aee8127dbe sync: update required packages 2018-04-07 11:59:44 +00:00
Kubernetes Publisher
16e1de1057 sync: update godeps 2018-04-07 11:59:44 +00:00
Kubernetes Publisher
d9ecc2cb9e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 11:59:26 +00:00
Kubernetes Publisher
a445f3fee5 sync: update required packages 2018-04-07 08:01:00 +00:00
Kubernetes Publisher
3a2d7663f6 sync: update godeps 2018-04-07 08:00:59 +00:00
Kubernetes Publisher
3ffdae14ef sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 08:00:42 +00:00
Kubernetes Publisher
a574c4c3ef sync: update required packages 2018-04-07 04:00:28 +00:00
Kubernetes Publisher
d3203ce5ce sync: update godeps 2018-04-07 04:00:28 +00:00
Kubernetes Publisher
ad5137058f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-07 04:00:07 +00:00
Kubernetes Publisher
607b0c2644 sync: update required packages 2018-04-06 23:59:37 +00:00
Kubernetes Publisher
bce03b60e1 sync: update godeps 2018-04-06 23:59:37 +00:00
Kubernetes Publisher
23bd2cb484 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 23:59:20 +00:00
Kubernetes Publisher
f11cfdebf5 sync: update required packages 2018-04-06 19:58:30 +00:00
Kubernetes Publisher
42f4c2598c sync: update godeps 2018-04-06 19:58:30 +00:00
Kubernetes Publisher
552df61ad6 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 19:58:13 +00:00
Kubernetes Publisher
d5fee05681 sync: update required packages 2018-04-06 16:01:10 +00:00
Kubernetes Publisher
55eaeaaa6e sync: update godeps 2018-04-06 16:01:09 +00:00
Kubernetes Publisher
7ea8e03447 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 16:00:52 +00:00
Kubernetes Publisher
4fa2a6a8e7 sync: update required packages 2018-04-06 12:11:29 +00:00
Kubernetes Publisher
6787e55d7a sync: update godeps 2018-04-06 12:11:29 +00:00
Kubernetes Publisher
1bfb3e4ee7 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-04-06 12:11:06 +00:00
Kubernetes Publisher
0fc2ee2089 sync: update required packages 2018-03-30 19:52:32 +00:00
Kubernetes Publisher
e5d1b3f3fd sync: update godeps 2018-03-30 19:52:32 +00:00
Kubernetes Publisher
40e1ab21a4 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 19:52:13 +00:00
Kubernetes Publisher
17eeebdf53 sync: update required packages 2018-03-30 16:06:50 +00:00
Kubernetes Publisher
6ab57347d8 sync: update godeps 2018-03-30 16:06:49 +00:00
Kubernetes Publisher
cd10a7b49e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 16:06:25 +00:00
Kubernetes Publisher
cd906b0437 sync: update required packages 2018-03-30 12:05:36 +00:00
Kubernetes Publisher
d087cce6e8 sync: update godeps 2018-03-30 12:05:36 +00:00
Kubernetes Publisher
65e22f7740 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 12:05:16 +00:00
Kubernetes Publisher
6bea47b845 sync: update required packages 2018-03-30 08:04:40 +00:00
Kubernetes Publisher
bfd612abf6 sync: update godeps 2018-03-30 08:04:40 +00:00
Kubernetes Publisher
bfad7ed00b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 08:04:18 +00:00
Kubernetes Publisher
f4426ef6fd sync: update required packages 2018-03-30 04:04:10 +00:00
Kubernetes Publisher
4e97399dfc sync: update godeps 2018-03-30 04:04:09 +00:00
Kubernetes Publisher
a4219630cf sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 04:03:50 +00:00
Kubernetes Publisher
bd44661a90 sync: update required packages 2018-03-30 00:06:16 +00:00
Kubernetes Publisher
d541bbad1f sync: update godeps 2018-03-30 00:06:16 +00:00
Kubernetes Publisher
eb0a2c0d76 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-30 00:05:55 +00:00
Kubernetes Publisher
263f7b1675 sync: update required packages 2018-03-29 19:55:55 +00:00
Kubernetes Publisher
fcbb619493 sync: update godeps 2018-03-29 19:55:55 +00:00
Kubernetes Publisher
ad9de770d7 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 19:55:32 +00:00
Kubernetes Publisher
0fa5f94de7 sync: update required packages 2018-03-29 15:52:02 +00:00
Kubernetes Publisher
b34e5fa3ab sync: update godeps 2018-03-29 15:52:01 +00:00
Kubernetes Publisher
d3dfc8394b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 15:51:43 +00:00
Kubernetes Publisher
171ea097e8 sync: update required packages 2018-03-29 11:51:12 +00:00
Kubernetes Publisher
bbd21cee11 sync: update godeps 2018-03-29 11:51:12 +00:00
Kubernetes Publisher
f57d2f1230 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 11:50:55 +00:00
Kubernetes Publisher
22a61a94b3 sync: update required packages 2018-03-29 07:50:38 +00:00
Kubernetes Publisher
85c35b0ce1 sync: update godeps 2018-03-29 07:50:38 +00:00
Kubernetes Publisher
2957cc51dc sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 07:50:21 +00:00
Kubernetes Publisher
6c4483f032 sync: update required packages 2018-03-29 03:51:50 +00:00
Kubernetes Publisher
94fc20b05b sync: update godeps 2018-03-29 03:51:50 +00:00
Kubernetes Publisher
daaea70fab sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-29 03:51:33 +00:00
Kubernetes Publisher
143a8f25ef sync: update required packages 2018-03-28 23:51:34 +00:00
Kubernetes Publisher
17a07f263d sync: update godeps 2018-03-28 23:51:34 +00:00
Kubernetes Publisher
69ef7c63d3 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 23:51:16 +00:00
Kubernetes Publisher
aa2e0fdacc sync: update required packages 2018-03-28 19:52:44 +00:00
Kubernetes Publisher
6d5849604c sync: update godeps 2018-03-28 19:52:44 +00:00
Kubernetes Publisher
26d6339aeb sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 19:52:25 +00:00
Kubernetes Publisher
096b2e5e06 sync: update required packages 2018-03-28 15:53:50 +00:00
Kubernetes Publisher
814ef8b10b sync: update godeps 2018-03-28 15:53:50 +00:00
Kubernetes Publisher
af9002a249 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 15:53:27 +00:00
Kubernetes Publisher
93872e6b3c sync: update required packages 2018-03-28 11:51:57 +00:00
Kubernetes Publisher
ce7ddd0264 sync: update godeps 2018-03-28 11:51:57 +00:00
Kubernetes Publisher
002a25ec7b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 11:51:39 +00:00
Kubernetes Publisher
aed009a5ce sync: update required packages 2018-03-28 07:53:37 +00:00
Kubernetes Publisher
459fe7f5a5 sync: update godeps 2018-03-28 07:53:37 +00:00
Kubernetes Publisher
ee0d40edd7 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 07:53:20 +00:00
Kubernetes Publisher
de7c665163 sync: update required packages 2018-03-28 03:50:41 +00:00
Kubernetes Publisher
6ed54da150 sync: update godeps 2018-03-28 03:50:41 +00:00
Kubernetes Publisher
254113449a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 03:50:24 +00:00
Kubernetes Publisher
59a7c03d48 sync: update required packages 2018-03-28 00:00:31 +00:00
Kubernetes Publisher
e38be22b73 sync: update godeps 2018-03-28 00:00:31 +00:00
Kubernetes Publisher
e70e03b902 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-28 00:00:10 +00:00
Kubernetes Publisher
06977b1cad sync: update required packages 2018-03-27 19:33:43 +00:00
Kubernetes Publisher
431200b658 sync: update godeps 2018-03-27 19:33:43 +00:00
Kubernetes Publisher
b2244ed294 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 19:33:26 +00:00
Kubernetes Publisher
8c8027ca5e sync: update required packages 2018-03-27 15:31:21 +00:00
Kubernetes Publisher
3bfad238e0 sync: update godeps 2018-03-27 15:31:21 +00:00
Kubernetes Publisher
0360e00602 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 15:30:59 +00:00
Kubernetes Publisher
a4a037fd0d sync: update required packages 2018-03-27 11:30:01 +00:00
Kubernetes Publisher
f85ed1782a sync: update godeps 2018-03-27 11:30:01 +00:00
Kubernetes Publisher
0785bd016e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 11:29:44 +00:00
Kubernetes Publisher
2f8b810731 sync: update required packages 2018-03-27 07:29:14 +00:00
Kubernetes Publisher
069c9ac99f sync: update godeps 2018-03-27 07:29:13 +00:00
Kubernetes Publisher
c189d16129 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 07:28:56 +00:00
Kubernetes Publisher
527c0da77d sync: update required packages 2018-03-27 03:29:15 +00:00
Kubernetes Publisher
e9b7f4d02f sync: update godeps 2018-03-27 03:29:15 +00:00
Kubernetes Publisher
5316967904 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-27 03:28:59 +00:00
Kubernetes Publisher
9bcd556d04 sync: update required packages 2018-03-26 23:29:28 +00:00
Kubernetes Publisher
cada811465 sync: update godeps 2018-03-26 23:29:28 +00:00
Kubernetes Publisher
005d07719f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 23:29:13 +00:00
Kubernetes Publisher
85ce0c2dd7 sync: update required packages 2018-03-26 19:29:30 +00:00
Kubernetes Publisher
2ae6687d91 sync: update godeps 2018-03-26 19:29:30 +00:00
Kubernetes Publisher
b72743f639 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 19:29:13 +00:00
Kubernetes Publisher
7361e3cc64 sync: update required packages 2018-03-26 15:29:06 +00:00
Kubernetes Publisher
e9c16dee9c sync: update godeps 2018-03-26 15:29:06 +00:00
Kubernetes Publisher
b94f49e974 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 15:28:43 +00:00
Kubernetes Publisher
afa59796ba sync: update required packages 2018-03-26 11:28:39 +00:00
Kubernetes Publisher
dcc76b0fe3 sync: update godeps 2018-03-26 11:28:39 +00:00
Kubernetes Publisher
cad1a49514 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 11:28:23 +00:00
Kubernetes Publisher
3573f521f4 sync: update required packages 2018-03-26 07:27:59 +00:00
Kubernetes Publisher
50a4e3d5cd sync: update godeps 2018-03-26 07:27:59 +00:00
Kubernetes Publisher
134076efed sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 07:27:42 +00:00
Kubernetes Publisher
ac1a6baa7e sync: update required packages 2018-03-26 03:31:53 +00:00
Kubernetes Publisher
c5653a809b sync: update godeps 2018-03-26 03:31:53 +00:00
Kubernetes Publisher
73e3a39ac0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-26 03:31:37 +00:00
Kubernetes Publisher
052ce245ab sync: update required packages 2018-03-25 23:32:12 +00:00
Kubernetes Publisher
09ffc3c5e5 sync: update godeps 2018-03-25 23:32:11 +00:00
Kubernetes Publisher
069568cbfd sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-25 23:31:53 +00:00
Kubernetes Publisher
2e37965b56 sync: update required packages 2018-03-24 23:35:09 +00:00
Kubernetes Publisher
7d43ebed9f sync: update godeps 2018-03-24 23:35:08 +00:00
Kubernetes Publisher
f52d19b0b5 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 23:34:51 +00:00
Kubernetes Publisher
e37ec669e6 sync: update required packages 2018-03-24 19:35:46 +00:00
Kubernetes Publisher
7f9d41036e sync: update godeps 2018-03-24 19:35:45 +00:00
Kubernetes Publisher
b728988179 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 19:35:26 +00:00
Kubernetes Publisher
6f6cb2c8de sync: update required packages 2018-03-24 15:35:22 +00:00
Kubernetes Publisher
a203f845e6 sync: update godeps 2018-03-24 15:35:22 +00:00
Kubernetes Publisher
29a12bb38d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 15:35:01 +00:00
Kubernetes Publisher
eb6dc66a73 sync: update required packages 2018-03-24 11:36:22 +00:00
Kubernetes Publisher
382b8f2372 sync: update godeps 2018-03-24 11:36:22 +00:00
Kubernetes Publisher
a2dcb20649 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 11:36:01 +00:00
Kubernetes Publisher
133f2bfd95 sync: update required packages 2018-03-24 07:38:44 +00:00
Kubernetes Publisher
75fc2c0b2d sync: update godeps 2018-03-24 07:38:44 +00:00
Kubernetes Publisher
babb08cae8 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 07:38:26 +00:00
Kubernetes Publisher
6430cce125 sync: update required packages 2018-03-24 03:37:09 +00:00
Kubernetes Publisher
af9def9c69 sync: update godeps 2018-03-24 03:37:09 +00:00
Kubernetes Publisher
025d6a6537 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-24 03:36:51 +00:00
Kubernetes Publisher
3c50d9ef20 sync: update required packages 2018-03-23 23:35:05 +00:00
Kubernetes Publisher
fe338e3d63 sync: update godeps 2018-03-23 23:35:04 +00:00
Kubernetes Publisher
52140feca0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 23:34:45 +00:00
Kubernetes Publisher
de31bdcfd4 sync: update required packages 2018-03-23 19:37:16 +00:00
Kubernetes Publisher
13dac82ec7 sync: update godeps 2018-03-23 19:37:15 +00:00
Kubernetes Publisher
7c6712734e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 19:36:57 +00:00
Kubernetes Publisher
cdfc4c408a sync: update required packages 2018-03-23 15:35:47 +00:00
Kubernetes Publisher
33744e7f7a sync: update godeps 2018-03-23 15:35:47 +00:00
Kubernetes Publisher
d64377d656 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 15:35:29 +00:00
Kubernetes Publisher
1cba3a84dd sync: update required packages 2018-03-23 11:36:10 +00:00
Kubernetes Publisher
51a1b5be9c sync: update godeps 2018-03-23 11:36:09 +00:00
Kubernetes Publisher
d5e8acf0db sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 11:35:49 +00:00
Kubernetes Publisher
1beb16e925 sync: update required packages 2018-03-23 07:36:00 +00:00
Kubernetes Publisher
80701500be sync: update godeps 2018-03-23 07:35:59 +00:00
Kubernetes Publisher
af1ec638a7 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 07:35:36 +00:00
Kubernetes Publisher
9018ed043d sync: update required packages 2018-03-23 03:36:25 +00:00
Kubernetes Publisher
ec5c93e797 sync: update godeps 2018-03-23 03:36:25 +00:00
Kubernetes Publisher
0fae0b8942 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-23 03:36:06 +00:00
Kubernetes Publisher
697eb0bc7e sync: update required packages 2018-03-22 23:36:13 +00:00
Kubernetes Publisher
c1fb559423 sync: update godeps 2018-03-22 23:36:12 +00:00
Kubernetes Publisher
68b9c895d6 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 23:35:55 +00:00
Kubernetes Publisher
a87197d49f sync: update required packages 2018-03-22 19:33:42 +00:00
Kubernetes Publisher
66b748cc27 sync: update godeps 2018-03-22 19:33:41 +00:00
Kubernetes Publisher
bd8bae209d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 19:33:23 +00:00
Kubernetes Publisher
5a1a4c87c2 sync: update required packages 2018-03-22 15:39:51 +00:00
Kubernetes Publisher
85be37b550 sync: update godeps 2018-03-22 15:39:51 +00:00
Kubernetes Publisher
31874c5425 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 15:39:32 +00:00
Kubernetes Publisher
6ab651c1b3 sync: update required packages 2018-03-22 11:35:43 +00:00
Kubernetes Publisher
9daf4b179c sync: update godeps 2018-03-22 11:35:43 +00:00
Kubernetes Publisher
e1bb1a2c4a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 11:35:24 +00:00
Kubernetes Publisher
687016834b sync: update required packages 2018-03-22 07:36:58 +00:00
Kubernetes Publisher
aa2a87ff60 sync: update godeps 2018-03-22 07:36:58 +00:00
Kubernetes Publisher
c810e0a1d3 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 07:36:41 +00:00
Kubernetes Publisher
6ffd6147c3 sync: update required packages 2018-03-22 03:32:54 +00:00
Kubernetes Publisher
4915a979e2 sync: update godeps 2018-03-22 03:32:54 +00:00
Kubernetes Publisher
dafd52497e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-22 03:32:36 +00:00
Kubernetes Publisher
eb0a44a5c4 sync: update required packages 2018-03-21 23:33:54 +00:00
Kubernetes Publisher
003afb1a98 sync: update godeps 2018-03-21 23:33:54 +00:00
Kubernetes Publisher
24f3c23b16 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 23:33:38 +00:00
Kubernetes Publisher
635b924ef2 sync: update required packages 2018-03-21 19:37:29 +00:00
Kubernetes Publisher
7997cd4908 sync: update godeps 2018-03-21 19:37:28 +00:00
Kubernetes Publisher
9c9cc9adfb sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 19:37:10 +00:00
Kubernetes Publisher
c46b2e4d42 sync: update required packages 2018-03-21 15:32:00 +00:00
Kubernetes Publisher
71a44d2fd1 sync: update godeps 2018-03-21 15:32:00 +00:00
Kubernetes Publisher
6485bcd146 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 15:31:44 +00:00
Kubernetes Publisher
97a525710a sync: update required packages 2018-03-21 11:32:09 +00:00
Kubernetes Publisher
91861f7442 sync: update godeps 2018-03-21 11:32:09 +00:00
Kubernetes Publisher
e8f07fa4f1 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 11:31:50 +00:00
Kubernetes Publisher
a64b1d437a sync: update required packages 2018-03-21 07:30:42 +00:00
Kubernetes Publisher
1411e2a728 sync: update godeps 2018-03-21 07:30:42 +00:00
Kubernetes Publisher
3d7b848e77 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 07:30:24 +00:00
Kubernetes Publisher
9722b78fad sync: update required packages 2018-03-21 03:31:09 +00:00
Kubernetes Publisher
0eebc4312f sync: update godeps 2018-03-21 03:31:08 +00:00
Kubernetes Publisher
7df88536b3 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-21 03:30:52 +00:00
Kubernetes Publisher
23fdb17340 sync: update required packages 2018-03-20 23:31:49 +00:00
Kubernetes Publisher
7e5ae68247 sync: update godeps 2018-03-20 23:31:49 +00:00
Kubernetes Publisher
1532ffa9b6 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 23:31:32 +00:00
Kubernetes Publisher
424afdc178 sync: update required packages 2018-03-20 19:33:50 +00:00
Kubernetes Publisher
c1502dfc36 sync: update godeps 2018-03-20 19:33:49 +00:00
Kubernetes Publisher
b389cf8b5c sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 19:33:29 +00:00
Kubernetes Publisher
fdb4838363 sync: update required packages 2018-03-20 15:39:41 +00:00
Kubernetes Publisher
78ad53fb9b sync: update godeps 2018-03-20 15:39:41 +00:00
Kubernetes Publisher
6a3303dffa sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 15:39:24 +00:00
Kubernetes Publisher
36c2bb2dbb sync: update required packages 2018-03-20 10:19:53 +00:00
Kubernetes Publisher
d7770c5705 sync: update godeps 2018-03-20 10:19:52 +00:00
Kubernetes Publisher
ab61fd3857 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 10:19:35 +00:00
Kubernetes Publisher
e35dd00024 sync: update required packages 2018-03-20 06:21:56 +00:00
Kubernetes Publisher
abca49aae7 sync: update godeps 2018-03-20 06:21:55 +00:00
Kubernetes Publisher
8e5edb641f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 06:21:37 +00:00
Kubernetes Publisher
42bd6f720c sync: update required packages 2018-03-20 02:34:53 +00:00
Kubernetes Publisher
6e7e027bed sync: update godeps 2018-03-20 02:34:53 +00:00
Kubernetes Publisher
c6901a5311 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-20 02:34:32 +00:00
Kubernetes Publisher
8a03b8c386 sync: update required packages 2018-03-19 22:12:37 +00:00
Kubernetes Publisher
c06f72b066 sync: update godeps 2018-03-19 22:12:37 +00:00
Kubernetes Publisher
c039cdc2e8 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 22:12:18 +00:00
Kubernetes Publisher
cac335b012 sync: update required packages 2018-03-19 18:16:05 +00:00
Kubernetes Publisher
55e9dd7299 sync: update godeps 2018-03-19 18:16:05 +00:00
Kubernetes Publisher
946295bd47 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 18:15:42 +00:00
Kubernetes Publisher
c44670caf1 sync: update required packages 2018-03-19 14:09:17 +00:00
Kubernetes Publisher
97417d3768 sync: update godeps 2018-03-19 14:09:17 +00:00
Kubernetes Publisher
17e5c8f698 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 14:09:01 +00:00
Kubernetes Publisher
a115a855ba sync: update required packages 2018-03-19 10:07:30 +00:00
Kubernetes Publisher
d387c91b97 sync: update godeps 2018-03-19 10:07:30 +00:00
Kubernetes Publisher
0ba420a631 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 10:07:15 +00:00
Kubernetes Publisher
cfab3924db sync: update required packages 2018-03-19 06:08:04 +00:00
Kubernetes Publisher
1f22f5920c sync: update godeps 2018-03-19 06:08:04 +00:00
Kubernetes Publisher
86a034f052 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 06:07:42 +00:00
Kubernetes Publisher
e319be2fcf sync: update required packages 2018-03-19 02:08:15 +00:00
Kubernetes Publisher
0828d2b0b5 sync: update godeps 2018-03-19 02:08:14 +00:00
Kubernetes Publisher
5579549045 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-19 02:08:00 +00:00
Kubernetes Publisher
c910190c78 sync: update required packages 2018-03-18 22:08:39 +00:00
Kubernetes Publisher
c5b69f91fa sync: update godeps 2018-03-18 22:08:38 +00:00
Kubernetes Publisher
2d59b39d2b sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 22:08:23 +00:00
Kubernetes Publisher
2e5d8cbeab sync: update required packages 2018-03-18 18:08:10 +00:00
Kubernetes Publisher
5e9e0b0604 sync: update godeps 2018-03-18 18:08:09 +00:00
Kubernetes Publisher
f11cfe2e23 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 18:07:54 +00:00
Kubernetes Publisher
80f2fc1cc2 sync: update required packages 2018-03-18 14:07:41 +00:00
Kubernetes Publisher
86bca46230 sync: update godeps 2018-03-18 14:07:41 +00:00
Kubernetes Publisher
69fc5c3a8a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 14:07:27 +00:00
Kubernetes Publisher
6a534c393d sync: update required packages 2018-03-18 10:07:14 +00:00
Kubernetes Publisher
6f124ebfb8 sync: update godeps 2018-03-18 10:07:14 +00:00
Kubernetes Publisher
fda4275fb0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 10:06:59 +00:00
Kubernetes Publisher
4d2c899715 sync: update required packages 2018-03-18 06:06:43 +00:00
Kubernetes Publisher
2ebd366e43 sync: update godeps 2018-03-18 06:06:43 +00:00
Kubernetes Publisher
81ba2581d2 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 06:06:28 +00:00
Kubernetes Publisher
a77beea55a sync: update required packages 2018-03-18 02:07:27 +00:00
Kubernetes Publisher
8792f91a61 sync: update godeps 2018-03-18 02:07:27 +00:00
Kubernetes Publisher
d13469ed75 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-18 02:07:12 +00:00
Kubernetes Publisher
4183b3a644 sync: update required packages 2018-03-17 22:07:55 +00:00
Kubernetes Publisher
f1e3fb6696 sync: update godeps 2018-03-17 22:07:54 +00:00
Kubernetes Publisher
c555878b0e sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 22:07:40 +00:00
Kubernetes Publisher
bd5b9aef46 sync: update required packages 2018-03-17 18:08:23 +00:00
Kubernetes Publisher
215be0f23b sync: update godeps 2018-03-17 18:08:23 +00:00
Kubernetes Publisher
2eefb1d959 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 18:08:08 +00:00
Kubernetes Publisher
2ebf1c4bba sync: update required packages 2018-03-17 14:07:36 +00:00
Kubernetes Publisher
3d40b48cd7 sync: update godeps 2018-03-17 14:07:35 +00:00
Kubernetes Publisher
418d6d4001 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 14:07:20 +00:00
Kubernetes Publisher
49cf05089e sync: update required packages 2018-03-17 10:06:54 +00:00
Kubernetes Publisher
083276d636 sync: update godeps 2018-03-17 10:06:54 +00:00
Kubernetes Publisher
c32aa15559 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 10:06:40 +00:00
Kubernetes Publisher
9b588e7d54 sync: update required packages 2018-03-17 06:07:05 +00:00
Kubernetes Publisher
685a83a649 sync: update godeps 2018-03-17 06:07:04 +00:00
Kubernetes Publisher
5b5f8521ac sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 06:06:46 +00:00
Kubernetes Publisher
37150c5d59 sync: update required packages 2018-03-17 02:07:59 +00:00
Kubernetes Publisher
2772371d50 sync: update godeps 2018-03-17 02:07:58 +00:00
Kubernetes Publisher
6345ab5a5f sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-17 02:07:44 +00:00
Kubernetes Publisher
f30b502353 sync: update required packages 2018-03-16 22:07:45 +00:00
Kubernetes Publisher
7d744fa50e sync: update godeps 2018-03-16 22:07:45 +00:00
Kubernetes Publisher
19c9ee414d sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 22:07:30 +00:00
Kubernetes Publisher
a7c2881ee5 sync: update required packages 2018-03-16 18:07:52 +00:00
Kubernetes Publisher
015c2ddf28 sync: update godeps 2018-03-16 18:07:52 +00:00
Kubernetes Publisher
2f89831d5a sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 18:07:38 +00:00
Kubernetes Publisher
31c8529c96 sync: update required packages 2018-03-16 14:08:01 +00:00
Kubernetes Publisher
d21d0d1222 sync: update godeps 2018-03-16 14:08:01 +00:00
Kubernetes Publisher
f7f3a69639 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 14:07:47 +00:00
Kubernetes Publisher
7e2b572de8 sync: update required packages 2018-03-16 10:07:17 +00:00
Kubernetes Publisher
00bc0cc54d sync: update godeps 2018-03-16 10:07:17 +00:00
Kubernetes Publisher
482736d633 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 10:07:02 +00:00
Kubernetes Publisher
1a3f6402c4 sync: update required packages 2018-03-16 06:05:47 +00:00
Kubernetes Publisher
3f14529b52 sync: update godeps 2018-03-16 06:05:47 +00:00
Kubernetes Publisher
9e149a1204 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 06:05:33 +00:00
Kubernetes Publisher
add31d048b sync: update required packages 2018-03-16 02:07:46 +00:00
Kubernetes Publisher
1533869bbf sync: update godeps 2018-03-16 02:07:46 +00:00
Kubernetes Publisher
83c5457f37 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-16 02:07:31 +00:00
Kubernetes Publisher
9853a61a74 sync: update required packages 2018-03-15 22:07:44 +00:00
Kubernetes Publisher
32f4c1a3b3 sync: update godeps 2018-03-15 22:07:44 +00:00
Kubernetes Publisher
43c4c29266 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 22:07:30 +00:00
Kubernetes Publisher
c767bd8ea4 sync: update required packages 2018-03-15 18:08:17 +00:00
Kubernetes Publisher
670ea453dc sync: update godeps 2018-03-15 18:08:16 +00:00
Kubernetes Publisher
a1272159d0 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 18:08:02 +00:00
Kubernetes Publisher
73a6797833 sync: update required packages 2018-03-15 14:07:01 +00:00
Kubernetes Publisher
c1a78d5566 sync: update godeps 2018-03-15 14:07:00 +00:00
Kubernetes Publisher
3403c1c221 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 14:06:46 +00:00
Kubernetes Publisher
0186494bf8 sync: update required packages 2018-03-15 10:13:57 +00:00
Kubernetes Publisher
7b2f02651c sync: update godeps 2018-03-15 10:13:57 +00:00
Kubernetes Publisher
a9b8576de2 sync: initially remove files BUILD */BUILD BUILD.bazel */BUILD.bazel 2018-03-15 10:13:43 +00:00
Kubernetes Publisher
6f352c35bd sync: update required packages 2018-03-14 19:35:37 +00:00
Kubernetes Publisher
bf47ba396e sync: update godeps 2018-03-14 19:35:37 +00:00
Kubernetes Publisher
76c69f1e56 sync: update required packages 2018-03-14 15:34:49 +00:00
Kubernetes Publisher
c4a451c86d sync: update godeps 2018-03-14 15:34:49 +00:00
Kubernetes Publisher
33b6449064 sync: update required packages 2018-03-14 11:35:56 +00:00
Kubernetes Publisher
581bcdfd16 sync: update godeps 2018-03-14 11:35:56 +00:00
Kubernetes Publisher
84ac2ed656 sync: update required packages 2018-03-14 07:34:21 +00:00
Kubernetes Publisher
9bd2d834d4 sync: update godeps 2018-03-14 07:34:21 +00:00
Kubernetes Publisher
6e2779d244 sync: update required packages 2018-03-14 03:35:31 +00:00
Kubernetes Publisher
1fc09990a3 sync: update godeps 2018-03-14 03:35:31 +00:00
Kubernetes Publisher
524c2c3c0b sync: update required packages 2018-03-13 23:35:21 +00:00
Kubernetes Publisher
377b5807f9 sync: update godeps 2018-03-13 23:35:21 +00:00
Kubernetes Publisher
099238c893 sync: update required packages 2018-03-13 19:42:27 +00:00
Kubernetes Publisher
d773dd3c9a sync: update godeps 2018-03-13 19:42:27 +00:00
Kubernetes Publisher
0d20602d81 sync: update required packages 2018-03-13 15:38:08 +00:00
Kubernetes Publisher
cc4d8b840a sync: update godeps 2018-03-13 15:38:08 +00:00
Kubernetes Publisher
692007dd7f sync: update required packages 2018-03-13 11:40:06 +00:00
Kubernetes Publisher
8a5196b4c2 sync: update godeps 2018-03-13 11:40:06 +00:00
Kubernetes Publisher
95c1505963 sync: update required packages 2018-03-13 07:39:35 +00:00
Kubernetes Publisher
dc79eaa7ee sync: update godeps 2018-03-13 07:39:35 +00:00
Kubernetes Publisher
42aaf9654c sync: update required packages 2018-03-13 03:41:38 +00:00
Kubernetes Publisher
ddf83985cd sync: update godeps 2018-03-13 03:41:38 +00:00
Kubernetes Publisher
ef6d57380a sync: update required packages 2018-03-12 23:41:48 +00:00
Kubernetes Publisher
6fd418735c sync: update godeps 2018-03-12 23:41:48 +00:00
Kubernetes Publisher
cadcc5633e sync: update required packages 2018-03-12 19:37:34 +00:00
Kubernetes Publisher
e108570dcf sync: update godeps 2018-03-12 19:37:33 +00:00
Kubernetes Publisher
6deb56d2ec sync: update required packages 2018-03-12 15:35:58 +00:00
Kubernetes Publisher
5f11229638 sync: update godeps 2018-03-12 15:35:57 +00:00
Kubernetes Publisher
675c719e29 sync: update required packages 2018-03-12 11:37:11 +00:00
Kubernetes Publisher
c269e2ba9b sync: update godeps 2018-03-12 11:37:11 +00:00
Kubernetes Publisher
04cc38884f sync: update required packages 2018-03-12 07:36:01 +00:00
Kubernetes Publisher
a72f2a9d5c sync: update godeps 2018-03-12 07:36:01 +00:00
Kubernetes Publisher
42535640b0 sync: update required packages 2018-03-12 03:44:14 +00:00
Kubernetes Publisher
f5c017b582 sync: update godeps 2018-03-12 03:44:13 +00:00
Kubernetes Publisher
b1316120b0 sync: update required packages 2018-03-11 23:43:35 +00:00
Kubernetes Publisher
947a2e8724 sync: update godeps 2018-03-11 23:43:35 +00:00
Kubernetes Publisher
0a88455cbf sync: update required packages 2018-03-11 19:36:12 +00:00
Kubernetes Publisher
1eacb0738d sync: update godeps 2018-03-11 19:36:12 +00:00
Kubernetes Publisher
07182f6e5b sync: update required packages 2018-03-11 15:34:58 +00:00
Kubernetes Publisher
c297a81498 sync: update godeps 2018-03-11 15:34:57 +00:00
Kubernetes Publisher
3a95ae3206 sync: update required packages 2018-03-11 11:33:53 +00:00
Kubernetes Publisher
367cdaf712 sync: update godeps 2018-03-11 11:33:53 +00:00
Kubernetes Publisher
2843fd0d1d sync: update required packages 2018-03-11 07:34:35 +00:00
Kubernetes Publisher
ac9b2029a5 sync: update godeps 2018-03-11 07:34:35 +00:00
Kubernetes Publisher
e5a65f61c1 sync: update required packages 2018-03-11 03:36:42 +00:00
Kubernetes Publisher
14316764fe sync: update godeps 2018-03-11 03:36:42 +00:00
Kubernetes Publisher
2bf8a868e7 sync: update required packages 2018-03-10 23:33:19 +00:00
Kubernetes Publisher
f1c608de72 sync: update godeps 2018-03-10 23:33:19 +00:00
Kubernetes Publisher
5de782aeb9 sync: update required packages 2018-03-10 19:36:39 +00:00
Kubernetes Publisher
1b5e8f0e6a sync: update godeps 2018-03-10 19:36:39 +00:00
Kubernetes Publisher
42742eca95 sync: update required packages 2018-03-10 15:35:17 +00:00
Kubernetes Publisher
039864fd90 sync: update godeps 2018-03-10 15:35:17 +00:00
Kubernetes Publisher
248642a84a sync: update required packages 2018-03-10 11:36:26 +00:00
Kubernetes Publisher
e46f5b2dc4 sync: update godeps 2018-03-10 11:36:25 +00:00
Kubernetes Publisher
0341e918f5 sync: update required packages 2018-03-10 07:35:49 +00:00
Kubernetes Publisher
e124fb26ed sync: update godeps 2018-03-10 07:35:49 +00:00
Kubernetes Publisher
b8e7532d82 sync: update required packages 2018-03-10 03:34:54 +00:00
Kubernetes Publisher
92a7f356a7 sync: update godeps 2018-03-10 03:34:54 +00:00
Kubernetes Publisher
e89270098e sync: update required packages 2018-03-09 23:38:29 +00:00
Kubernetes Publisher
d757a500c2 sync: update godeps 2018-03-09 23:38:29 +00:00
Kubernetes Publisher
dc1bea83e7 sync: update required packages 2018-03-09 19:40:25 +00:00
Kubernetes Publisher
5ee4ca4537 sync: update godeps 2018-03-09 19:40:25 +00:00
Kubernetes Publisher
95a7de1c04 sync: update required packages 2018-03-09 15:37:07 +00:00
Kubernetes Publisher
a69e35ea88 sync: update godeps 2018-03-09 15:37:06 +00:00
Kubernetes Publisher
a593d56002 sync: update required packages 2018-03-09 11:33:36 +00:00
Kubernetes Publisher
ae9c236a47 sync: update godeps 2018-03-09 11:33:36 +00:00
Kubernetes Publisher
a717103397 sync: update required packages 2018-03-09 07:36:30 +00:00
Kubernetes Publisher
8a57124336 sync: update godeps 2018-03-09 07:36:29 +00:00
Kubernetes Publisher
c55e438328 sync: update required packages 2018-03-09 03:36:50 +00:00
Kubernetes Publisher
2980228bf6 sync: update godeps 2018-03-09 03:36:49 +00:00
Kubernetes Publisher
dbb14c55e7 sync: update required packages 2018-03-08 23:43:42 +00:00
Kubernetes Publisher
e323d1ae71 sync: update godeps 2018-03-08 23:43:41 +00:00
Kubernetes Publisher
46bb2ba055 sync: update required packages 2018-03-08 19:54:54 +00:00
Kubernetes Publisher
d7b707a0d9 sync: update godeps 2018-03-08 19:54:54 +00:00
Kubernetes Publisher
95b0712aef sync: update required packages 2018-03-08 15:53:54 +00:00
Kubernetes Publisher
18ee36d132 sync: update godeps 2018-03-08 15:53:54 +00:00
Kubernetes Publisher
130f9932f5 sync: update required packages 2018-03-08 11:53:04 +00:00
Kubernetes Publisher
ea557bd3cb sync: update godeps 2018-03-08 11:53:04 +00:00
Kubernetes Publisher
6b73aa9963 sync: update required packages 2018-03-08 07:54:56 +00:00
Kubernetes Publisher
ce99a5ecb6 sync: update godeps 2018-03-08 07:54:56 +00:00
Kubernetes Publisher
f3c7678dc0 sync: update required packages 2018-03-08 03:55:12 +00:00
Kubernetes Publisher
6be4298256 sync: update godeps 2018-03-08 03:55:12 +00:00
Kubernetes Publisher
e410338511 sync: update required packages 2018-03-08 00:02:26 +00:00
Kubernetes Publisher
b2221d1baf sync: update godeps 2018-03-08 00:02:26 +00:00
Kubernetes Publisher
e43cdbebf6 sync: update required packages 2018-03-07 20:02:25 +00:00
Kubernetes Publisher
0fabe9b059 sync: update godeps 2018-03-07 20:02:25 +00:00
Kubernetes Publisher
565d80c450 sync: update required packages 2018-03-07 15:52:39 +00:00
Kubernetes Publisher
76208bd5ac sync: update godeps 2018-03-07 15:52:39 +00:00
Kubernetes Publisher
fff375d793 sync: update required packages 2018-03-07 11:53:04 +00:00
Kubernetes Publisher
1672519a02 sync: update godeps 2018-03-07 11:53:04 +00:00
Kubernetes Publisher
3735f657a1 sync: update required packages 2018-03-07 07:55:29 +00:00
Kubernetes Publisher
1466a6e28b sync: update godeps 2018-03-07 07:55:29 +00:00
Kubernetes Publisher
f3f705f414 sync: update required packages 2018-03-07 04:15:51 +00:00
Kubernetes Publisher
38be92b288 sync: update godeps 2018-03-07 04:15:51 +00:00
Kubernetes Publisher
14df7837b7 sync: update required packages 2018-03-07 00:00:11 +00:00
Kubernetes Publisher
bf3aea7ace sync: update godeps 2018-03-07 00:00:11 +00:00
Kubernetes Publisher
e425e996a7 sync: update required packages 2018-03-06 20:02:59 +00:00
Kubernetes Publisher
f581536a9e sync: update godeps 2018-03-06 20:02:59 +00:00
Kubernetes Publisher
26625c0056 sync: update required packages 2018-03-06 16:02:02 +00:00
Kubernetes Publisher
dd76913297 sync: update godeps 2018-03-06 16:02:02 +00:00
Kubernetes Publisher
28dfabf992 sync: update required packages 2018-03-06 11:52:14 +00:00
Kubernetes Publisher
b86d1d58f3 sync: update godeps 2018-03-06 11:52:14 +00:00
Kubernetes Publisher
8c00b9b2c9 sync: update required packages 2018-03-06 07:52:31 +00:00
Kubernetes Publisher
de1e230cda sync: update godeps 2018-03-06 07:52:31 +00:00
Kubernetes Publisher
b71da6b123 sync: update required packages 2018-03-06 04:02:36 +00:00
Kubernetes Publisher
29b23c1f1c sync: update godeps 2018-03-06 04:02:36 +00:00
Kubernetes Publisher
bbab3b7a10 sync: update required packages 2018-03-06 00:01:34 +00:00
Kubernetes Publisher
cf5bffca2f sync: update godeps 2018-03-06 00:01:34 +00:00
Kubernetes Publisher
89f6dd735f sync: update required packages 2018-03-05 20:03:33 +00:00
Kubernetes Publisher
3d9d5c345e sync: update godeps 2018-03-05 20:03:33 +00:00
Kubernetes Publisher
1f57013c99 sync: update required packages 2018-03-05 15:59:29 +00:00
Kubernetes Publisher
cc15ded788 sync: update godeps 2018-03-05 15:59:29 +00:00
Kubernetes Publisher
98134c8427 sync: update required packages 2018-03-05 11:51:48 +00:00
Kubernetes Publisher
367e5756d8 sync: update godeps 2018-03-05 11:51:48 +00:00
Kubernetes Publisher
820a7a6aaf sync: update required packages 2018-03-05 07:47:23 +00:00
Kubernetes Publisher
2522216897 sync: update godeps 2018-03-05 07:47:23 +00:00
Kubernetes Publisher
38323dec6d sync: update required packages 2018-03-05 03:52:41 +00:00
Kubernetes Publisher
3b4778aa39 sync: update godeps 2018-03-05 03:52:41 +00:00
Kubernetes Publisher
b0e20e7b60 sync: update required packages 2018-03-04 23:59:21 +00:00
Kubernetes Publisher
ff061f9470 sync: update godeps 2018-03-04 23:59:20 +00:00
Kubernetes Publisher
eaa614a95d sync: update required packages 2018-03-04 20:03:40 +00:00
Kubernetes Publisher
d5d71c2684 sync: update godeps 2018-03-04 20:03:39 +00:00
Kubernetes Publisher
468fa3dc39 sync: update required packages 2018-03-04 15:56:39 +00:00
Kubernetes Publisher
200019a48e sync: update godeps 2018-03-04 15:56:38 +00:00
Kubernetes Publisher
a83f042c60 sync: update required packages 2018-03-04 11:43:30 +00:00
Kubernetes Publisher
6c4a574775 sync: update godeps 2018-03-04 11:43:30 +00:00
Kubernetes Publisher
13ac896773 sync: update required packages 2018-03-04 07:45:16 +00:00
Kubernetes Publisher
2eec0c38ea sync: update godeps 2018-03-04 07:45:16 +00:00
Kubernetes Publisher
f3c002b6a5 sync: update required packages 2018-03-04 03:47:17 +00:00
Kubernetes Publisher
971549189b sync: update godeps 2018-03-04 03:47:17 +00:00
Kubernetes Publisher
5a91342ead sync: update required packages 2018-03-03 23:53:42 +00:00
Kubernetes Publisher
0d4c61725c sync: update godeps 2018-03-03 23:53:42 +00:00
Kubernetes Publisher
614b5df479 sync: update required packages 2018-03-03 19:55:59 +00:00
Kubernetes Publisher
31f33152bc sync: update godeps 2018-03-03 19:55:59 +00:00
Kubernetes Publisher
61ef8c9240 sync: update required packages 2018-03-03 16:18:04 +00:00
Kubernetes Publisher
71d7e83357 sync: update godeps 2018-03-03 16:18:04 +00:00
Kubernetes Publisher
7fa17b48d1 sync: update required packages 2018-03-03 11:45:13 +00:00
Kubernetes Publisher
07df2fef88 sync: update godeps 2018-03-03 11:45:12 +00:00
Kubernetes Publisher
268d127a7e sync: update required packages 2018-03-03 07:45:37 +00:00
Kubernetes Publisher
2f68eddc55 sync: update godeps 2018-03-03 07:45:36 +00:00
Kubernetes Publisher
976355ea41 sync: update required packages 2018-03-03 04:03:22 +00:00
Kubernetes Publisher
8788678c62 sync: update godeps 2018-03-03 04:03:22 +00:00
Kubernetes Publisher
49daa1180c sync: update required packages 2018-03-02 23:50:45 +00:00
Kubernetes Publisher
fb4c6ae9b4 sync: update godeps 2018-03-02 23:50:45 +00:00
Kubernetes Publisher
e89ffee0aa sync: update required packages 2018-03-02 19:59:00 +00:00
Kubernetes Publisher
ecc1e4d5b7 sync: update godeps 2018-03-02 19:58:59 +00:00
Kubernetes Publisher
eecf74fa68 sync: update required packages 2018-03-02 19:58:47 +00:00
Kubernetes Publisher
d844f7d063 sync: update godeps 2018-03-02 19:58:47 +00:00
Kubernetes Publisher
1c8d36a84c Merge remote-tracking branch 'origin/master' into release-1.10. Deleting CHANGELOG-1.7.md
Kubernetes-commit: 305052d6d2c1fa976c7a841350396061a2c26ac0
2018-03-01 11:48:51 -05:00
Kubernetes Publisher
7d2b7a94fe Merge pull request #59495 from ericchiang/client-auth-exec
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

 client-go: add an exec-based client auth provider

Updates https://github.com/kubernetes/features/issues/541
Implements https://github.com/kubernetes/community/pull/1503
Closes https://github.com/kubernetes/kubernetes/issues/57164

```release-note
client-go: alpha support for exec-based credential providers
```

/sig auth
/kind feature
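
For orientation, a minimal sketch of how an exec provider is expressed with client-go's `clientcmd/api` types; the helper command name, arguments, and environment variable below are illustrative, not part of this PR:

```go
package main

import (
	"fmt"

	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func main() {
	// An AuthInfo whose credentials come from shelling out to an
	// external binary instead of a static token or client cert.
	auth := clientcmdapi.NewAuthInfo()
	auth.Exec = &clientcmdapi.ExecConfig{
		APIVersion: "client.authentication.k8s.io/v1alpha1",
		Command:    "example-credential-helper", // hypothetical binary
		Args:       []string{"get-token"},
		Env: []clientcmdapi.ExecEnvVar{
			{Name: "CLUSTER", Value: "prod"}, // illustrative only
		},
	}
	fmt.Printf("exec provider: %s %v\n", auth.Exec.Command, auth.Exec.Args)
}
```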

Kubernetes-commit: cb9d6b51556a1677f262e35e4aded0051c424818
2018-03-02 19:58:34 +00:00
Kubernetes Publisher
90185097df sync: update required packages 2018-03-01 14:09:27 +00:00
Kubernetes Publisher
153c4d7278 sync: update godeps 2018-03-01 14:09:26 +00:00
Kubernetes Publisher
cb151cc1a8 sync: update required packages 2018-03-01 10:11:16 +00:00
Kubernetes Publisher
a8e541f0d2 sync: update godeps 2018-03-01 10:11:15 +00:00
Kubernetes Publisher
d116f69397 sync: update required packages 2018-03-01 06:06:58 +00:00
Kubernetes Publisher
2be2fab7d1 sync: update godeps 2018-03-01 06:06:57 +00:00
Kubernetes Publisher
f723d5dec7 sync: update required packages 2018-03-01 02:07:13 +00:00
Kubernetes Publisher
3c8dedeb9d sync: update godeps 2018-03-01 02:07:12 +00:00
Kubernetes Publisher
bddc8dd04c sync: update required packages 2018-02-28 22:06:40 +00:00
Kubernetes Publisher
9840281fac sync: update godeps 2018-02-28 22:06:40 +00:00
Kubernetes Publisher
5529714c4a sync: update required packages 2018-02-28 18:08:07 +00:00
Kubernetes Publisher
156d42a628 sync: update godeps 2018-02-28 18:08:06 +00:00
Kubernetes Publisher
b59f54c2b2 sync: update required packages 2018-02-28 14:06:59 +00:00
Kubernetes Publisher
c9c8fd9666 sync: update godeps 2018-02-28 14:06:59 +00:00
Kubernetes Publisher
46ff43598d sync: update required packages 2018-02-28 10:06:45 +00:00
Kubernetes Publisher
9eb35675d7 sync: update godeps 2018-02-28 10:06:44 +00:00
Kubernetes Publisher
af0a89a540 sync: update required packages 2018-02-28 06:12:23 +00:00
Kubernetes Publisher
1d83fc7af7 sync: update godeps 2018-02-28 06:12:23 +00:00
Kubernetes Publisher
2facf96233 sync: update required packages 2018-02-28 06:12:02 +00:00
Kubernetes Publisher
4f271e31e4 sync: update godeps 2018-02-28 06:12:02 +00:00
Kubernetes Publisher
0430994cc2 Merge remote-tracking branch 'origin/master' into release-1.10
Kubernetes-commit: 6ee902eee1aa2022d41afd82c510b0d5e7de2d77
2018-02-27 18:10:55 -05:00
Kubernetes Publisher
b7e03de184 Merge pull request #60446 from cblecker/no-dep-reviewer
Automatic merge from submit-queue (batch tested with PRs 59365, 60446, 60448, 55019, 60431). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Remove dep-reviewers

**What this PR does / why we need it**:
The dep-reviewers group seems to get assigned PRs early in the review process. However, most code changes should be reviewed in the importing part of the code base first, and then assigned to an approver afterwards.

With the reviewers group removed, the approvers plugin will still suggest assigning to an approver, but won't assign the PR for review when it is initially opened.

**Release note**:

```release-note
NONE
```

Kubernetes-commit: 724a2f968c6981efc9f5a85e4ad60f56e1c0902f
2018-02-28 06:11:45 +00:00
Kubernetes Publisher
6c333b6100 Merge pull request #59674 from jennybuckley/codegen
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

code-gen: output golint compliant 'Generated by' comment

New PR instead of reopening #58115 because /reopen did not work.
This won't be ready to merge until the upstream https://github.com/kubernetes/gengo/pull/94 merges. Once that merges, the second commit will be changed to godep-save.sh and update-staging-godeps.sh, and the last commit will be changed to update-all.sh

The failing test is due to the upstream changes not being merged yet

```devel-release-note
Go code generated by the code generators will now have a comment which allows them to be easily identified by golint
```

Fixes #56489
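
For reference, the conventional marker (per golang.org/s/generatedcode) matches `^// Code generated .* DO NOT EDIT\.$` and sits above the package clause; a sketch with an illustrative generator, package, and type:

```go
// Code generated by deepcopy-gen. DO NOT EDIT.

package example

// Foo is a placeholder type standing in for a generated API type.
type Foo struct {
	Name string
}

// DeepCopy mimics the kind of method the code generators emit.
func (in *Foo) DeepCopy() *Foo {
	if in == nil {
		return nil
	}
	out := new(Foo)
	*out = *in
	return out
}
```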

Kubernetes-commit: 1eb1c00c44f8f597b9b23a05cd0a8da205c87f8a
2018-02-28 06:11:28 +00:00
Kubernetes Publisher
9a2a002b29 Merge pull request #59293 from roycaihw/openapi_endpoint
Automatic merge from submit-queue (batch tested with PRs 60011, 59256, 59293, 60328, 60367). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Serve OpenAPI spec with single /openapi/v2 endpoint

**What this PR does / why we need it**:
We are deprecating format-separated endpoints (`/swagger.json`, `/swagger-2.0.0.json`, `/swagger-2.0.0.pb-v1`, `/swagger-2.0.0.pb-v1.gz`) for OpenAPI spec, and switching to a single `/openapi/v2` endpoint in Kubernetes 1.10. The design doc and deprecation process are tracked at: https://docs.google.com/document/d/19lEqE9lc4yHJ3WJAJxS_G7TcORIJXGHyq3wpwcH28nU

Requested format is specified by setting HTTP headers

header | possible values
-- | --
Accept | `application/json`, `application/com.github.proto-openapi.spec.v2@v1.0+protobuf`
Accept-Encoding | `gzip`

This PR changes dynamic_client (and kubectl as a result) to use the new endpoint. The old endpoints will remain in 1.10 and 1.11, and get removed in 1.12.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
action required: Deprecate format-separated endpoints for OpenAPI spec. Please use single `/openapi/v2` endpoint instead.
```

/sig api-machinery
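
A minimal sketch of hitting the new endpoint directly with the headers from the table above; the server URL is a placeholder, and a real client would authenticate and verify TLS first:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	req, err := http.NewRequest("GET", "https://localhost:6443/openapi/v2", nil)
	if err != nil {
		panic(err)
	}
	// Select the format via headers: "application/json" for JSON, or the
	// protobuf media type below; gzip encoding is optional.
	req.Header.Set("Accept", "application/com.github.proto-openapi.spec.v2@v1.0+protobuf")
	req.Header.Set("Accept-Encoding", "gzip")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d bytes, Content-Type: %s\n", len(body), resp.Header.Get("Content-Type"))
}
```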

Kubernetes-commit: d6153194d929ad6c036d5bbbf67a6f892e75feb5
2018-02-28 06:11:11 +00:00
Kubernetes Publisher
632c382b43 sync: update required packages 2018-02-27 18:05:55 +00:00
Kubernetes Publisher
5c226ce7b9 sync: update godeps 2018-02-27 18:05:55 +00:00
Kubernetes Publisher
a737fc7380 sync: update required packages 2018-02-27 14:05:00 +00:00
Kubernetes Publisher
0b3755ac9c sync: update godeps 2018-02-27 14:05:00 +00:00
Kubernetes Publisher
aa4294228f sync: update required packages 2018-02-27 10:08:35 +00:00
Kubernetes Publisher
279eb14b03 sync: update godeps 2018-02-27 10:08:35 +00:00
Kubernetes Publisher
590967646e sync: update required packages 2018-02-27 06:05:30 +00:00
Kubernetes Publisher
646dda6122 sync: update godeps 2018-02-27 06:05:30 +00:00
Kubernetes Publisher
f11de95a54 sync: update required packages 2018-02-27 02:23:00 +00:00
Kubernetes Publisher
aa22ac5b65 sync: update godeps 2018-02-27 02:22:59 +00:00
Kubernetes Publisher
fa8258f29c sync: update required packages 2018-02-27 02:22:43 +00:00
Kubernetes Publisher
b8f215ad88 sync: update godeps 2018-02-27 02:22:43 +00:00
jennybuckley
23f67bd7a3 Run hack/update-all.sh
Kubernetes-commit: c8dacd8e631f59ef158c79156d77a99fd2a632cc
2018-02-26 17:16:14 -08:00
Christoph Blecker
42d4406d1c Remove dep-reviewers
Kubernetes-commit: b97b9530f08d40a4346ea328d8a1047822fb92b7
2018-02-26 11:11:15 -08:00
Kubernetes Publisher
940d88846b Merge remote-tracking branch 'origin/master' into release-1.10
Kubernetes-commit: 8d6416d0e6674f36d90274c98dda83ed7ae873de
2018-02-24 15:22:52 -05:00
Kubernetes Publisher
8a511b140e Merge pull request #59793 from nikhita/staging-repos-boilerplate
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

staging: add boilerplate header

Follow up of https://github.com/kubernetes/kubernetes/pull/57656.

Adds boilerplate in the relevant staging repos so that they don't need to depend on code-generator's boilerplate.

**Release note**:

```release-note
NONE
```

/cc sttts fisherxu

Kubernetes-commit: 1a1643bb5df07d94330e1b16d182cee660533f91
2018-02-27 02:22:26 +00:00
Kubernetes Publisher
0236239fbb Merge pull request #59587 from cblecker/cblecker-vendor
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add cblecker to vendor OWNERS

**What this PR does / why we need it**:
Adds myself to vendor OWNERS. I can help approve dep bumps of existing deps, and refer any new deps to Tim for license review.

**Release note**:
```release-note
NONE
```

/assign thockin

Kubernetes-commit: 852e7f7bfa43d1427706c59453e39f2de12a4f32
2018-02-27 02:22:08 +00:00
Kubernetes Publisher
a95e9a627a Merge pull request #59842 from ixdy/update-rules_go-02-2018
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

 Update bazelbuild/rules_go, kubernetes/repo-infra, and gazelle dependencies

**What this PR does / why we need it**: updates our bazelbuild/rules_go dependency in order to bump everything to go1.9.4. I'm separating this effort into two separate PRs, since updating rules_go requires a large cleanup, removing an attribute from most build rules.

**Release note**:

```release-note
NONE
```

Kubernetes-commit: 96ec3187180b9c1d722756b3ea0984ebe65424dc
2018-02-27 02:21:51 +00:00
Haowei Cai
669a29fce3 Bump kube-openapi to add new openapi endpoint
Kubernetes-commit: 8b38e080c4ddd3e1416a5fc4d45a3e4d2dbe1033
2018-02-20 09:21:41 -08:00
Kubernetes Publisher
c3de6d02f5 sync: update required packages 2018-02-20 06:01:17 +00:00
Kubernetes Publisher
0b1456c846 sync: update godeps 2018-02-20 06:01:16 +00:00
Kubernetes Publisher
fdab74e2e7 sync: update required packages 2018-02-20 02:03:15 +00:00
Kubernetes Publisher
15b03d1b58 sync: update godeps 2018-02-20 02:03:15 +00:00
Kubernetes Publisher
f62a943b5a sync: update required packages 2018-02-19 22:03:57 +00:00
Kubernetes Publisher
25633a6136 sync: update godeps 2018-02-19 22:03:57 +00:00
Kubernetes Publisher
00379c004a sync: update required packages 2018-02-19 18:02:11 +00:00
Kubernetes Publisher
eb2ac93697 sync: update godeps 2018-02-19 18:02:10 +00:00
Kubernetes Publisher
2d6ed04f24 sync: update required packages 2018-02-19 13:58:26 +00:00
Kubernetes Publisher
7df2f66742 sync: update godeps 2018-02-19 13:58:26 +00:00
Kubernetes Publisher
bb431a3ab2 sync: update required packages 2018-02-19 09:58:05 +00:00
Kubernetes Publisher
b0d14a1c84 sync: update godeps 2018-02-19 09:58:05 +00:00
Kubernetes Publisher
f084c24891 sync: update required packages 2018-02-19 05:57:49 +00:00
Kubernetes Publisher
50534bb2bf sync: update godeps 2018-02-19 05:57:48 +00:00
Kubernetes Publisher
00ef954d98 sync: update required packages 2018-02-19 01:59:09 +00:00
Kubernetes Publisher
6a485beed6 sync: update godeps 2018-02-19 01:59:09 +00:00
Kubernetes Publisher
4eadffae42 sync: update required packages 2018-02-18 21:58:15 +00:00
Kubernetes Publisher
8d64450519 sync: update godeps 2018-02-18 21:58:14 +00:00
Kubernetes Publisher
2f9475ee12 sync: update required packages 2018-02-18 17:57:42 +00:00
Kubernetes Publisher
199796a57a sync: update godeps 2018-02-18 17:57:41 +00:00
Kubernetes Publisher
97b622f024 sync: update required packages 2018-02-18 13:57:46 +00:00
Kubernetes Publisher
1ecf0bb87c sync: update godeps 2018-02-18 13:57:46 +00:00
Kubernetes Publisher
3ace527e12 sync: update required packages 2018-02-18 09:58:04 +00:00
Kubernetes Publisher
abb9a9f498 sync: update godeps 2018-02-18 09:58:03 +00:00
Kubernetes Publisher
d99c14e079 sync: update required packages 2018-02-18 02:00:56 +00:00
Kubernetes Publisher
cfce4bf244 sync: update godeps 2018-02-18 02:00:56 +00:00
Kubernetes Publisher
dd1f49d75d sync: update required packages 2018-02-17 22:02:07 +00:00
Kubernetes Publisher
e5dbccbaba sync: update godeps 2018-02-17 22:02:07 +00:00
Kubernetes Publisher
6f38ac2beb sync: update required packages 2018-02-17 17:59:14 +00:00
Kubernetes Publisher
ae4221b560 sync: update godeps 2018-02-17 17:59:14 +00:00
Kubernetes Publisher
b1dae4c7e4 sync: update required packages 2018-02-17 13:58:22 +00:00
Kubernetes Publisher
710ff6c591 sync: update godeps 2018-02-17 13:58:22 +00:00
Kubernetes Publisher
a06fbc12fd sync: update required packages 2018-02-17 09:58:56 +00:00
Kubernetes Publisher
8b8ce2dc48 sync: update godeps 2018-02-17 09:58:56 +00:00
Kubernetes Publisher
f25b71ab89 sync: update required packages 2018-02-17 02:00:25 +00:00
Kubernetes Publisher
1191502013 sync: update godeps 2018-02-17 02:00:25 +00:00
Kubernetes Publisher
b576b15460 sync: update required packages 2018-02-16 22:02:04 +00:00
Kubernetes Publisher
5d3eb2e0dc sync: update godeps 2018-02-16 22:02:04 +00:00
Jeff Grafton
6a86852930 Autogenerated: hack/update-bazel.sh
Kubernetes-commit: ef56a8d6bb3800ab7803713eafc4191e8202ad6e
2018-02-16 13:43:01 -08:00
Kubernetes Publisher
b441530fb7 sync: update required packages 2018-02-16 18:00:01 +00:00
Kubernetes Publisher
4ca6cf6c9a sync: update godeps 2018-02-16 18:00:01 +00:00
Kubernetes Publisher
6f435c14c9 Merge pull request #58317 from nikhita/bump-go-yaml
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

bump(670d4c): gopkg.in/yaml.v2: fix parsing for non-specific tags

Fixes #56976

Fixes this bug - https://github.com/go-yaml/yaml/issues/75 - in `go-yaml`. The fix for this bug is at 670d4cfef0.

**Release note**:

```release-note
NONE
```

/cc sttts caesarxuchao jennybuckley

Kubernetes-commit: 4e2c3f060a873a0b727dbd3e66047a3b2858db97
2018-02-16 14:02:11 +00:00
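
For readers outside the original thread, a minimal sketch of the class of input this bump is meant to handle — an explicitly non-specific `!` tag on a scalar. The exact failing documents are in go-yaml/yaml#75, so treat this particular input and the resolved value as assumptions:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// "! 123" carries the explicit non-specific tag; per the YAML spec the
	// scalar should resolve as a plain string. Older yaml.v2 revisions
	// failed to parse such documents at all.
	var out map[string]interface{}
	if err := yaml.Unmarshal([]byte("a: ! 123\n"), &out); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%#v\n", out["a"]) // expected with the fix: the string "123"
}
```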
Christoph Blecker
7d2f5fe51f Re-add OWNERS files to Godeps/vendor dirs
Kubernetes-commit: 6fb2304f2a6da44e42985ed662d5f7f56215eec6
2018-02-15 13:31:02 -08:00
Nikhita Raghunath
16c374870f staging: add boilerplate header
Kubernetes-commit: c7d67ad1ee412b2ebe906f21aa900dbdfc4ce5da
2018-02-13 16:50:05 +05:30
Eric Chiang
f50bf20edb generated
Kubernetes-commit: 01801ae13a86c10cd343c329f5224ab47272f826
2018-02-07 15:48:46 -08:00
Kubernetes Publisher
b8b76143ce Merge pull request #59059 from smarterclayton/move_partial_object
Automatic merge from submit-queue (batch tested with PRs 59158, 38320, 59059, 55516, 59357). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Promote v1alpha1 meta to v1beta1

No code changes, just renames. We can discuss if there are any field / naming changes here or in a follow-up

Parent #58536
Fixes #53224
Prereq to #55637

@kubernetes/sig-api-machinery-pr-reviews @deads2k

```release-note
The `meta.k8s.io/v1alpha1` objects for retrieving tabular responses from the server (`Table`) or fetching just the `ObjectMeta` for an object (as `PartialObjectMetadata`) are now beta as part of `meta.k8s.io/v1beta1`. Clients may request alternate representations of normal Kubernetes objects by passing an `Accept` header like `application/json;as=Table;g=meta.k8s.io;v=v1beta1` or `application/json;as=PartialObjectMetadata;g=meta.k8s.io;v=v1beta1`. Older servers will ignore this representation or return an error if it is not available. Clients may request fallback to the normal object by adding a non-qualified mime-type to their `Accept` header like `application/json` - the server will then respond with either the alternate representation if it is supported or the fallback mime-type which is the normal object response.
```

Kubernetes-commit: 9ee71b720ed2300d6298bb936d0a7873b5ecf2ac
2018-02-05 21:38:17 +00:00
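
As a rough illustration of the negotiation described in that release note, a client can ask for the `Table` representation with a fallback to the normal object. This sketch uses plain `net/http` against a hypothetical unauthenticated apiserver address; the `server` value and the pods path are assumptions for illustration:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	server := "http://localhost:8080" // hypothetical apiserver address
	req, err := http.NewRequest("GET", server+"/api/v1/namespaces/default/pods", nil)
	if err != nil {
		panic(err)
	}
	// Ask for the tabular representation; the trailing non-qualified
	// mime-type requests fallback to the normal object on older servers.
	req.Header.Set("Accept", "application/json;as=Table;g=meta.k8s.io;v=v1beta1, application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(body)) // either a meta.k8s.io/v1beta1 Table or a plain PodList
}
```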
Kubernetes Publisher
7df7717190 Merge pull request #38320 from liggitt/golang-ratelimit
Automatic merge from submit-queue (batch tested with PRs 59158, 38320, 59059, 55516, 59357). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Switch from juju/ratelimit to golang.org/x/time/rate

Replaces juju/ratelimit with golang.org/x/time/rate
xref https://github.com/kubernetes/steering/issues/21

Requires removing the Saturation() method on the rate limiter. In the process of attempting to contribute it to the `golang.org/x/time/rate` implementation, it became clear that what it was calculating was not very useful when combined with periodic polling. See discussion in https://go-review.googlesource.com/c/time/+/29958#message-4caffc11669cadd90e2da4c05122cfec50ea6a22

```release-note
NONE
```

Kubernetes-commit: 0656d030a7d131ca8088a9f0ecd12596eb90d2fd
2018-02-05 21:38:00 +00:00
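
For readers unfamiliar with the replacement package, a minimal sketch of the `golang.org/x/time/rate` token-bucket API (note there is no `Saturation()` equivalent, per the discussion linked above):

```go
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 tokens per second with a burst of 5 — roughly the shape of the
	// juju/ratelimit buckets this replaces.
	limiter := rate.NewLimiter(rate.Limit(10), 5)

	for i := 0; i < 3; i++ {
		// Wait blocks until a token is available or the context is done.
		if err := limiter.Wait(context.Background()); err != nil {
			fmt.Println("limiter:", err)
			return
		}
		fmt.Println(i, time.Now().Format(time.StampMilli))
	}

	// Allow is the non-blocking variant.
	fmt.Println("token available right now?", limiter.Allow())
}
```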
Kubernetes Publisher
0d94fd0c60 Merge pull request #59195 from pigletfly/fix-typo
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Fix some typos

**What this PR does / why we need it**:

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #

**Special notes for your reviewer**:

**Release note**:

```release-note
     None
```

Kubernetes-commit: c6e581ff42bff91337565836834b76a1601be936
2018-02-04 17:33:09 +00:00
pigletfly
863b091f8c Fix typo
Kubernetes-commit: 5eba676508af6571e38b8b37dd4cde6efd33acd3
2018-02-01 19:11:19 +08:00
Kubernetes Publisher
1c605930c8 Merge pull request #59104 from nikhita/sample-controller-apps-v1
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

sample-controller: document minimum kubernetes version

In https://github.com/kubernetes/kubernetes/pull/58446, the sample-controller started using `apps/v1` deployments since they became GA in 1.9.

This means that sample-controller does not support versions below 1.9.

Fixes kubernetes/sample-controller#9

**Release note**:

```release-note
NONE
```

/assign sttts munnerz

Kubernetes-commit: 971960784927755cbc9423d0ecc19e573de9f391
2018-01-31 13:30:33 +00:00
Nikhita Raghunath
af680ed1d0 sample-controller: document minimum kube version
The sample-controller uses apps/v1 deployments
since they became GA in 1.9. This means that
sample-controller does not support versions below 1.9.

Kubernetes-commit: 1fd07e197803d0d98bba7d7caa1e5111abd12d6b
2018-01-31 00:56:18 +05:30
Clayton Coleman
af36e57afa Promote v1alpha1 meta to v1beta1
No code changes, just renames

Kubernetes-commit: d07a608607e1f4d252003c17cd615652574a0823
2018-01-30 13:30:57 -05:00
Kubernetes Publisher
0b8ef8e981 Merge pull request #58664 from hzxuzhonghu/code-gen
Automatic merge from submit-queue (batch tested with PRs 58259, 58664). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

fix generator_for_scheme: remove blank new line

**What this PR does / why we need it**:

1. remove new blank line in `AddToScheme`
80e344644e/pkg/client/clientset_generated/internalclientset/fake/register.go (L81-L83)

2. remove new blank line in `Install`
3d69cea1e5/staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/scheme/register.go (L44-L47)

**Special notes for your reviewer**:

the first commit changes the code generator for schema register.
c8c9ca77af

**Release note**:

```release-note
NONE
```

Kubernetes-commit: 32913ab0cdc5c4dde50e50ecffa4c1e9c6d6a150
2018-01-24 21:35:39 +00:00
Kubernetes Publisher
b13f138dcd Merge pull request #58394 from deads2k/controller-08-redeliver
Automatic merge from submit-queue (batch tested with PRs 58412, 56132, 58506, 58542, 58394). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

don't stop informer delivery on error

If an informer delivery fails today, we stop delivering to it entirely. This PR updates the code to skip that particular notification, delay, and then continue delivery with the next one.

/assign derekwaynecarr
/assign ncdc
/assign ash2k

@derekwaynecarr This would change the "the controller isn't doing anything?!" to "the controller missed my (individual) resource!"

```release-note
NONE
```

Kubernetes-commit: 71426ba59fd4a37e5da7deac6298ab33101bb5b6
2018-01-23 09:36:29 +00:00
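
The shape of the change, as a hedged sketch rather than the actual client-go processor code (names and signatures here are invented): instead of returning on the first failed delivery and silencing the listener forever, the loop drops the failing notification, delays briefly, and keeps going.

```go
package main

import (
	"fmt"
	"log"
	"time"
)

// deliver fans notifications out to a handler. The pre-PR behavior is the
// commented-out `return`: one error stopped all future deliveries.
func deliver(notifications <-chan interface{}, handle func(interface{}) error, retryDelay time.Duration) {
	for n := range notifications {
		if err := handle(n); err != nil {
			log.Printf("dropping notification after handler error: %v", err)
			time.Sleep(retryDelay) // brief delay, then move on to the next item
			continue               // previously: return
		}
	}
}

func main() {
	ch := make(chan interface{}, 3)
	ch <- "a"
	ch <- "boom"
	ch <- "b"
	close(ch)

	deliver(ch, func(n interface{}) error {
		if n == "boom" {
			return fmt.Errorf("handler failed on %v", n)
		}
		fmt.Println("delivered", n)
		return nil
	}, 10*time.Millisecond)
}
```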
hzxuzhonghu
489c07a569 run update code-gen
Kubernetes-commit: 1f013b7deacfaa80bd8fafbe9b1fad1375e1907d
2018-01-23 12:26:27 +08:00
Kubernetes Publisher
8db1716370 Merge pull request #58446 from hzxuzhonghu/sample-controleer
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

upgrade sample-controller deployment to apps/v1

**What this PR does / why we need it**:

apps/v1 is GA

**Release note**:

```release-note
NONE
```
/assign @sttts @nikhita

Kubernetes-commit: 0dd88a1fb8615ab42f80a5f3126eb9e0f4577dd7
2018-01-19 21:28:42 +00:00
David Eads
33ff92b504 don't stop informer delivery on error
Kubernetes-commit: 2fa93da6d5efd97dbcaad262a9e59073de9c5298
2018-01-18 13:07:18 -05:00
Kubernetes Publisher
4a1ec65955 Merge pull request #53631 from dixudx/enforce_cobra_required_flags
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

update vendor spf13/cobra to enforce required flags

**What this PR does / why we need it**:

spf13/cobra#502 enforces checking of flags that are marked as required; an error is raised if they are unset.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes #54855
xref #48400
fixes kubernetes/kubectl#121

**Special notes for your reviewer**:
/assign @liggitt @eparis

**Release note**:

```release-note
kubectl now enforces required flags at a more fundamental level
```

Kubernetes-commit: 048757b8a51333f59d3112d2b228d2f0102a4afc
2018-01-18 13:36:56 +00:00
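
A minimal sketch of what the bumped cobra enforces; the command and flag names are invented for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "greet",
		RunE: func(cmd *cobra.Command, args []string) error {
			name, _ := cmd.Flags().GetString("name")
			fmt.Println("hello,", name)
			return nil
		},
	}
	cmd.Flags().String("name", "", "who to greet")
	// With cobra at or past spf13/cobra#502, Execute fails fast when a
	// required flag is unset, e.g.: Error: required flag(s) "name" not set
	cmd.MarkFlagRequired("name")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}
```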
hzxuzhonghu
f647cfe199 run update bazel
Kubernetes-commit: b240d9239168d98acbb5b2344eae87511763a5ce
2018-01-18 17:48:05 +08:00
hzxuzhonghu
994cb3621c upgrade to apps/v1 deployment
Kubernetes-commit: 386e001446b8e9c96daf15d71d3176a73d638b50
2018-01-18 17:47:00 +08:00
Kubernetes Publisher
83669b2316 Merge pull request #57504 from yue9944882/fix-fake-client-dummy-watch
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

feat(fakeclient): push event on watched channel on add/update/delete

**What this PR does / why we need it**:

This PR enables the watch function for the kubernetes [fakeclient](1bcf0b0a22/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go (L88)).

The fake client adds a watch reactor by wrapping [watch.NewFake](1bcf0b0a22/staging/src/k8s.io/client-go/kubernetes/fake/clientset_generated.go (L98)), which is backed by a `chan Event`, but nothing actually pushes objects into that channel. So a watch started through the fake client never returns and never receives any object.

This PR intercepts the ReactionFunc of `Create / Update / DeleteActionImpl` and pushes the requested object onto the channel.

Which issue(s) this PR fixes (optional, in fixes #<issue number>(, fixes #<issue_number>, ...) format, will close the issue(s) when PR gets merged):

Fixes #54075

**Special notes for your reviewer**:

**Release note**:

```dev-release-note
enable watch function for fake client
```

Kubernetes-commit: 268555a30a0f028762854f5b0d3ebb587e2ee4ee
2018-01-16 19:13:36 +00:00
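
To see what this enables, a sketch against the analogous client-go fake clientset, which received the same treatment; method signatures are as of this era (`Watch` takes a `metav1.ListOptions`, no context), so adjust for newer client-go:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	cs := fake.NewSimpleClientset()

	// Before this PR the fake watch channel was never fed, so receiving
	// from it blocked forever; now Create/Update/Delete push events to it.
	w, err := cs.CoreV1().Pods("default").Watch(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	defer w.Stop()

	go cs.CoreV1().Pods("default").Create(&corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "demo"},
	})

	ev := <-w.ResultChan()
	fmt.Println(ev.Type) // ADDED
}
```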
Nikhita Raghunath
b6578b5696 bump(gopkg.in/yaml.v2): 670d4cfef0544295bc27a114dbac37980d83185a
Fix parsing of non-specific tags

Kubernetes-commit: 713b1fc396a1ccdda51d395ae54c8e5512ce5ab2
2018-01-16 03:27:01 +05:30
Kubernetes Publisher
f1e5514ccc Merge pull request #57059 from ericchiang/client-go/remove-openapi-import
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

client-go: remove open-api import from types

This builds off of https://github.com/kubernetes/kube-openapi/pull/21 and removes the following imports from `k8s.io/client-go/kubernetes`:

```
github.com/PuerkitoBio/purell
github.com/PuerkitoBio/urlesc
github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
github.com/go-openapi/jsonpointer
github.com/go-openapi/jsonreference
github.com/go-openapi/spec
github.com/go-openapi/swag
github.com/mailru/easyjson/buffer
github.com/mailru/easyjson/jlexer
github.com/mailru/easyjson/jwriter
golang.org/x/text/cases
golang.org/x/text/internal
golang.org/x/text/internal/tag
golang.org/x/text/language
golang.org/x/text/runes
golang.org/x/text/secure/precis
golang.org/x/text/width
k8s.io/kube-openapi/pkg/common
```

/assign @sttts
/assign @mbohlool

cc @kubernetes/sig-api-machinery-pr-reviews

```release-note
NONE
```

Kubernetes-commit: f6d0632bbbf8428bfec0ca72db5103916e6248b4
2018-01-12 12:06:32 +00:00
Kubernetes Publisher
1607ce9716 Merge pull request #57259 from ericchiang/client-go-no-cache-import
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

client-go: remove import of github.com/gregjones/httpcache

Moves NewCacheRoundTripper from `k8s.io/client-go/transport` to its own package. This prevents Kubernetes clients from requiring its dependencies.

This change removes the following transitive imports from `k8s.io/client-go/kubernetes`

```
github.com/google/btree
github.com/gregjones/httpcache
github.com/gregjones/httpcache/diskcache
github.com/peterbourgon/diskv
```

```release-note
NONE
```

Kubernetes-commit: 4a77bd53e88a719c612086df69a52899bbdd85af
2018-01-11 12:07:25 +00:00
Kubernetes Publisher
76d013a3dd Merge pull request #57571 from spiffxp/mk-staging-repos-authoritative
Automatic merge from submit-queue (batch tested with PRs 58025, 57112, 57879, 57571, 58062). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Treat staging repos as authoritative source for all files

Add CONTRIBUTING.md files to remind people not to PR directly into the published repos.

/hold
I believe this requires removing these files from the published repos if any copies exist there

ref: kubernetes/kubernetes#57559

Kubernetes-commit: 4bc286f0255d6c007cb8d5435018ab3e31ec7d01
2018-01-11 08:10:59 +00:00
Eric Chiang
7635ea288c bump(k8s.io/kube-openapi): a07b7bbb58e7fdc5144f8d7046331d29fc9ad3b3
Kubernetes-commit: e1dda7e3be5cded77116cc48370372cb10992c17
2018-01-10 15:17:37 -08:00
Eric Chiang
c98ef8f09b generated: update staging godeps
Kubernetes-commit: dedeb99c97dcd0e5814ba49cf0f82aa7bf23f4ad
2018-01-08 10:00:13 -08:00
Kubernetes Publisher
258eead087 Merge pull request #57958 from nikhita/sample-controller-crd-validation
Automatic merge from submit-queue (batch tested with PRs 57902, 57958). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add CustomResourceValidation example in sample-controller

Add `CustomResourceValidation` example in sample-controller.

Addresses the following part of https://github.com/kubernetes/sample-controller/issues/2:

> CRDs support json-schema schemas. These CRDs don't have them. It would be nice to show how to add them

**Release note**:

```release-note
NONE
```

/assign sttts munnerz

Kubernetes-commit: 51acead08489644d30b3f1a978e0b36f228d12a7
2018-01-08 12:38:39 +00:00
Nikhita Raghunath
735feeca90 Add CustomResourceValidation example in sample-controller
- Mention the schema in the example CRD.
- Update README and mention about feature gates.

Kubernetes-commit: 74c9efa148ac6591a1dcf8f95c7fdac35aa603d0
2018-01-08 14:44:48 +05:30
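
With that schema in place, creating a Foo that violates it should be rejected by the apiserver. A hedged sketch using the generated clientset; the kubeconfig path and object names are placeholders, and the exact error text is approximate:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
	clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig") // placeholder
	if err != nil {
		panic(err)
	}
	client, err := clientset.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	replicas := int32(11) // violates the schema's maximum of 10
	foo := &samplev1alpha1.Foo{
		ObjectMeta: metav1.ObjectMeta{Name: "example-foo"},
		Spec: samplev1alpha1.FooSpec{
			DeploymentName: "example-foo-deploy",
			Replicas:       &replicas,
		},
	}
	_, err = client.SamplecontrollerV1alpha1().Foos("default").Create(foo)
	fmt.Println(err) // expect a validation error on spec.replicas
}
```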
Kubernetes Publisher
9ba34a0e65 Merge pull request #56534 from allenpetersen/updateGengo
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Update gengo version to include goimports formatter

Update gengo which now uses goimports to format code and organize imports.

Fixes #55542

**Special notes for your reviewer**:
Updates version of k8s.io/gengo
Takes new dependency on golang.org/x/tools/imports and golang.org/x/tools/go/ast/astutil

**Release Notes**:
```release-note
NONE
```

Kubernetes-commit: 46eabb7d918d962a9c8e0474d41f9371081d47fa
2018-01-04 06:21:34 +00:00
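
For reference, a small sketch of the `golang.org/x/tools/imports` entry point that gengo now formats with; the snippet being formatted is invented:

```go
package main

import (
	"fmt"

	"golang.org/x/tools/imports"
)

func main() {
	// Source that compiles only once "fmt" is imported.
	src := []byte("package demo\n\nfunc Hello() { fmt.Println(\"hi\") }\n")

	// Process gofmt-formats the source and fixes the import block —
	// here it inserts the missing "fmt" import.
	out, err := imports.Process("demo.go", src, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```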
Allen Petersen
92f920f871 Update generated files
Kubernetes-commit: 3d69cea1e589add1d24fc72e9a8c46081664a719
2018-01-02 22:07:30 -08:00
Kubernetes Publisher
9b9d7dbcb1 Merge pull request #57735 from cblecker/2018-is-the-year-of-kubernetes
Automatic merge from submit-queue (batch tested with PRs 57735, 57503). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Update generated copyrights for 2018

**What this PR does / why we need it**:
- Update boilerplate regex to support 2018
- Add generated runtime and generated device plugin scripts to `hack/update-all.sh` target list
- Run `make update` to regenerate all generated code

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #57728

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```

Kubernetes-commit: b098cafae06797ba8bf7fd35379e237fd355b047
2018-01-02 18:13:25 +00:00
Christoph Blecker
1c8ed500ee Regenerate all generated code
Kubernetes-commit: 80e344644e2b6222296f2f03551a8d0273c7cbce
2018-01-02 00:21:07 -08:00
yue9944882
bf54256d6b Regenerating code of fake clientset
Kubernetes-commit: 6f381ab2cd351c96a28b7ccde704ea96c38612dd
2017-12-25 11:46:56 +08:00
Kubernetes Publisher
5562881cd1 Merge pull request #56820 from ixdy/pure-go-binaries
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Bump rules_go and build "static" binaries in pure Go mode

**What this PR does / why we need it**: uses the latest set of enhancements in bazelbuild/rules_go to properly build the "static" Go binaries using pure Go (i.e. `CGO_ENABLED=0`), and to support cross compilation of these binaries. Cross compilation of cgo-enabled binaries is still not supported.

Also depends on https://github.com/kubernetes/repo-infra/pull/52, and requires bazel 0.8+ (which is not yet enabled in CI for kubernetes/kubernetes).

Note that for proper cross compilation we'll also need to remove the hardcoded "linux-amd64" that's strewn about several `BUILD` files.

**Release note**:

```release-note
NONE
```

/hold
/assign @BenTheElder @mikedanese @spxtr

Kubernetes-commit: f5f6f3e715cb8dfbd9657a4229c77ec6a5eab135
2017-12-24 02:01:38 +00:00
Jeff Grafton
72bbd3993a Autogenerate BUILD files
Kubernetes-commit: efee0704c60a2ee3049268a41535aaee7f661f6c
2017-12-23 13:06:26 -08:00
Kubernetes Publisher
8f8c5aedb8 Merge pull request #57473 from spiffxp/add-code-of-conduct-to-staging-repos
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add code-of-conduct.md to staging repos

Instead of adding it directly to their published copies

Replaces
- https://github.com/kubernetes/api/pull/13
- https://github.com/kubernetes/apiextensions-apiserver/pull/21
- https://github.com/kubernetes/apimachinery/pull/33
- https://github.com/kubernetes/apiserver/pull/28
- https://github.com/kubernetes/client-go/pull/350
- https://github.com/kubernetes/code-generator/pull/29
- https://github.com/kubernetes/kube-aggregator/pull/11
- https://github.com/kubernetes/metrics/pull/12
- https://github.com/kubernetes/sample-apiserver/pull/18
- https://github.com/kubernetes/sample-controller/pull/3

ref: kubernetes/community#1527

Kubernetes-commit: 8a4a39d6ae1be45115c12cfa166f2c8151d88d8c
2017-12-23 02:02:10 +00:00
Aaron Crickenberger
31cb9f1779 Treat staging repos as authoritative for all files
Move files from kubernetes/foo root back to
kubernetes/kubernetes/staging/src/k8s.io/foo root

Then:
- add CONTRIBUTING.md for all staging repos
- add .PULL_REQUEST_TEMPLATE to all staging repos
- ignore .github while diffing generated protobuf

Kubernetes-commit: d9b5773101e930431f24fe178d988271c1becc35
2017-12-22 17:09:51 -05:00
Aaron Crickenberger
49cf8c5ad4 Add code-of-conduct.md to staging repos
Kubernetes-commit: 918a6a7de7a77294a57c11c8fe40ce54cd18ea56
2017-12-20 15:21:56 -05:00
Kubernetes Publisher
533e85a0fb Merge pull request #57243 from munnerz/fix-sample-ctrl
Automatic merge from submit-queue (batch tested with PRs 56403, 57243). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Register metav1 types into samplecontroller api scheme

**What this PR does / why we need it**:

Registers metav1 resource types (e.g. ListOptions) with sample-controller scheme.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Fixes #57205

**Release note**:
```release-note
NONE
```

/cc @sttts @nikhita

Kubernetes-commit: 0d42e742da14f9659ebeb80e7fe375a58add04c4
2017-12-19 16:01:41 +00:00
Kubernetes Publisher
46b5d73382 Merge pull request #57142 from nikhita/bump-jsoniter
Automatic merge from submit-queue (batch tested with PRs 57122, 57142, 57016, 56927, 56678). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

bump(13f864): github.com/json-iterator/go: use ConfigCompatibleWithStandardLibrary

Jsoniter in `ConfigFastest` mode does not support escape characters in object keys, whereas `ConfigCompatibleWithStandardLibrary` does.

Fixes kubernetes/kubernetes#56018
Related kubernetes/kubernetes#56055

Benchmark results:

```
BenchmarkDecodeIntoJSON-4                                              	   30000	     48522 ns/op	    3792 B/op	      63 allocs/op
BenchmarkDecodeIntoJSONCodecGenConfigFast-4                            	  100000	     17409 ns/op	    4524 B/op	      96 allocs/op
BenchmarkDecodeIntoJSONCodecGenConfigCompatibleWithStandardLibrary-4   	  100000	     18617 ns/op	    4924 B/op	     121 allocs/op
```

/assign sttts thockin mfojtik

Kubernetes-commit: 135d58b3941fac99ae0426e18cbda266b83ca49e
2017-12-17 08:06:20 +00:00
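
A minimal sketch of the difference; the input is chosen to illustrate the escaped-key case, with the exact failing inputs in kubernetes/kubernetes#56018:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// An object key containing an escape sequence — the case ConfigFastest
	// mishandled and ConfigCompatibleWithStandardLibrary decodes correctly.
	data := []byte(`{"a\"b": 1}`)

	var m map[string]interface{}
	if err := jsoniter.ConfigCompatibleWithStandardLibrary.Unmarshal(data, &m); err != nil {
		fmt.Println("unmarshal error:", err)
		return
	}
	fmt.Printf("%#v\n", m) // map[string]interface {}{"a\"b":1}
}
```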
James Munnelly
756ec2596b Register metav1 types into samplecontroller api scheme
Kubernetes-commit: 9efe856979001b83d544fde99c9a2af3f8bcc170
2017-12-15 12:21:16 +00:00
Nikhita Raghunath
1b691d7a53 update staging godeps
Kubernetes-commit: a75aa0f41c51add6fa02c8bfc9362cfe9a5be8bc
2017-12-14 01:07:31 +05:30
Di Xu
4d65170555 bump pflag
Kubernetes-commit: 6c54ec59ee7e6e1fa9f1dc311762ab98dc1b3d0a
2017-11-20 13:21:27 +08:00
Jordan Liggitt
530724013e Remove github.com/juju/ratelimit
Kubernetes-commit: 2068044910e2a009a37cf7628ba32184600d0281
2016-12-07 15:10:03 -05:00
Jordan Liggitt
e8d6b5cf83 Switch from juju/ratelimit to golang.org/x/time/rate
Kubernetes-commit: 4b9f00988b9401f6c774d3d5e1bce45a8e8f81c8
2018-01-19 02:08:51 -05:00
1485 changed files with 125439 additions and 17553 deletions

.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,2 @@
Sorry, we do not accept changes directly against this repository. Please see
CONTRIBUTING.md for information on where and how to contribute instead.

BUILD

@@ -1,64 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
go_library(
name = "go_default_library",
srcs = [
"controller.go",
"main.go",
],
importpath = "k8s.io/sample-controller",
visibility = ["//visibility:private"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/apps/v1beta2:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//vendor/k8s.io/client-go/informers:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/scheme:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/typed/core/v1:go_default_library",
"//vendor/k8s.io/client-go/listers/apps/v1beta2:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/client-go/tools/clientcmd:go_default_library",
"//vendor/k8s.io/client-go/tools/record:go_default_library",
"//vendor/k8s.io/client-go/util/workqueue:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/signals:go_default_library",
],
)
go_binary(
name = "sample-controller",
importpath = "k8s.io/sample-controller",
library = ":go_default_library",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/signals:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

CONTRIBUTING.md

@@ -0,0 +1,7 @@
# Contributing guidelines
Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information

Godeps/Godeps.json generated
(File diff suppressed because it is too large.)

Godeps/OWNERS generated

@@ -0,0 +1,2 @@
approvers:
- dep-approvers

README.md

@@ -29,6 +29,8 @@ This is an example of how to build a kube-like controller with a single type.
## Running
**Prerequisite**: Since the sample-controller uses `apps/v1` deployments, the Kubernetes cluster version should be greater than 1.9.
```sh
# assumes you have a working kubeconfig, not required if operating in-cluster
$ go run *.go -kubeconfig=$HOME/.kube/config
@@ -73,6 +75,22 @@ type User struct {
}
```
## Validation
To validate custom resources, use the [`CustomResourceValidation`](https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/#validation) feature.
This feature is beta and enabled by default in v1.9. If you are using v1.8, enable the feature using
the `CustomResourceValidation` feature gate on the [kube-apiserver](https://kubernetes.io/docs/admin/kube-apiserver):
```sh
--feature-gates=CustomResourceValidation=true
```
### Example
The schema in the [example CRD](./artifacts/examples/crd.yaml) applies the following validation on the custom resource:
`spec.replicas` must be an integer and must have a minimum value of 1 and a maximum value of 10.
## Cleanup
You can clean up the created CustomResourceDefinition with:

artifacts/examples/crd.yaml

@@ -9,3 +9,12 @@ spec:
kind: Foo
plural: foos
scope: Namespaced
validation:
openAPIV3Schema:
properties:
spec:
properties:
replicas:
type: integer
minimum: 1
maximum: 10

code-of-conduct.md

@@ -0,0 +1,3 @@
# Kubernetes Community Code of Conduct
Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md)

controller.go

@@ -21,7 +21,7 @@ import (
"time"
"github.com/golang/glog"
appsv1beta2 "k8s.io/api/apps/v1beta2"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -32,7 +32,7 @@ import (
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
appslisters "k8s.io/client-go/listers/apps/v1beta2"
appslisters "k8s.io/client-go/listers/apps/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
@@ -93,7 +93,7 @@ func NewController(
// obtain references to shared index informers for the Deployment and Foo
// types.
deploymentInformer := kubeInformerFactory.Apps().V1beta2().Deployments()
deploymentInformer := kubeInformerFactory.Apps().V1().Deployments()
fooInformer := sampleInformerFactory.Samplecontroller().V1alpha1().Foos()
// Create event broadcaster
@@ -134,8 +134,8 @@ func NewController(
deploymentInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.handleObject,
UpdateFunc: func(old, new interface{}) {
newDepl := new.(*appsv1beta2.Deployment)
oldDepl := old.(*appsv1beta2.Deployment)
newDepl := new.(*appsv1.Deployment)
oldDepl := old.(*appsv1.Deployment)
if newDepl.ResourceVersion == oldDepl.ResourceVersion {
// Periodic resync will send update events for all known Deployments.
// Two different versions of the same Deployment will always have different RVs.
@@ -277,7 +277,7 @@ func (c *Controller) syncHandler(key string) error {
deployment, err := c.deploymentsLister.Deployments(foo.Namespace).Get(deploymentName)
// If the resource doesn't exist, we'll create it
if errors.IsNotFound(err) {
deployment, err = c.kubeclientset.AppsV1beta2().Deployments(foo.Namespace).Create(newDeployment(foo))
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Create(newDeployment(foo))
}
// If an error occurs during Get/Create, we'll requeue the item so we can
@@ -299,8 +299,8 @@ func (c *Controller) syncHandler(key string) error {
// number does not equal the current desired replicas on the Deployment, we
// should update the Deployment resource.
if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
glog.V(4).Infof("Foor: %d, deplR: %d", *foo.Spec.Replicas, *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1beta2().Deployments(foo.Namespace).Update(newDeployment(foo))
glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo))
}
// If an error occurs during Update, we'll requeue the item so we can
@@ -321,7 +321,7 @@ func (c *Controller) syncHandler(key string) error {
return nil
}
func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1beta2.Deployment) error {
func (c *Controller) updateFooStatus(foo *samplev1alpha1.Foo, deployment *appsv1.Deployment) error {
// NEVER modify objects from the store. It's a read-only, local cache.
// You can use DeepCopy() to make a deep copy of original object and modify this copy
// Or create a copy manually for better performance
@@ -391,12 +391,12 @@ func (c *Controller) handleObject(obj interface{}) {
// newDeployment creates a new Deployment for a Foo resource. It also sets
// the appropriate OwnerReferences on the resource so handleObject can discover
// the Foo resource that 'owns' it.
func newDeployment(foo *samplev1alpha1.Foo) *appsv1beta2.Deployment {
func newDeployment(foo *samplev1alpha1.Foo) *appsv1.Deployment {
labels := map[string]string{
"app": "nginx",
"controller": foo.Name,
}
return &appsv1beta2.Deployment{
return &appsv1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Name: foo.Spec.DeploymentName,
Namespace: foo.Namespace,
@@ -408,7 +408,7 @@ func newDeployment(foo *samplev1alpha1.Foo) *appsv1beta2.Deployment {
}),
},
},
Spec: appsv1beta2.DeploymentSpec{
Spec: appsv1.DeploymentSpec{
Replicas: foo.Spec.Replicas,
Selector: &metav1.LabelSelector{
MatchLabels: labels,

hack/boilerplate.go.txt

@@ -0,0 +1,16 @@
/*
Copyright YEAR The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

hack/update-codegen.sh

@@ -28,7 +28,8 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-ge
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
k8s.io/sample-controller/pkg/client k8s.io/sample-controller/pkg/apis \
samplecontroller:v1alpha1 \
--output-base "$(dirname ${BASH_SOURCE})/../../.."
--output-base "$(dirname ${BASH_SOURCE})/../../.." \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
# To use your own boilerplate text append:
# To use your own boilerplate text use:
# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt


@@ -1,25 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["register.go"],
importpath = "k8s.io/sample-controller/pkg/apis/samplecontroller",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,33 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
"types.go",
"zz_generated.deepcopy.go",
],
importpath = "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -17,6 +17,7 @@ limitations under the License.
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@@ -47,5 +48,6 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&Foo{},
&FooList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
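
Why the added `metav1.AddToGroupVersion` call above matters, as a hedged aside: without registering the metav1 types into the group-version, clients cannot encode `ListOptions` for list/watch calls against the group (the bug in #57205). A small check, with package aliases assumed:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := samplev1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// metav1.AddToGroupVersion registered ListOptions (among other meta
	// types) for the samplecontroller group-version, so the scheme now
	// recognizes the kind that list/watch requests are encoded with.
	gvk := samplev1alpha1.SchemeGroupVersion.WithKind("ListOptions")
	fmt.Println(scheme.Recognizes(gvk)) // true after this fix
}
```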


@@ -1,7 +1,7 @@
// +build !ignore_autogenerated
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
// Code generated by deepcopy-gen. DO NOT EDIT.
package v1alpha1
@@ -48,9 +48,8 @@ func (in *Foo) DeepCopy() *Foo {
func (in *Foo) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
@@ -82,9 +81,8 @@ func (in *FooList) DeepCopy() *FooList {
func (in *FooList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
} else {
return nil
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.


@@ -1,37 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"clientset.go",
"doc.go",
],
importpath = "k8s.io/sample-controller/pkg/client/clientset/versioned",
visibility = ["//visibility:public"],
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/util/flowcontrol:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/fake:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned


@@ -1,40 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"clientset_generated.go",
"doc.go",
"register.go",
],
importpath = "k8s.io/sample-controller/pkg/client/clientset/versioned/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/discovery:go_default_library",
"//vendor/k8s.io/client-go/discovery/fake:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@@ -41,7 +43,15 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset {
fakePtr := testing.Fake{}
fakePtr.AddReactor("*", "*", testing.ObjectReaction(o))
fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil))
fakePtr.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return &Clientset{fakePtr, &fakediscovery.FakeDiscovery{Fake: &fakePtr}}
}


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
@@ -38,7 +40,7 @@ func init() {
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
@@ -49,5 +51,4 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
samplecontrollerv1alpha1.AddToScheme(scheme)
}


@@ -1,32 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"register.go",
],
importpath = "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
@@ -38,7 +40,7 @@ func init() {
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kuberentes/scheme"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
@@ -49,5 +51,4 @@ func init() {
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
samplecontrollerv1alpha1.AddToScheme(scheme)
}


@@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"foo.go",
"generated_expansion.go",
"samplecontroller_client.go",
],
importpath = "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/serializer:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/scheme:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated typed clients.
package v1alpha1


@@ -1,37 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"doc.go",
"fake_foo.go",
"fake_samplecontroller_client.go",
],
importpath = "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/rest:go_default_library",
"//vendor/k8s.io/client-go/testing:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,5 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// Package fake has the automatically generated clients.
package fake


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
type FooExpansion interface{}


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,6 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (


@@ -1,39 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"factory.go",
"generic.go",
],
importpath = "k8s.io/sample-controller/pkg/client/informers/externalversions",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces:all-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,11 +14,15 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@@ -26,9 +30,6 @@ import (
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
samplecontroller "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller"
reflect "reflect"
sync "sync"
time "time"
)
type sharedInformerFactory struct {


@@ -1,5 +1,5 @@
/*
Copyright 2017 The Kubernetes Authors.
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,12 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was automatically generated by informer-gen
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
"fmt"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"


@@ -1,28 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["factory_interfaces.go"],
importpath = "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,16 +14,17 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by informer-gen
+// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
import (
+time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
-time "time"
)
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

View File

@@ -1,29 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["interface.go"],
importpath = "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//staging/src/k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by informer-gen
+// Code generated by informer-gen. DO NOT EDIT.
package samplecontroller

View File

@@ -1,35 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"foo.go",
"interface.go",
],
importpath = "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/watch:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/clientset/versioned:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,11 +14,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by informer-gen
+// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
+time "time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@@ -27,7 +29,6 @@ import (
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
-time "time"
)
// FooInformer provides access to a shared informer and lister for

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by informer-gen
+// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1

View File

@@ -1,31 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"expansion_generated.go",
"foo.go",
],
importpath = "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1",
visibility = ["//visibility:public"],
deps = [
"//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/client-go/tools/cache:go_default_library",
"//vendor/k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by lister-gen
+// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1

View File

@@ -1,5 +1,5 @@
/*
-Copyright 2017 The Kubernetes Authors.
+Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-// This file was automatically generated by lister-gen
+// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1

View File

@@ -1,30 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"signal.go",
"signal_posix.go",
] + select({
"@io_bazel_rules_go//go/platform:windows_amd64": [
"signal_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/sample-controller/pkg/signals",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
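The signals package built here is tiny; upstream it exposes a single helper that turns SIGTERM/SIGINT into a stop channel. A hedged sketch of typical use (SetupSignalHandler is the upstream helper's name; treat the exact behavior in the comments as assumptions):

package main

import (
	"k8s.io/sample-controller/pkg/signals"
)

func main() {
	// Assumed behavior: installs handlers for SIGTERM and SIGINT and
	// returns a channel that is closed on the first signal; a second
	// signal exits the process directly.
	stopCh := signals.SetupSignalHandler()

	// ... construct clientsets, informer factories, and the controller ...

	<-stopCh // block until shutdown is requested
}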

View File

@@ -1,900 +0,0 @@
// Copyright 2015 go-swagger maintainers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package spec
import (
"encoding/json"
"fmt"
"log"
"net/url"
"os"
"path/filepath"
"reflect"
"strings"
"sync"
"github.com/go-openapi/jsonpointer"
"github.com/go-openapi/swag"
)
var (
// Debug enables logging when SWAGGER_DEBUG env var is not empty
Debug = os.Getenv("SWAGGER_DEBUG") != ""
)
// ExpandOptions provides options for expand.
type ExpandOptions struct {
RelativeBase string
SkipSchemas bool
ContinueOnError bool
}
// ResolutionCache is a cache for resolving URLs
type ResolutionCache interface {
Get(string) (interface{}, bool)
Set(string, interface{})
}
type simpleCache struct {
lock sync.Mutex
store map[string]interface{}
}
var resCache ResolutionCache
func init() {
resCache = initResolutionCache()
}
func initResolutionCache() ResolutionCache {
return &simpleCache{store: map[string]interface{}{
"http://swagger.io/v2/schema.json": MustLoadSwagger20Schema(),
"http://json-schema.org/draft-04/schema": MustLoadJSONSchemaDraft04(),
}}
}
func (s *simpleCache) Get(uri string) (interface{}, bool) {
debugLog("getting %q from resolution cache", uri)
s.lock.Lock()
v, ok := s.store[uri]
debugLog("got %q from resolution cache: %t", uri, ok)
s.lock.Unlock()
return v, ok
}
func (s *simpleCache) Set(uri string, data interface{}) {
s.lock.Lock()
s.store[uri] = data
s.lock.Unlock()
}
// ResolveRefWithBase resolves a reference against a context root with preservation of base path
func ResolveRefWithBase(root interface{}, ref *Ref, opts *ExpandOptions) (*Schema, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(Schema)
if err := resolver.Resolve(ref, result); err != nil {
return nil, err
}
return result, nil
}
// ResolveRef resolves a reference against a context root
func ResolveRef(root interface{}, ref *Ref) (*Schema, error) {
return ResolveRefWithBase(root, ref, nil)
}
// ResolveParameter resolves a parameter reference against a context root
func ResolveParameter(root interface{}, ref Ref) (*Parameter, error) {
return ResolveParameterWithBase(root, ref, nil)
}
// ResolveParameterWithBase resolves a parameter reference against a context root and base path
func ResolveParameterWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Parameter, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(Parameter)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
// ResolveResponse resolves a response reference against a context root
func ResolveResponse(root interface{}, ref Ref) (*Response, error) {
return ResolveResponseWithBase(root, ref, nil)
}
// ResolveResponseWithBase resolves a response reference against a context root and base path
func ResolveResponseWithBase(root interface{}, ref Ref, opts *ExpandOptions) (*Response, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(Response)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
// ResolveItems resolves a header or parameter items reference against a context root and base path
func ResolveItems(root interface{}, ref Ref, opts *ExpandOptions) (*Items, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(Items)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
// ResolvePathItem resolves a path item reference against a context root and base path
func ResolvePathItem(root interface{}, ref Ref, opts *ExpandOptions) (*PathItem, error) {
resolver, err := defaultSchemaLoader(root, nil, opts, nil)
if err != nil {
return nil, err
}
result := new(PathItem)
if err := resolver.Resolve(&ref, result); err != nil {
return nil, err
}
return result, nil
}
type schemaLoader struct {
loadingRef *Ref
startingRef *Ref
currentRef *Ref
root interface{}
options *ExpandOptions
cache ResolutionCache
loadDoc func(string) (json.RawMessage, error)
}
var idPtr, _ = jsonpointer.New("/id")
var refPtr, _ = jsonpointer.New("/$ref")
// PathLoader function to use when loading remote refs
var PathLoader func(string) (json.RawMessage, error)
func init() {
PathLoader = func(path string) (json.RawMessage, error) {
data, err := swag.LoadFromFileOrHTTP(path)
if err != nil {
return nil, err
}
return json.RawMessage(data), nil
}
}
func defaultSchemaLoader(
root interface{},
ref *Ref,
expandOptions *ExpandOptions,
cache ResolutionCache) (*schemaLoader, error) {
if cache == nil {
cache = resCache
}
if expandOptions == nil {
expandOptions = &ExpandOptions{}
}
var ptr *jsonpointer.Pointer
if ref != nil {
ptr = ref.GetPointer()
}
currentRef := nextRef(root, ref, ptr)
return &schemaLoader{
loadingRef: ref,
startingRef: ref,
currentRef: currentRef,
root: root,
options: expandOptions,
cache: cache,
loadDoc: func(path string) (json.RawMessage, error) {
debugLog("fetching document at %q", path)
return PathLoader(path)
},
}, nil
}
func idFromNode(node interface{}) (*Ref, error) {
if idValue, _, err := idPtr.Get(node); err == nil {
if refStr, ok := idValue.(string); ok && refStr != "" {
idRef, err := NewRef(refStr)
if err != nil {
return nil, err
}
return &idRef, nil
}
}
return nil, nil
}
func nextRef(startingNode interface{}, startingRef *Ref, ptr *jsonpointer.Pointer) *Ref {
if startingRef == nil {
return nil
}
if ptr == nil {
return startingRef
}
ret := startingRef
var idRef *Ref
node := startingNode
for _, tok := range ptr.DecodedTokens() {
node, _, _ = jsonpointer.GetForToken(node, tok)
if node == nil {
break
}
idRef, _ = idFromNode(node)
if idRef != nil {
nw, err := ret.Inherits(*idRef)
if err != nil {
break
}
ret = nw
}
refRef, _, _ := refPtr.Get(node)
if refRef != nil {
var rf Ref
switch value := refRef.(type) {
case string:
rf, _ = NewRef(value)
}
nw, err := ret.Inherits(rf)
if err != nil {
break
}
nwURL := nw.GetURL()
if nwURL.Scheme == "file" || (nwURL.Scheme == "" && nwURL.Host == "") {
nwpt := filepath.ToSlash(nwURL.Path)
if filepath.IsAbs(nwpt) {
_, err := os.Stat(nwpt)
if err != nil {
nwURL.Path = filepath.Join(".", nwpt)
}
}
}
ret = nw
}
}
return ret
}
func debugLog(msg string, args ...interface{}) {
if Debug {
log.Printf(msg, args...)
}
}
func normalizeFileRef(ref *Ref, relativeBase string) *Ref {
refURL := ref.GetURL()
debugLog("normalizing %s against %s (%s)", ref.String(), relativeBase, refURL.String())
if strings.HasPrefix(refURL.String(), "#") {
return ref
}
if refURL.Scheme == "file" || (refURL.Scheme == "" && refURL.Host == "") {
filePath := refURL.Path
debugLog("normalizing file path: %s", filePath)
if !filepath.IsAbs(filepath.FromSlash(filePath)) && len(relativeBase) != 0 {
debugLog("joining %s with %s", relativeBase, filePath)
if fi, err := os.Stat(filepath.FromSlash(relativeBase)); err == nil {
if !fi.IsDir() {
relativeBase = filepath.Dir(filepath.FromSlash(relativeBase))
}
}
filePath = filepath.Join(filepath.FromSlash(relativeBase), filepath.FromSlash(filePath))
}
if !filepath.IsAbs(filepath.FromSlash(filePath)) {
pwd, err := os.Getwd()
if err == nil {
debugLog("joining cwd %s with %s", pwd, filePath)
filePath = filepath.Join(pwd, filepath.FromSlash(filePath))
}
}
debugLog("cleaning %s", filePath)
filePath = filepath.Clean(filepath.FromSlash(filePath))
_, err := os.Stat(filepath.FromSlash(filePath))
if err == nil {
debugLog("rewriting url %s to scheme \"\" path %s", refURL.String(), filePath)
slp := filepath.FromSlash(filePath)
if filepath.IsAbs(slp) && filepath.Separator == '\\' && len(slp) > 1 && slp[1] == ':' && ('a' <= slp[0] && slp[0] <= 'z' || 'A' <= slp[0] && slp[0] <= 'Z') {
slp = slp[2:]
}
refURL.Scheme = ""
refURL.Path = filepath.ToSlash(slp)
debugLog("new url with joined filepath: %s", refURL.String())
*ref = MustCreateRef(refURL.String())
}
}
debugLog("refurl: %s", ref.GetURL().String())
return ref
}
func (r *schemaLoader) resolveRef(currentRef, ref *Ref, node, target interface{}) error {
tgt := reflect.ValueOf(target)
if tgt.Kind() != reflect.Ptr {
return fmt.Errorf("resolve ref: target needs to be a pointer")
}
oldRef := currentRef
if currentRef != nil {
debugLog("resolve ref current %s new %s", currentRef.String(), ref.String())
nextRef := nextRef(node, ref, currentRef.GetPointer())
if nextRef == nil || nextRef.GetURL() == nil {
return nil
}
var err error
currentRef, err = currentRef.Inherits(*nextRef)
debugLog("resolved ref current %s", currentRef.String())
if err != nil {
return err
}
}
if currentRef == nil {
currentRef = ref
}
refURL := currentRef.GetURL()
if refURL == nil {
return nil
}
if currentRef.IsRoot() {
nv := reflect.ValueOf(node)
reflect.Indirect(tgt).Set(reflect.Indirect(nv))
return nil
}
if strings.HasPrefix(refURL.String(), "#") {
res, _, err := ref.GetPointer().Get(node)
if err != nil {
res, _, err = ref.GetPointer().Get(r.root)
if err != nil {
return err
}
}
rv := reflect.Indirect(reflect.ValueOf(res))
tgtType := reflect.Indirect(tgt).Type()
if rv.Type().AssignableTo(tgtType) {
reflect.Indirect(tgt).Set(reflect.Indirect(reflect.ValueOf(res)))
} else {
if err := swag.DynamicJSONToStruct(rv.Interface(), target); err != nil {
return err
}
}
return nil
}
relativeBase := ""
if r.options != nil && r.options.RelativeBase != "" {
relativeBase = r.options.RelativeBase
}
normalizeFileRef(currentRef, relativeBase)
debugLog("current ref normalized file: %s", currentRef.String())
normalizeFileRef(ref, relativeBase)
debugLog("ref normalized file: %s", currentRef.String())
data, _, _, err := r.load(currentRef.GetURL())
if err != nil {
return err
}
if ((oldRef == nil && currentRef != nil) ||
(oldRef != nil && currentRef == nil) ||
oldRef.String() != currentRef.String()) &&
((oldRef == nil && ref != nil) ||
(oldRef != nil && ref == nil) ||
(oldRef.String() != ref.String())) {
return r.resolveRef(currentRef, ref, data, target)
}
var res interface{}
if currentRef.String() != "" {
res, _, err = currentRef.GetPointer().Get(data)
if err != nil {
if strings.HasPrefix(ref.String(), "#") {
if r.loadingRef != nil {
rr, er := r.loadingRef.Inherits(*ref)
if er != nil {
return er
}
refURL = rr.GetURL()
data, _, _, err = r.load(refURL)
if err != nil {
return err
}
} else {
data = r.root
}
}
res, _, err = ref.GetPointer().Get(data)
if err != nil {
return err
}
}
} else {
res = data
}
if err := swag.DynamicJSONToStruct(res, target); err != nil {
return err
}
r.currentRef = currentRef
return nil
}
func (r *schemaLoader) load(refURL *url.URL) (interface{}, url.URL, bool, error) {
debugLog("loading schema from url: %s", refURL)
toFetch := *refURL
toFetch.Fragment = ""
data, fromCache := r.cache.Get(toFetch.String())
if !fromCache {
b, err := r.loadDoc(toFetch.String())
if err != nil {
return nil, url.URL{}, false, err
}
if err := json.Unmarshal(b, &data); err != nil {
return nil, url.URL{}, false, err
}
r.cache.Set(toFetch.String(), data)
}
return data, toFetch, fromCache, nil
}
func (r *schemaLoader) Resolve(ref *Ref, target interface{}) error {
return r.resolveRef(r.currentRef, ref, r.root, target)
}
func (r *schemaLoader) reset() {
ref := r.startingRef
var ptr *jsonpointer.Pointer
if ref != nil {
ptr = ref.GetPointer()
}
r.currentRef = nextRef(r.root, ref, ptr)
}
// ExpandSpec expands the references in a swagger spec
func ExpandSpec(spec *Swagger, options *ExpandOptions) error {
resolver, err := defaultSchemaLoader(spec, nil, options, nil)
// Just in case this ever returns an error.
if shouldStopOnError(err, resolver.options) {
return err
}
if options == nil || !options.SkipSchemas {
for key, definition := range spec.Definitions {
var def *Schema
var err error
if def, err = expandSchema(definition, []string{"#/definitions/" + key}, resolver); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
spec.Definitions[key] = *def
}
}
for key, parameter := range spec.Parameters {
if err := expandParameter(&parameter, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Parameters[key] = parameter
}
for key, response := range spec.Responses {
if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Responses[key] = response
}
if spec.Paths != nil {
for key, path := range spec.Paths.Paths {
if err := expandPathItem(&path, resolver); shouldStopOnError(err, resolver.options) {
return err
}
spec.Paths.Paths[key] = path
}
}
return nil
}
func shouldStopOnError(err error, opts *ExpandOptions) bool {
if err != nil && !opts.ContinueOnError {
return true
}
if err != nil {
log.Println(err)
}
return false
}
// ExpandSchema expands the refs in the schema object
func ExpandSchema(schema *Schema, root interface{}, cache ResolutionCache) error {
return ExpandSchemaWithBasePath(schema, root, cache, nil)
}
// ExpandSchemaWithBasePath expands the refs in the schema object, base path configured through expand options
func ExpandSchemaWithBasePath(schema *Schema, root interface{}, cache ResolutionCache, opts *ExpandOptions) error {
if schema == nil {
return nil
}
if root == nil {
root = schema
}
nrr, _ := NewRef(schema.ID)
var rrr *Ref
if nrr.String() != "" {
switch rt := root.(type) {
case *Schema:
rid, _ := NewRef(rt.ID)
rrr, _ = rid.Inherits(nrr)
case *Swagger:
rid, _ := NewRef(rt.ID)
rrr, _ = rid.Inherits(nrr)
}
}
resolver, err := defaultSchemaLoader(root, rrr, opts, cache)
if err != nil {
return err
}
refs := []string{""}
if rrr != nil {
refs[0] = rrr.String()
}
var s *Schema
if s, err = expandSchema(*schema, refs, resolver); err != nil {
return err
}
*schema = *s
return nil
}
func expandItems(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
if target.Items != nil {
if target.Items.Schema != nil {
t, err := expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
if target.Items.Schema.ID == "" {
target.Items.Schema.ID = target.ID
if err != nil {
t, err = expandSchema(*target.Items.Schema, parentRefs, resolver)
if err != nil {
return nil, err
}
}
}
}
*target.Items.Schema = *t
}
for i := range target.Items.Schemas {
t, err := expandSchema(target.Items.Schemas[i], parentRefs, resolver)
if err != nil {
return nil, err
}
target.Items.Schemas[i] = *t
}
}
return &target, nil
}
func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader) (*Schema, error) {
if target.Ref.String() == "" && target.Ref.IsRoot() {
debugLog("skipping expand schema for no ref and root: %v", resolver.root)
return resolver.root.(*Schema), nil
}
// t is the new expanded schema
var t *Schema
for target.Ref.String() != "" {
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
return &target, nil
}
if err := resolver.Resolve(&target.Ref, &t); shouldStopOnError(err, resolver.options) {
return &target, err
}
if swag.ContainsStringsCI(parentRefs, target.Ref.String()) {
debugLog("ref already exists in parent")
return &target, nil
}
parentRefs = append(parentRefs, target.Ref.String())
if t != nil {
target = *t
}
}
t, err := expandItems(target, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target = *t
}
for i := range target.AllOf {
t, err := expandSchema(target.AllOf[i], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.AllOf[i] = *t
}
}
for i := range target.AnyOf {
t, err := expandSchema(target.AnyOf[i], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
target.AnyOf[i] = *t
}
for i := range target.OneOf {
t, err := expandSchema(target.OneOf[i], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.OneOf[i] = *t
}
}
if target.Not != nil {
t, err := expandSchema(*target.Not, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.Not = *t
}
}
for k := range target.Properties {
t, err := expandSchema(target.Properties[k], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.Properties[k] = *t
}
}
if target.AdditionalProperties != nil && target.AdditionalProperties.Schema != nil {
t, err := expandSchema(*target.AdditionalProperties.Schema, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.AdditionalProperties.Schema = *t
}
}
for k := range target.PatternProperties {
t, err := expandSchema(target.PatternProperties[k], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.PatternProperties[k] = *t
}
}
for k := range target.Dependencies {
if target.Dependencies[k].Schema != nil {
t, err := expandSchema(*target.Dependencies[k].Schema, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.Dependencies[k].Schema = *t
}
}
}
if target.AdditionalItems != nil && target.AdditionalItems.Schema != nil {
t, err := expandSchema(*target.AdditionalItems.Schema, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
*target.AdditionalItems.Schema = *t
}
}
for k := range target.Definitions {
t, err := expandSchema(target.Definitions[k], parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return &target, err
}
if t != nil {
target.Definitions[k] = *t
}
}
return &target, nil
}
func expandPathItem(pathItem *PathItem, resolver *schemaLoader) error {
if pathItem == nil {
return nil
}
if pathItem.Ref.String() != "" {
if err := resolver.Resolve(&pathItem.Ref, &pathItem); err != nil {
return err
}
resolver.reset()
pathItem.Ref = Ref{}
}
for idx := range pathItem.Parameters {
if err := expandParameter(&(pathItem.Parameters[idx]), resolver); shouldStopOnError(err, resolver.options) {
return err
}
}
if err := expandOperation(pathItem.Get, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Head, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Options, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Put, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Post, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Patch, resolver); shouldStopOnError(err, resolver.options) {
return err
}
if err := expandOperation(pathItem.Delete, resolver); shouldStopOnError(err, resolver.options) {
return err
}
return nil
}
func expandOperation(op *Operation, resolver *schemaLoader) error {
if op == nil {
return nil
}
for i, param := range op.Parameters {
if err := expandParameter(&param, resolver); shouldStopOnError(err, resolver.options) {
return err
}
op.Parameters[i] = param
}
if op.Responses != nil {
responses := op.Responses
if err := expandResponse(responses.Default, resolver); shouldStopOnError(err, resolver.options) {
return err
}
for code, response := range responses.StatusCodeResponses {
if err := expandResponse(&response, resolver); shouldStopOnError(err, resolver.options) {
return err
}
responses.StatusCodeResponses[code] = response
}
}
return nil
}
func expandResponse(response *Response, resolver *schemaLoader) error {
if response == nil {
return nil
}
var parentRefs []string
if response.Ref.String() != "" {
parentRefs = append(parentRefs, response.Ref.String())
if err := resolver.Resolve(&response.Ref, response); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
response.Ref = Ref{}
}
if !resolver.options.SkipSchemas && response.Schema != nil {
parentRefs = append(parentRefs, response.Schema.Ref.String())
debugLog("response ref: %s", response.Schema.Ref)
if err := resolver.Resolve(&response.Schema.Ref, &response.Schema); shouldStopOnError(err, resolver.options) {
return err
}
s, err := expandSchema(*response.Schema, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
*response.Schema = *s
}
return nil
}
func expandParameter(parameter *Parameter, resolver *schemaLoader) error {
if parameter == nil {
return nil
}
var parentRefs []string
if parameter.Ref.String() != "" {
parentRefs = append(parentRefs, parameter.Ref.String())
if err := resolver.Resolve(&parameter.Ref, parameter); shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
parameter.Ref = Ref{}
}
if !resolver.options.SkipSchemas && parameter.Schema != nil {
parentRefs = append(parentRefs, parameter.Schema.Ref.String())
if err := resolver.Resolve(&parameter.Schema.Ref, &parameter.Schema); shouldStopOnError(err, resolver.options) {
return err
}
s, err := expandSchema(*parameter.Schema, parentRefs, resolver)
if shouldStopOnError(err, resolver.options) {
return err
}
resolver.reset()
*parameter.Schema = *s
}
return nil
}
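Since the diff above removes this vendored copy of the expander wholesale, a short usage sketch of its public surface may help the review: ExpandSpec walks Definitions, Parameters, Responses, and Paths, chasing $ref pointers through the schemaLoader. The sample document below is invented purely for illustration:

package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/go-openapi/spec"
)

func main() {
	// A tiny spec with one internal $ref.
	raw := []byte(`{
		"swagger": "2.0",
		"definitions": {
			"pet":   {"type": "object", "properties": {"name": {"type": "string"}}},
			"alias": {"$ref": "#/definitions/pet"}
		},
		"paths": {}
	}`)

	var doc spec.Swagger
	if err := json.Unmarshal(raw, &doc); err != nil {
		log.Fatal(err)
	}

	// ContinueOnError logs resolution failures instead of aborting;
	// SkipSchemas would leave Definitions untouched.
	opts := &spec.ExpandOptions{ContinueOnError: true}
	if err := spec.ExpandSpec(&doc, opts); err != nil {
		log.Fatal(err)
	}

	// After expansion, the alias definition is the inlined pet schema.
	b, _ := json.Marshal(doc.Definitions["alias"])
	fmt.Println(string(b))
}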

View File

@@ -1 +0,0 @@
language: go

View File

@@ -1,12 +0,0 @@
# BTree implementation for Go
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.
See http://godoc.org/github.com/google/btree for documentation.
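A minimal usage sketch against the API defined in btree.go below (Int is the package's built-in Item implementation):

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(8) // degree 8: up to 15 items per node

	for i := 0; i < 10; i++ {
		tr.ReplaceOrInsert(btree.Int(i))
	}

	fmt.Println(tr.Len())             // 10
	fmt.Println(tr.Get(btree.Int(3))) // 3
	fmt.Println(tr.Min(), tr.Max())   // 0 9

	// Iterate over [3, 7): prints 3 4 5 6.
	tr.AscendRange(btree.Int(3), btree.Int(7), func(i btree.Item) bool {
		fmt.Println(i)
		return true
	})
}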

View File

@@ -1,649 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
// * Due to the overhead of storing values as interfaces (each
// value needs to be stored as the value itself, then 2 words for the
// interface pointing to that value and its type), resulting in higher
// memory use.
// * Since interfaces can point to values anywhere in memory, values are
// most likely not stored in contiguous blocks, resulting in a higher
// number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values or backwards iteration.
package btree
import (
"fmt"
"io"
"sort"
"strings"
)
// Item represents a single object in the tree.
type Item interface {
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
Less(than Item) bool
}
const (
DefaultFreeListSize = 32
)
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
freelist []*node
}
// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
return &FreeList{freelist: make([]*node, 0, size)}
}
func (f *FreeList) newNode() (n *node) {
index := len(f.freelist) - 1
if index < 0 {
return new(node)
}
f.freelist, n = f.freelist[:index], f.freelist[index]
return
}
func (f *FreeList) freeNode(n *node) {
if len(f.freelist) < cap(f.freelist) {
f.freelist = append(f.freelist, n)
}
}
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
if degree <= 1 {
panic("bad degree")
}
return &BTree{
degree: degree,
freelist: f,
}
}
// items stores items in a node.
type items []Item
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = item
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
item := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return item
}
// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
i := sort.Search(len(s), func(i int) bool {
return item.Less(s[i])
})
if i > 0 && !s[i-1].Less(item) {
return i - 1, true
}
return i, false
}
// children stores child nodes in a node.
type children []*node
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = n
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
n := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return n
}
// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
// * len(children) == 0, len(items) unconstrained
// * len(children) == len(items) + 1
type node struct {
items items
children children
t *BTree
}
// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
item := n.items[i]
next := n.t.newNode()
next.items = append(next.items, n.items[i+1:]...)
n.items = n.items[:i]
if len(n.children) > 0 {
next.children = append(next.children, n.children[i+1:]...)
n.children = n.children[:i+1]
}
return item, next
}
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
if len(n.children[i].items) < maxItems {
return false
}
first := n.children[i]
item, second := first.split(maxItems / 2)
n.items.insertAt(i, item)
n.children.insertAt(i+1, second)
return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
i, found := n.items.find(item)
if found {
out := n.items[i]
n.items[i] = item
return out
}
if len(n.children) == 0 {
n.items.insertAt(i, item)
return nil
}
if n.maybeSplitChild(i, maxItems) {
inTree := n.items[i]
switch {
case item.Less(inTree):
// no change, we want first split node
case inTree.Less(item):
i++ // we want second split node
default:
out := n.items[i]
n.items[i] = item
return out
}
}
return n.children[i].insert(item, maxItems)
}
// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
i, found := n.items.find(key)
if found {
return n.items[i]
} else if len(n.children) > 0 {
return n.children[i].get(key)
}
return nil
}
// min returns the first item in the subtree.
func min(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[0]
}
if len(n.items) == 0 {
return nil
}
return n.items[0]
}
// max returns the last item in the subtree.
func max(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[len(n.children)-1]
}
if len(n.items) == 0 {
return nil
}
return n.items[len(n.items)-1]
}
// toRemove details what item to remove in a node.remove call.
type toRemove int
const (
removeItem toRemove = iota // removes the given item
removeMin // removes smallest item in the subtree
removeMax // removes largest item in the subtree
)
// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
var i int
var found bool
switch typ {
case removeMax:
if len(n.children) == 0 {
return n.items.pop()
}
i = len(n.items)
case removeMin:
if len(n.children) == 0 {
return n.items.removeAt(0)
}
i = 0
case removeItem:
i, found = n.items.find(item)
if len(n.children) == 0 {
if found {
return n.items.removeAt(i)
}
return nil
}
default:
panic("invalid type")
}
// If we get to here, we have children.
child := n.children[i]
if len(child.items) <= minItems {
return n.growChildAndRemove(i, item, minItems, typ)
}
// Either we had enough items to begin with, or we've done some
// merging/stealing, because we've got enough now and we're ready to return
// stuff.
if found {
// The item exists at index 'i', and the child we've selected can give us a
// predecessor, since if we've gotten here it's got > minItems items in it.
out := n.items[i]
// We use our special-case 'remove' call with typ=removeMax to pull the
// predecessor of item i (the rightmost leaf of our immediate left child)
// and set it into where we pulled the item from.
n.items[i] = child.remove(nil, minItems, removeMax)
return out
}
// Final recursive call. Once we're here, we know that the item isn't in this
// node and that the child is big enough to remove from.
return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
// 1) item is in this node
// 2) item is in child
// In both cases, we need to handle the two subcases:
// A) node has enough values that it can spare one
// B) node doesn't have enough values
// For the latter, we have to check:
// a) left sibling has node to spare
// b) right sibling has node to spare
// c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
child := n.children[i]
if i > 0 && len(n.children[i-1].items) > minItems {
// Steal from left child
stealFrom := n.children[i-1]
stolenItem := stealFrom.items.pop()
child.items.insertAt(0, n.items[i-1])
n.items[i-1] = stolenItem
if len(stealFrom.children) > 0 {
child.children.insertAt(0, stealFrom.children.pop())
}
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
// steal from right child
stealFrom := n.children[i+1]
stolenItem := stealFrom.items.removeAt(0)
child.items = append(child.items, n.items[i])
n.items[i] = stolenItem
if len(stealFrom.children) > 0 {
child.children = append(child.children, stealFrom.children.removeAt(0))
}
} else {
if i >= len(n.items) {
i--
child = n.children[i]
}
// merge with right child
mergeItem := n.items.removeAt(i)
mergeChild := n.children.removeAt(i + 1)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
n.t.freeNode(mergeChild)
}
return n.remove(item, minItems, typ)
}
// iterate provides a simple method for iterating over elements in the tree.
// It could probably use some work to be extra-efficient (it calls from() a
// little more than it should), but it works pretty well for now.
//
// It requires that 'from' and 'to' both return true for values we should hit
// with the iterator. It should also be the case that 'from' returns true for
// values less than or equal to values 'to' returns true for, and 'to'
// returns true for values greater than or equal to those that 'from'
// does.
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
for i, item := range n.items {
if !from(item) {
continue
}
if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
return false
}
if !to(item) {
return false
}
if !iter(item) {
return false
}
}
if len(n.children) > 0 {
return n.children[len(n.children)-1].iterate(from, to, iter)
}
return true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
for _, c := range n.children {
c.print(w, level+1)
}
}
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
degree int
length int
root *node
freelist *FreeList
}
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
return t.degree*2 - 1
}
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
return t.degree - 1
}
func (t *BTree) newNode() (n *node) {
n = t.freelist.newNode()
n.t = t
return
}
func (t *BTree) freeNode(n *node) {
for i := range n.items {
n.items[i] = nil // clear to allow GC
}
n.items = n.items[:0]
for i := range n.children {
n.children[i] = nil // clear to allow GC
}
n.children = n.children[:0]
n.t = nil // clear to allow GC
t.freelist.freeNode(n)
}
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
if item == nil {
panic("nil item being added to BTree")
}
if t.root == nil {
t.root = t.newNode()
t.root.items = append(t.root.items, item)
t.length++
return nil
} else if len(t.root.items) >= t.maxItems() {
item2, second := t.root.split(t.maxItems() / 2)
oldroot := t.root
t.root = t.newNode()
t.root.items = append(t.root.items, item2)
t.root.children = append(t.root.children, oldroot, second)
}
out := t.root.insert(item, t.maxItems())
if out == nil {
t.length++
}
return out
}
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
return t.deleteItem(item, removeItem)
}
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
return t.deleteItem(nil, removeMin)
}
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
return t.deleteItem(nil, removeMax)
}
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
if t.root == nil || len(t.root.items) == 0 {
return nil
}
out := t.root.remove(item, t.minItems(), typ)
if len(t.root.items) == 0 && len(t.root.children) > 0 {
oldroot := t.root
t.root = t.root.children[0]
t.freeNode(oldroot)
}
if out != nil {
t.length--
}
return out
}
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(greaterOrEqual) },
func(a Item) bool { return a.Less(lessThan) },
iterator)
}
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return a.Less(pivot) },
iterator)
}
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(pivot) },
func(a Item) bool { return true },
iterator)
}
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return true },
iterator)
}
// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
if t.root == nil {
return nil
}
return t.root.get(key)
}
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
return min(t.root)
}
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
return max(t.root)
}
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
return t.Get(key) != nil
}
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
return t.length
}
// Int implements the Item interface for integers.
type Int int
// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
return a < b.(Int)
}

View File

@@ -1,76 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
// This binary compares memory usage between btree and gollrb.
package main
import (
"flag"
"fmt"
"math/rand"
"runtime"
"time"
"github.com/google/btree"
"github.com/petar/GoLLRB/llrb"
)
var (
size = flag.Int("size", 1000000, "size of the tree to build")
degree = flag.Int("degree", 8, "degree of btree")
gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)
func main() {
flag.Parse()
vals := rand.Perm(*size)
var t, v interface{}
v = vals
var stats runtime.MemStats
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- BEFORE ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
start := time.Now()
if *gollrb {
tr := llrb.New()
for _, v := range vals {
tr.ReplaceOrInsert(llrb.Int(v))
}
t = tr // keep it around
} else {
tr := btree.New(*degree)
for _, v := range vals {
tr.ReplaceOrInsert(btree.Int(v))
}
t = tr // keep it around
}
fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
fmt.Println("-------- AFTER ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- AFTER GC ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
if t == v {
fmt.Println("to make sure vals and tree aren't GC'd")
}
}

View File

@@ -1,18 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go tool vet .
- go test -v -race ./...

View File

@@ -1,7 +0,0 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,24 +0,0 @@
httpcache
=========
[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
Package httpcache provides an http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
Cache Backends
--------------
- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
License
-------
- [MIT License](LICENSE.txt)
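
A minimal usage sketch against the API in httpcache.go below; whether the second request is actually served from cache depends on the response's cache-control headers:

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewTransport enables MarkCachedResponses, so cache hits carry the
	// X-From-Cache header.
	client := httpcache.NewTransport(httpcache.NewMemoryCache()).Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		// Reading the body to EOF lets the transport store the response.
		ioutil.ReadAll(resp.Body)
		resp.Body.Close()
		// Empty on the first request; "1" when served from the cache.
		fmt.Println("X-From-Cache:", resp.Header.Get(httpcache.XFromCache))
	}
}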

View File

@@ -1,61 +0,0 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache
import (
"bytes"
"crypto/md5"
"encoding/hex"
"github.com/peterbourgon/diskv"
"io"
)
// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
d *diskv.Diskv
}
// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
key = keyToFilename(key)
resp, err := c.d.Read(key)
if err != nil {
return []byte{}, false
}
return resp, true
}
// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
key = keyToFilename(key)
c.d.WriteStream(key, bytes.NewReader(resp), true)
}
// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
key = keyToFilename(key)
c.d.Erase(key)
}
func keyToFilename(key string) string {
h := md5.New()
io.WriteString(h, key)
return hex.EncodeToString(h.Sum(nil))
}
// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
return &Cache{
d: diskv.New(diskv.Options{
BasePath: basePath,
CacheSizeMax: 100 * 1024 * 1024, // 100MB
}),
}
}
// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
return &Cache{d}
}
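Combined with the Transport from httpcache.go below, this yields a persistent HTTP cache. A brief sketch (the path is illustrative):

package main

import (
	"io/ioutil"
	"net/http"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Responses land under /tmp/httpcache, keyed by the MD5 of the request
	// URL (see keyToFilename above); New configures diskv with a 100MB
	// CacheSizeMax.
	cache := diskcache.New("/tmp/httpcache")

	client := &http.Client{Transport: httpcache.NewTransport(cache)}
	resp, err := client.Get("https://example.com/")
	if err == nil {
		ioutil.ReadAll(resp.Body) // read to EOF so the response is persisted
		resp.Body.Close()
	}
}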

View File

@@ -1,553 +0,0 @@
// Package httpcache provides an http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"sync"
"time"
)
const (
stale = iota
fresh
transparent
// XFromCache is the header added to responses that are returned from the cache
XFromCache = "X-From-Cache"
)
// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
// Get returns the []byte representation of a cached response and a bool
// set to true if the value isn't empty
Get(key string) (responseBytes []byte, ok bool)
// Set stores the []byte representation of a response against a key
Set(key string, responseBytes []byte)
// Delete removes the value associated with the key
Delete(key string)
}
// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
return req.URL.String()
}
// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
cachedVal, ok := c.Get(cacheKey(req))
if !ok {
return
}
b := bytes.NewBuffer(cachedVal)
return http.ReadResponse(bufio.NewReader(b), req)
}
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
mu sync.RWMutex
items map[string][]byte
}
// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
c.mu.RLock()
resp, ok = c.items[key]
c.mu.RUnlock()
return resp, ok
}
// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
c.mu.Lock()
c.items[key] = resp
c.mu.Unlock()
}
// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
c.mu.Lock()
delete(c.items, key)
c.mu.Unlock()
}
// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
c := &MemoryCache{items: map[string][]byte{}}
return c
}
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
// The RoundTripper interface actually used to make requests
// If nil, http.DefaultTransport is used
Transport http.RoundTripper
Cache Cache
// If true, responses returned from the cache will be given an extra header, X-From-Cache
MarkCachedResponses bool
}
// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
return &Transport{Cache: c, MarkCachedResponses: true}
}
// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
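
Client is a thin convenience; the sketch below shows the two equivalent ways to obtain a caching client:

```go
package main

import (
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	t := httpcache.NewTransport(httpcache.NewMemoryCache())
	c1 := t.Client()                 // convenience form
	c2 := &http.Client{Transport: t} // what Client returns under the hood
	_, _ = c1, c2
}
```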
// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
header = http.CanonicalHeaderKey(header)
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
return false
}
}
return true
}
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
cacheKey := cacheKey(req)
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
var cachedResp *http.Response
if cacheable {
cachedResp, err = CachedResponse(t.Cache, req)
} else {
// Need to invalidate an existing value
t.Cache.Delete(cacheKey)
}
transport := t.Transport
if transport == nil {
transport = http.DefaultTransport
}
if cacheable && cachedResp != nil && err == nil {
if t.MarkCachedResponses {
cachedResp.Header.Set(XFromCache, "1")
}
if varyMatches(cachedResp, req) {
// Can only use cached value if the new request doesn't Vary significantly
freshness := getFreshness(cachedResp.Header, req.Header)
if freshness == fresh {
return cachedResp, nil
}
if freshness == stale {
var req2 *http.Request
// Add validators if caller hasn't already done so
etag := cachedResp.Header.Get("etag")
if etag != "" && req.Header.Get("etag") == "" {
req2 = cloneRequest(req)
req2.Header.Set("if-none-match", etag)
}
lastModified := cachedResp.Header.Get("last-modified")
if lastModified != "" && req.Header.Get("last-modified") == "" {
if req2 == nil {
req2 = cloneRequest(req)
}
req2.Header.Set("if-modified-since", lastModified)
}
if req2 != nil {
req = req2
}
}
}
resp, err = transport.RoundTrip(req)
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
// Replace the 304 response with the one from cache, but update with some new headers
endToEndHeaders := getEndToEndHeaders(resp.Header)
for _, header := range endToEndHeaders {
cachedResp.Header[header] = resp.Header[header]
}
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
resp = cachedResp
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
// In case of transport failure and stale-if-error activated, returns cached content
// when available
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
return cachedResp, nil
} else {
if err != nil || resp.StatusCode != http.StatusOK {
t.Cache.Delete(cacheKey)
}
if err != nil {
return nil, err
}
}
} else {
reqCacheControl := parseCacheControl(req.Header)
if _, ok := reqCacheControl["only-if-cached"]; ok {
resp = newGatewayTimeoutResponse(req)
} else {
resp, err = transport.RoundTrip(req)
if err != nil {
return nil, err
}
}
}
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
varyKey = http.CanonicalHeaderKey(varyKey)
fakeHeader := "X-Varied-" + varyKey
reqValue := req.Header.Get(varyKey)
if reqValue != "" {
resp.Header.Set(fakeHeader, reqValue)
}
}
switch req.Method {
case "GET":
// Delay caching until EOF is reached.
resp.Body = &cachingReadCloser{
R: resp.Body,
OnEOF: func(r io.Reader) {
resp := *resp
resp.Body = ioutil.NopCloser(r)
respBytes, err := httputil.DumpResponse(&resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
},
}
default:
respBytes, err := httputil.DumpResponse(resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
}
} else {
t.Cache.Delete(cacheKey)
}
return resp, nil
}
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")
// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
dateHeader := respHeaders.Get("date")
if dateHeader == "" {
err = ErrNoDateHeader
return
}
return time.Parse(time.RFC1123, dateHeader)
}
type realClock struct{}
func (c *realClock) since(d time.Time) time.Duration {
return time.Since(d)
}
type timer interface {
since(d time.Time) time.Duration
}
var clock timer = &realClock{}
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
if _, ok := reqCacheControl["no-cache"]; ok {
return transparent
}
if _, ok := respCacheControl["no-cache"]; ok {
return stale
}
if _, ok := reqCacheControl["only-if-cached"]; ok {
return fresh
}
date, err := Date(respHeaders)
if err != nil {
return stale
}
currentAge := clock.since(date)
var lifetime time.Duration
var zeroDuration time.Duration
// If a response includes both an Expires header and a max-age directive,
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
if maxAge, ok := respCacheControl["max-age"]; ok {
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
} else {
expiresHeader := respHeaders.Get("Expires")
if expiresHeader != "" {
expires, err := time.Parse(time.RFC1123, expiresHeader)
if err != nil {
lifetime = zeroDuration
} else {
lifetime = expires.Sub(date)
}
}
}
if maxAge, ok := reqCacheControl["max-age"]; ok {
// the client is willing to accept a response whose age is no greater than the specified time in seconds
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
}
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
// the client wants a response that will still be fresh for at least the specified number of seconds.
minfreshDuration, err := time.ParseDuration(minfresh + "s")
if err == nil {
currentAge = time.Duration(currentAge + minfreshDuration)
}
}
if maxstale, ok := reqCacheControl["max-stale"]; ok {
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
// its expiration time by no more than the specified number of seconds.
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
//
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
// return-value available here.
if maxstale == "" {
return fresh
}
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
if err == nil {
currentAge = time.Duration(currentAge - maxstaleDuration)
}
}
if lifetime > currentAge {
return fresh
}
return stale
}
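
To make the decision table concrete, a small illustration; since getFreshness and the freshness constants are unexported, this sketch only compiles inside package httpcache:

```go
package httpcache

import (
	"net/http"
	"time"
)

// freshnessExample is illustrative only: a response dated "now" with
// max-age=60 is fresh, but a request carrying no-cache forces transparent.
func freshnessExample() (bool, bool) {
	respHeaders := http.Header{
		"Date":          {time.Now().UTC().Format(time.RFC1123)},
		"Cache-Control": {"max-age=60"},
	}
	isFresh := getFreshness(respHeaders, http.Header{}) == fresh
	forcesRevalidation := getFreshness(respHeaders,
		http.Header{"Cache-Control": {"no-cache"}}) == transparent
	return isFresh, forcesRevalidation // true, true
}
```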
// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
var err error
lifetime := time.Duration(-1)
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if lifetime >= 0 {
date, err := Date(respHeaders)
if err != nil {
return false
}
currentAge := clock.since(date)
if lifetime > currentAge {
return true
}
}
return false
}
func getEndToEndHeaders(respHeaders http.Header) []string {
// These headers are always hop-by-hop
hopByHopHeaders := map[string]struct{}{
"Connection": struct{}{},
"Keep-Alive": struct{}{},
"Proxy-Authenticate": struct{}{},
"Proxy-Authorization": struct{}{},
"Te": struct{}{},
"Trailers": struct{}{},
"Transfer-Encoding": struct{}{},
"Upgrade": struct{}{},
}
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
// any header listed in connection, if present, is also considered hop-by-hop
if strings.Trim(extra, " ") != "" {
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
}
}
endToEndHeaders := []string{}
for respHeader := range respHeaders {
if _, ok := hopByHopHeaders[respHeader]; !ok {
endToEndHeaders = append(endToEndHeaders, respHeader)
}
}
return endToEndHeaders
}
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
if _, ok := respCacheControl["no-store"]; ok {
return false
}
if _, ok := reqCacheControl["no-store"]; ok {
return false
}
return true
}
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
var braw bytes.Buffer
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
if err != nil {
panic(err)
}
return resp
}
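
only-if-cached is the one path that synthesizes a response locally; a sketch of observing it on a cold cache:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	// With only-if-cached and an empty cache, no network request is made
	// and the Transport answers with the synthetic 504 built above.
	client := httpcache.NewMemoryCacheTransport().Client()
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Cache-Control", "only-if-cached")
	resp, err := client.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.StatusCode) // 504: synthesized locally, no network call
}
```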
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
type cacheControl map[string]string
func parseCacheControl(headers http.Header) cacheControl {
cc := cacheControl{}
ccHeader := headers.Get("Cache-Control")
for _, part := range strings.Split(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue
}
if strings.ContainsRune(part, '=') {
keyval := strings.Split(part, "=")
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
} else {
cc[part] = ""
}
}
return cc
}
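
An in-package illustration (the type and function are unexported):

```go
package httpcache

import "net/http"

// cacheControlExample is illustrative only: directives flatten into a map,
// and valueless directives map to the empty string.
func cacheControlExample() cacheControl {
	h := http.Header{"Cache-Control": {"max-age=60, no-store"}}
	return parseCacheControl(h) // {"max-age": "60", "no-store": ""}
}
```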
// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
var vals []string
for _, val := range headers[http.CanonicalHeaderKey(name)] {
fields := strings.Split(val, ",")
for i, f := range fields {
fields[i] = strings.TrimSpace(f)
}
vals = append(vals, fields...)
}
return vals
}
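
An in-package illustration of the flattening across repeated header lines:

```go
package httpcache

import "net/http"

// varyValuesExample is illustrative only: values from repeated header lines
// are concatenated and whitespace-trimmed.
func varyValuesExample() []string {
	h := http.Header{"Vary": {"Accept, Accept-Encoding", "Cookie"}}
	return headerAllCommaSepValues(h, "vary")
	// -> []string{"Accept", "Accept-Encoding", "Cookie"}
}
```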
// cachingReadCloser is a wrapper around ReadCloser R that calls the OnEOF
// handler with a full copy of the content read from R once EOF is
// reached.
type cachingReadCloser struct {
// Underlying ReadCloser.
R io.ReadCloser
// OnEOF is called with a copy of the content of R when EOF is reached.
OnEOF func(io.Reader)
buf bytes.Buffer // buf stores a copy of the content of R.
}
// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
n, err = r.R.Read(p)
r.buf.Write(p[:n])
if err == io.EOF {
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
}
return n, err
}
func (r *cachingReadCloser) Close() error {
return r.R.Close()
}
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
c := NewMemoryCache()
t := NewTransport(c)
return t
}
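
Putting the pieces together, a sketch of a warm-cache hit. Note the body must be read to EOF before the response is stored, because caching is deferred to cachingReadCloser's OnEOF:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	// The second GET is answered from cache (assuming the server marks
	// the response cacheable); draining the body triggers OnEOF, which stores it.
	client := httpcache.NewMemoryCacheTransport().Client()
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		io.Copy(ioutil.Discard, resp.Body) // read to EOF so the response is cached
		resp.Body.Close()
		fmt.Println(resp.Header.Get(httpcache.XFromCache)) // "", then "1" if cacheable
	}
}
```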

View File

@@ -1,3 +1,4 @@
.idea
/vendor
/bug_test.go
/coverage.txt
/profile.out
/.idea

View File

@@ -2,6 +2,7 @@ language: go
go:
- 1.8.x
- 1.x
before_install:
- go get -t -v ./...

33
vendor/github.com/json-iterator/go/Gopkg.lock generated vendored Normal file
View File

@@ -0,0 +1,33 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/davecgh/go-spew"
packages = ["spew"]
revision = "346938d642f2ec3594ed81d874461961cd0faa76"
version = "v1.1.0"
[[projects]]
branch = "master"
name = "github.com/google/gofuzz"
packages = ["."]
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
name = "github.com/pmezard/go-difflib"
packages = ["difflib"]
revision = "792786c7400a136282c1664665ae0a8db921c6c2"
version = "v1.0.0"
[[projects]]
name = "github.com/stretchr/testify"
packages = ["assert","require"]
revision = "69483b4bd14f5845b5a1e55bca19e954e827f1d0"
version = "v1.1.4"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "f8b7cf3941d3792cbbd570bb53c093adaf774334d1162c651565c97a58dc9d09"
solver-name = "gps-cdcl"
solver-version = 1

33
vendor/github.com/json-iterator/go/Gopkg.toml generated vendored Normal file
View File

@@ -0,0 +1,33 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
name = "github.com/davecgh/go-spew"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "github.com/google/gofuzz"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.1.4"

View File

@@ -44,7 +44,9 @@ with
```go
import "github.com/json-iterator/go"
jsoniter.Marshal(&data)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Marshal(&data)
```
Replace
@@ -58,7 +60,9 @@ with
```go
import "github.com/json-iterator/go"
jsoniter.Unmarshal(input, &data)
var json = jsoniter.ConfigCompatibleWithStandardLibrary
json.Unmarshal(input, &data)
```
[More documentation](http://jsoniter.com/migrate-from-go-std.html)
@@ -76,5 +80,7 @@ Contributors
* [thockin](https://github.com/thockin)
* [mattn](https://github.com/mattn)
* [cch123](https://github.com/cch123)
* [Oleg Shaldybin](https://github.com/olegshaldybin)
* [Jason Toffaletti](https://github.com/toffaletti)
Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby)

12
vendor/github.com/json-iterator/go/build.sh generated vendored Executable file
View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -e
set -x
if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then
mkdir -p /tmp/build-golang/src/github.com/json-iterator
ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go
fi
export GOPATH=/tmp/build-golang
go get -u github.com/golang/dep/cmd/dep
cd /tmp/build-golang/src/github.com/json-iterator/go
exec $GOPATH/bin/dep ensure -update

View File

@@ -110,6 +110,7 @@ type Encoder struct {
// Encode encodes interface{} as JSON and writes it to the io.Writer
func (adapter *Encoder) Encode(val interface{}) error {
adapter.stream.WriteVal(val)
adapter.stream.WriteRaw("\n")
adapter.stream.Flush()
return adapter.stream.Error
}
@@ -125,3 +126,8 @@ func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
config.EscapeHTML = escapeHTML
adapter.stream.cfg = config.Froze().(*frozenConfig)
}
// Valid reports whether data is a valid JSON encoding.
func Valid(data []byte) bool {
return ConfigDefault.Valid(data)
}
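
The new package-level helper mirrors encoding/json's Valid; a usage sketch:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	fmt.Println(jsoniter.Valid([]byte(`{"a": 1}`))) // true
	fmt.Println(jsoniter.Valid([]byte(`{"a": 1`)))  // false: truncated object
}
```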

View File

@@ -1,6 +1,7 @@
package jsoniter
import (
"errors"
"fmt"
"io"
"reflect"
@@ -157,6 +158,8 @@ func (iter *Iterator) readAny() Any {
return iter.readArrayAny()
case '-':
return iter.readNumberAny(false)
case 0:
return &invalidAny{baseAny{}, errors.New("input is empty")}
default:
return iter.readNumberAny(true)
}

View File

@@ -12,23 +12,26 @@ import (
// Config customize how the API should behave.
// The API is created from Config by Froze.
type Config struct {
IndentionStep int
MarshalFloatWith6Digits bool
EscapeHTML bool
SortMapKeys bool
UseNumber bool
TagKey string
IndentionStep int
MarshalFloatWith6Digits bool
EscapeHTML bool
SortMapKeys bool
UseNumber bool
TagKey string
ValidateJsonRawMessage bool
ObjectFieldMustBeSimpleString bool
}
type frozenConfig struct {
configBeforeFrozen Config
sortMapKeys bool
indentionStep int
decoderCache unsafe.Pointer
encoderCache unsafe.Pointer
extensions []Extension
streamPool chan *Stream
iteratorPool chan *Iterator
configBeforeFrozen Config
sortMapKeys bool
indentionStep int
objectFieldMustBeSimpleString bool
decoderCache unsafe.Pointer
encoderCache unsafe.Pointer
extensions []Extension
streamPool chan *Stream
iteratorPool chan *Iterator
}
// API the public interface of this package.
@@ -44,6 +47,8 @@ type API interface {
Get(data []byte, path ...interface{}) Any
NewEncoder(writer io.Writer) *Encoder
NewDecoder(reader io.Reader) *Decoder
Valid(data []byte) bool
RegisterExtension(extension Extension)
}
// ConfigDefault is the default API
@@ -53,24 +58,27 @@ var ConfigDefault = Config{
// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior
var ConfigCompatibleWithStandardLibrary = Config{
EscapeHTML: true,
SortMapKeys: true,
EscapeHTML: true,
SortMapKeys: true,
ValidateJsonRawMessage: true,
}.Froze()
// ConfigFastest marshals floats with only 6 digits of precision
var ConfigFastest = Config{
EscapeHTML: false,
MarshalFloatWith6Digits: true,
EscapeHTML: false,
MarshalFloatWith6Digits: true, // will lose precision
ObjectFieldMustBeSimpleString: true, // do not unescape object field
}.Froze()
// Froze forges the API from the config
func (cfg Config) Froze() API {
// TODO: cache frozen config
frozenConfig := &frozenConfig{
sortMapKeys: cfg.SortMapKeys,
indentionStep: cfg.IndentionStep,
streamPool: make(chan *Stream, 16),
iteratorPool: make(chan *Iterator, 16),
sortMapKeys: cfg.SortMapKeys,
indentionStep: cfg.IndentionStep,
objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString,
streamPool: make(chan *Stream, 16),
iteratorPool: make(chan *Iterator, 16),
}
atomic.StorePointer(&frozenConfig.decoderCache, unsafe.Pointer(&map[string]ValDecoder{}))
atomic.StorePointer(&frozenConfig.encoderCache, unsafe.Pointer(&map[string]ValEncoder{}))
@@ -83,10 +91,31 @@ func (cfg Config) Froze() API {
if cfg.UseNumber {
frozenConfig.useNumber()
}
if cfg.ValidateJsonRawMessage {
frozenConfig.validateJsonRawMessage()
}
frozenConfig.configBeforeFrozen = cfg
return frozenConfig
}
func (cfg *frozenConfig) validateJsonRawMessage() {
encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) {
rawMessage := *(*json.RawMessage)(ptr)
iter := cfg.BorrowIterator([]byte(rawMessage))
iter.Read()
if iter.Error != nil {
stream.WriteRaw("null")
} else {
cfg.ReturnIterator(iter)
stream.WriteRaw(string(rawMessage))
}
}, func(ptr unsafe.Pointer) bool {
return false
}}
cfg.addEncoderToCache(reflect.TypeOf((*json.RawMessage)(nil)).Elem(), encoder)
cfg.addEncoderToCache(reflect.TypeOf((*RawMessage)(nil)).Elem(), encoder)
}
func (cfg *frozenConfig) useNumber() {
cfg.addDecoderToCache(reflect.TypeOf((*interface{})(nil)).Elem(), &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) {
if iter.WhatIsNext() == NumberValue {
@@ -104,7 +133,7 @@ func (cfg *frozenConfig) getTagKey() string {
return tagKey
}
func (cfg *frozenConfig) registerExtension(extension Extension) {
func (cfg *frozenConfig) RegisterExtension(extension Extension) {
cfg.extensions = append(cfg.extensions, extension)
}
@@ -310,3 +339,10 @@ func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder {
iter := Parse(cfg, reader, 512)
return &Decoder{iter}
}
func (cfg *frozenConfig) Valid(data []byte) bool {
iter := cfg.BorrowIterator(data)
defer cfg.ReturnIterator(iter)
iter.Skip()
return iter.Error == nil
}

View File

@@ -77,6 +77,7 @@ type Iterator struct {
captureStartedAt int
captured []byte
Error error
Attachment interface{} // open for customized decoder
}
// NewIterator creates an empty Iterator instance
@@ -167,7 +168,7 @@ func (iter *Iterator) isObjectEnd() bool {
if c == '}' {
return true
}
iter.ReportError("isObjectEnd", "object ended prematurely")
iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c}))
return true
}
@@ -200,8 +201,22 @@ func (iter *Iterator) ReportError(operation string, msg string) {
if peekStart < 0 {
peekStart = 0
}
iter.Error = fmt.Errorf("%s: %s, parsing %v ...%s... at %s", operation, msg, iter.head,
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
peekEnd := iter.head + 10
if peekEnd > iter.tail {
peekEnd = iter.tail
}
parsing := string(iter.buf[peekStart:peekEnd])
contextStart := iter.head - 50
if contextStart < 0 {
contextStart = 0
}
contextEnd := iter.head + 50
if contextEnd > iter.tail {
contextEnd = iter.tail
}
context := string(iter.buf[contextStart:contextEnd])
iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...",
operation, msg, iter.head-peekStart, parsing, context)
}
// CurrentBuffer gets the current buffer as a string for debugging purposes
@@ -210,7 +225,7 @@ func (iter *Iterator) CurrentBuffer() string {
if peekStart < 0 {
peekStart = 0
}
return fmt.Sprintf("parsing %v ...|%s|... at %s", iter.head,
return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head,
string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail]))
}

View File

@@ -19,7 +19,7 @@ func (iter *Iterator) ReadArray() (ret bool) {
case ',':
return true
default:
iter.ReportError("ReadArray", "expect [ or , or ] or n, but found: "+string([]byte{c}))
iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c}))
return
}
}
@@ -42,7 +42,7 @@ func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
c = iter.nextToken()
}
if c != ']' {
iter.ReportError("ReadArrayCB", "expect ] in the end")
iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c}))
return false
}
return true
@@ -53,6 +53,6 @@ func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadArrayCB", "expect [ or n, but found: "+string([]byte{c}))
iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c}))
return false
}

View File

@@ -115,6 +115,7 @@ func (iter *Iterator) ReadUint32() (ret uint32) {
func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind := intDigits[c]
if ind == 0 {
iter.assertInteger()
return 0 // single zero
}
if ind == invalidCharForNumber {
@@ -127,12 +128,14 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind2 := intDigits[iter.buf[i]]
if ind2 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value
}
i++
ind3 := intDigits[iter.buf[i]]
if ind3 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10 + uint32(ind2)
}
//iter.head = i + 1
@@ -141,30 +144,35 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind4 := intDigits[iter.buf[i]]
if ind4 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100 + uint32(ind2)*10 + uint32(ind3)
}
i++
ind5 := intDigits[iter.buf[i]]
if ind5 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4)
}
i++
ind6 := intDigits[iter.buf[i]]
if ind6 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5)
}
i++
ind7 := intDigits[iter.buf[i]]
if ind7 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6)
}
i++
ind8 := intDigits[iter.buf[i]]
if ind8 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7)
}
i++
@@ -172,6 +180,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8)
iter.head = i
if ind9 == invalidCharForNumber {
iter.assertInteger()
return value
}
}
@@ -180,6 +189,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value
}
if value > uint32SafeToMultiply10 {
@@ -194,6 +204,7 @@ func (iter *Iterator) readUint32(c byte) (ret uint32) {
value = (value << 3) + (value << 1) + uint32(ind)
}
if !iter.loadMore() {
iter.assertInteger()
return value
}
}
@@ -226,6 +237,7 @@ func (iter *Iterator) ReadUint64() uint64 {
func (iter *Iterator) readUint64(c byte) (ret uint64) {
ind := intDigits[c]
if ind == 0 {
iter.assertInteger()
return 0 // single zero
}
if ind == invalidCharForNumber {
@@ -233,11 +245,73 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
return
}
value := uint64(ind)
if iter.tail-iter.head > 10 {
i := iter.head
ind2 := intDigits[iter.buf[i]]
if ind2 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value
}
i++
ind3 := intDigits[iter.buf[i]]
if ind3 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10 + uint64(ind2)
}
//iter.head = i + 1
//value = value * 100 + uint32(ind2) * 10 + uint32(ind3)
i++
ind4 := intDigits[iter.buf[i]]
if ind4 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100 + uint64(ind2)*10 + uint64(ind3)
}
i++
ind5 := intDigits[iter.buf[i]]
if ind5 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4)
}
i++
ind6 := intDigits[iter.buf[i]]
if ind6 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5)
}
i++
ind7 := intDigits[iter.buf[i]]
if ind7 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6)
}
i++
ind8 := intDigits[iter.buf[i]]
if ind8 == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7)
}
i++
ind9 := intDigits[iter.buf[i]]
value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8)
iter.head = i
if ind9 == invalidCharForNumber {
iter.assertInteger()
return value
}
}
for {
for i := iter.head; i < iter.tail; i++ {
ind = intDigits[iter.buf[i]]
if ind == invalidCharForNumber {
iter.head = i
iter.assertInteger()
return value
}
if value > uint64SafeToMultiple10 {
@@ -252,7 +326,14 @@ func (iter *Iterator) readUint64(c byte) (ret uint64) {
value = (value << 3) + (value << 1) + uint64(ind)
}
if !iter.loadMore() {
iter.assertInteger()
return value
}
}
}
func (iter *Iterator) assertInteger() {
if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' {
iter.ReportError("assertInteger", "can not decode float as int")
}
}
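
The effect of the new assertInteger guard, as a sketch: previously the fractional part could be silently dropped when a float literal was decoded into an integer type.

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var n int
	err := jsoniter.Unmarshal([]byte("3.14"), &n)
	fmt.Println(err != nil) // true: floats no longer silently truncate to ints
}
```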

View File

@@ -19,15 +19,33 @@ func (iter *Iterator) ReadObject() (ret string) {
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
return string(iter.readObjectFieldAsBytes())
if iter.cfg.objectFieldMustBeSimpleString {
return string(iter.readObjectFieldAsBytes())
} else {
field := iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
return field
}
}
if c == '}' {
return "" // end of object
}
iter.ReportError("ReadObject", `expect " after {`)
iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c}))
return
case ',':
return string(iter.readObjectFieldAsBytes())
if iter.cfg.objectFieldMustBeSimpleString {
return string(iter.readObjectFieldAsBytes())
} else {
field := iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
return field
}
case '}':
return "" // end of object
default:
@@ -44,17 +62,34 @@ func (iter *Iterator) readFieldHash() int32 {
for i := iter.head; i < iter.tail; i++ {
// require ascii string and no escape
b := iter.buf[i]
if 'A' <= b && b <= 'Z' {
b += 'a' - 'A'
if !iter.cfg.objectFieldMustBeSimpleString && b == '\\' {
iter.head = i
for _, b := range iter.readStringSlowPath() {
if 'A' <= b && b <= 'Z' {
b += 'a' - 'A'
}
hash ^= int64(b)
hash *= 0x1000193
}
c = iter.nextToken()
if c != ':' {
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
return 0
}
return int32(hash)
}
if b == '"' {
iter.head = i + 1
c = iter.nextToken()
if c != ':' {
iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c}))
return 0
}
return int32(hash)
}
if 'A' <= b && b <= 'Z' {
b += 'a' - 'A'
}
hash ^= int64(b)
hash *= 0x1000193
}
@@ -80,18 +115,38 @@ func calcHash(str string) int32 {
// ReadObjectCB reads an object with a callback; the key is ASCII-only and the field name is not copied
func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
c := iter.nextToken()
var fieldBytes []byte
var field string
if c == '{' {
c = iter.nextToken()
if c == '"' {
iter.unreadByte()
field := iter.readObjectFieldAsBytes()
if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
if iter.cfg.objectFieldMustBeSimpleString {
fieldBytes = iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
} else {
field = iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
}
if !callback(iter, field) {
return false
}
c = iter.nextToken()
for c == ',' {
field = iter.readObjectFieldAsBytes()
if !callback(iter, *(*string)(unsafe.Pointer(&field))) {
if iter.cfg.objectFieldMustBeSimpleString {
fieldBytes = iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
} else {
field = iter.ReadString()
c = iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
}
if !callback(iter, field) {
return false
}
c = iter.nextToken()
@@ -105,14 +160,14 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return true
}
iter.ReportError("ReadObjectCB", `expect " after }`)
iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c}))
return false
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadObjectCB", `expect { or n`)
iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c}))
return false
}
@@ -125,7 +180,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
iter.unreadByte()
field := iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field")
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
return false
}
if !callback(iter, field) {
@@ -135,7 +190,7 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
for c == ',' {
field = iter.ReadString()
if iter.nextToken() != ':' {
iter.ReportError("ReadMapCB", "expect : after object field")
iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c}))
return false
}
if !callback(iter, field) {
@@ -152,14 +207,14 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool {
if c == '}' {
return true
}
iter.ReportError("ReadMapCB", `expect " after }`)
iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c}))
return false
}
if c == 'n' {
iter.skipThreeBytes('u', 'l', 'l')
return true // null
}
iter.ReportError("ReadMapCB", `expect { or n`)
iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c}))
return false
}
@@ -176,7 +231,7 @@ func (iter *Iterator) readObjectStart() bool {
iter.skipThreeBytes('u', 'l', 'l')
return false
}
iter.ReportError("readObjectStart", "expect { or n")
iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c}))
return false
}
@@ -192,7 +247,7 @@ func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) {
}
}
if iter.buf[iter.head] != ':' {
iter.ReportError("readObjectFieldAsBytes", "expect : after object field")
iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]}))
return
}
iter.head++

View File

@@ -25,7 +25,7 @@ func (iter *Iterator) ReadBool() (ret bool) {
iter.skipFourBytes('a', 'l', 's', 'e')
return false
}
iter.ReportError("ReadBool", "expect t or f")
iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c}))
return
}
@@ -59,7 +59,9 @@ func (iter *Iterator) stopCapture() []byte {
iter.captureStartedAt = -1
iter.captured = nil
if len(captured) == 0 {
return remaining
copied := make([]byte, len(remaining))
copy(copied, remaining)
return copied
}
captured = append(captured, remaining...)
return captured

View File

@@ -1,4 +1,4 @@
//+build jsoniter-sloppy
//+build jsoniter_sloppy
package jsoniter

View File

@@ -1,4 +1,4 @@
//+build !jsoniter-sloppy
//+build !jsoniter_sloppy
package jsoniter
@@ -64,7 +64,7 @@ func (iter *Iterator) trySkipString() bool {
} else if c == '\\' {
return false
} else if c < ' ' {
iter.ReportError("ReadString",
iter.ReportError("trySkipString",
fmt.Sprintf(`invalid control character found: %d`, c))
return true // already failed
}

View File

@@ -28,7 +28,7 @@ func (iter *Iterator) ReadString() (ret string) {
iter.skipThreeBytes('u', 'l', 'l')
return ""
}
iter.ReportError("ReadString", `expects " or n`)
iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c}))
return
}
@@ -47,7 +47,7 @@ func (iter *Iterator) readStringSlowPath() (ret string) {
str = append(str, c)
}
}
iter.ReportError("ReadString", "unexpected end of input")
iter.ReportError("readStringSlowPath", "unexpected end of input")
return
}
@@ -104,7 +104,7 @@ func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte {
case 't':
str = append(str, '\t')
default:
iter.ReportError("ReadString",
iter.ReportError("readEscapedChar",
`invalid escape char after \`)
return nil
}
@@ -139,7 +139,7 @@ func (iter *Iterator) ReadStringAsSlice() (ret []byte) {
}
return copied
}
iter.ReportError("ReadString", `expects " or n`)
iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c}))
return
}
@@ -156,7 +156,7 @@ func (iter *Iterator) readU4() (ret rune) {
} else if c >= 'A' && c <= 'F' {
ret = ret*16 + rune(c-'A'+10)
} else {
iter.ReportError("readU4", "expects 0~9 or a~f")
iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c}))
return
}
}

View File

@@ -1,9 +1,25 @@
package jsoniter
import "encoding/json"
import (
"encoding/json"
"strconv"
)
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
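
A usage sketch of the new accessors, which mirror encoding/json's json.Number:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	n := jsoniter.Number("42")
	i, _ := n.Int64()   // 42
	f, _ := n.Float64() // 42
	fmt.Println(n.String(), i, f)
}
```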
func CastJsonNumber(val interface{}) (string, bool) {
switch typedVal := val.(type) {
case json.Number:

View File

@@ -28,6 +28,7 @@ func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream {
func (cfg *frozenConfig) ReturnStream(stream *Stream) {
stream.Error = nil
stream.Attachment = nil
select {
case cfg.streamPool <- stream:
return
@@ -48,6 +49,7 @@ func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator {
func (cfg *frozenConfig) ReturnIterator(iter *Iterator) {
iter.Error = nil
iter.Attachment = nil
select {
case cfg.iteratorPool <- iter:
return

View File

@@ -72,24 +72,24 @@ func init() {
textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem()
}
type optionalDecoder struct {
valueType reflect.Type
valueDecoder ValDecoder
type OptionalDecoder struct {
ValueType reflect.Type
ValueDecoder ValDecoder
}
func (decoder *optionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
if iter.ReadNil() {
*((*unsafe.Pointer)(ptr)) = nil
} else {
if *((*unsafe.Pointer)(ptr)) == nil {
//pointer to null, we have to allocate memory to hold the value
value := reflect.New(decoder.valueType)
value := reflect.New(decoder.ValueType)
newPtr := extractInterface(value.Interface()).word
decoder.valueDecoder.Decode(newPtr, iter)
decoder.ValueDecoder.Decode(newPtr, iter)
*((*uintptr)(ptr)) = uintptr(newPtr)
} else {
//reuse existing instance
decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter)
}
}
}
@@ -113,11 +113,31 @@ func (decoder *deferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
}
}
type optionalEncoder struct {
type OptionalEncoder struct {
ValueEncoder ValEncoder
}
func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream)
}
}
func (encoder *OptionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
return *((*unsafe.Pointer)(ptr)) == nil
}
type optionalMapEncoder struct {
valueEncoder ValEncoder
}
func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
func (encoder *optionalMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
if *((*unsafe.Pointer)(ptr)) == nil {
stream.WriteNil()
} else {
@@ -125,15 +145,13 @@ func (encoder *optionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
}
func (encoder *optionalEncoder) EncodeInterface(val interface{}, stream *Stream) {
func (encoder *optionalMapEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
}
func (encoder *optionalEncoder) IsEmpty(ptr unsafe.Pointer) bool {
if *((*unsafe.Pointer)(ptr)) == nil {
return true
}
return false
func (encoder *optionalMapEncoder) IsEmpty(ptr unsafe.Pointer) bool {
p := *((*unsafe.Pointer)(ptr))
return p == nil || encoder.valueEncoder.IsEmpty(p)
}
type placeholderEncoder struct {
@@ -146,7 +164,7 @@ func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
}
func (encoder *placeholderEncoder) EncodeInterface(val interface{}, stream *Stream) {
WriteToStream(val, stream, encoder)
encoder.getRealEncoder().EncodeInterface(val, stream)
}
func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
@@ -154,11 +172,11 @@ func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool {
}
func (encoder *placeholderEncoder) getRealEncoder() ValEncoder {
for i := 0; i < 30; i++ {
for i := 0; i < 500; i++ {
realDecoder := encoder.cfg.getEncoderFromCache(encoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderEncoder)
if isPlaceholder {
time.Sleep(time.Second)
time.Sleep(10 * time.Millisecond)
} else {
return realDecoder
}
@@ -172,11 +190,11 @@ type placeholderDecoder struct {
}
func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
for i := 0; i < 30; i++ {
for i := 0; i < 500; i++ {
realDecoder := decoder.cfg.getDecoderFromCache(decoder.cacheKey)
_, isPlaceholder := realDecoder.(*placeholderDecoder)
if isPlaceholder {
time.Sleep(time.Second)
time.Sleep(10 * time.Millisecond)
} else {
realDecoder.Decode(ptr, iter)
return
@@ -256,7 +274,7 @@ func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
if decoder != nil {
return decoder, nil
}
decoder = getTypeDecoderFromExtension(typ)
decoder = getTypeDecoderFromExtension(cfg, typ)
if decoder != nil {
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, nil
@@ -267,6 +285,9 @@ func decoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error) {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
for _, extension := range cfg.extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
cfg.addDecoderToCache(cacheKey, decoder)
return decoder, err
}
@@ -289,7 +310,7 @@ func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &unmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &optionalDecoder{typ.Elem(), decoder}
decoder = &OptionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
@@ -302,7 +323,7 @@ func createDecoderOfType(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error
templateInterface := reflect.New(typ).Elem().Interface()
var decoder ValDecoder = &textUnmarshalerDecoder{extractInterface(templateInterface)}
if typ.Kind() == reflect.Ptr {
decoder = &optionalDecoder{typ.Elem(), decoder}
decoder = &OptionalDecoder{typ.Elem(), decoder}
}
return decoder, nil
}
@@ -423,7 +444,7 @@ func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
if encoder != nil {
return encoder, nil
}
encoder = getTypeEncoderFromExtension(typ)
encoder = getTypeEncoderFromExtension(cfg, typ)
if encoder != nil {
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, nil
@@ -434,6 +455,9 @@ func encoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
for _, extension := range cfg.extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
cfg.addEncoderToCache(cacheKey, encoder)
return encoder, err
}
@@ -452,7 +476,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error
return &jsoniterNumberCodec{}, nil
}
if typ.Implements(marshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
checkIsEmpty, err := createCheckIsEmpty(cfg, typ)
if err != nil {
return nil, err
}
@@ -462,12 +486,24 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &optionalEncoder{encoder}
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}
if reflect.PtrTo(typ).Implements(marshalerType) {
checkIsEmpty, err := createCheckIsEmpty(cfg, reflect.PtrTo(typ))
if err != nil {
return nil, err
}
templateInterface := reflect.New(typ).Interface()
var encoder ValEncoder = &marshalerEncoder{
templateInterface: extractInterface(templateInterface),
checkIsEmpty: checkIsEmpty,
}
return encoder, nil
}
if typ.Implements(textMarshalerType) {
checkIsEmpty, err := createCheckIsEmpty(typ)
checkIsEmpty, err := createCheckIsEmpty(cfg, typ)
if err != nil {
return nil, err
}
@@ -477,7 +513,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error
checkIsEmpty: checkIsEmpty,
}
if typ.Kind() == reflect.Ptr {
encoder = &optionalEncoder{encoder}
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}
@@ -490,7 +526,7 @@ func createEncoderOfType(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error
return createEncoderOfSimpleType(cfg, typ)
}
func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) {
func createCheckIsEmpty(cfg *frozenConfig, typ reflect.Type) (checkIsEmpty, error) {
kind := typ.Kind()
switch kind {
case reflect.String:
@@ -535,9 +571,9 @@ func createCheckIsEmpty(typ reflect.Type) (checkIsEmpty, error) {
case reflect.Slice:
return &sliceEncoder{}, nil
case reflect.Map:
return &mapEncoder{}, nil
return encoderOfMap(cfg, typ)
case reflect.Ptr:
return &optionalEncoder{}, nil
return &OptionalEncoder{}, nil
default:
return nil, fmt.Errorf("unsupported type: %v", typ)
}
@@ -648,7 +684,7 @@ func decoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValDecoder, error)
if err != nil {
return nil, err
}
return &optionalDecoder{elemType, decoder}, nil
return &OptionalDecoder{elemType, decoder}, nil
}
func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
@@ -657,9 +693,9 @@ func encoderOfOptional(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error)
if err != nil {
return nil, err
}
encoder := &optionalEncoder{elemEncoder}
encoder := &OptionalEncoder{elemEncoder}
if elemType.Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
encoder = &OptionalEncoder{encoder}
}
return encoder, nil
}

View File

@@ -21,7 +21,7 @@ func encoderOfArray(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
encoder = &OptionalEncoder{encoder}
}
return &arrayEncoder{typ, typ.Elem(), encoder}, nil
}

View File

@@ -161,22 +161,31 @@ func RegisterExtension(extension Extension) {
extensions = append(extensions, extension)
}
func getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
decoder := _getTypeDecoderFromExtension(typ)
func getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder {
decoder := _getTypeDecoderFromExtension(cfg, typ)
if decoder != nil {
for _, extension := range extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
for _, extension := range cfg.extensions {
decoder = extension.DecorateDecoder(typ, decoder)
}
}
return decoder
}
func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
func _getTypeDecoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValDecoder {
for _, extension := range extensions {
decoder := extension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
}
for _, extension := range cfg.extensions {
decoder := extension.CreateDecoder(typ)
if decoder != nil {
return decoder
}
}
typeName := typ.String()
decoder := typeDecoders[typeName]
if decoder != nil {
@@ -185,29 +194,38 @@ func _getTypeDecoderFromExtension(typ reflect.Type) ValDecoder {
if typ.Kind() == reflect.Ptr {
decoder := typeDecoders[typ.Elem().String()]
if decoder != nil {
return &optionalDecoder{typ.Elem(), decoder}
return &OptionalDecoder{typ.Elem(), decoder}
}
}
return nil
}
func getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
encoder := _getTypeEncoderFromExtension(typ)
func getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder {
encoder := _getTypeEncoderFromExtension(cfg, typ)
if encoder != nil {
for _, extension := range extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
for _, extension := range cfg.extensions {
encoder = extension.DecorateEncoder(typ, encoder)
}
}
return encoder
}
func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
func _getTypeEncoderFromExtension(cfg *frozenConfig, typ reflect.Type) ValEncoder {
for _, extension := range extensions {
encoder := extension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
}
for _, extension := range cfg.extensions {
encoder := extension.CreateEncoder(typ)
if encoder != nil {
return encoder
}
}
typeName := typ.String()
encoder := typeEncoders[typeName]
if encoder != nil {
@@ -216,7 +234,7 @@ func _getTypeEncoderFromExtension(typ reflect.Type) ValEncoder {
if typ.Kind() == reflect.Ptr {
encoder := typeEncoders[typ.Elem().String()]
if encoder != nil {
return &optionalEncoder{encoder}
return &OptionalEncoder{encoder}
}
}
return nil
@@ -254,7 +272,7 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err
for _, binding := range structDescriptor.Fields {
binding.levels = append([]int{i}, binding.levels...)
omitempty := binding.Encoder.(*structFieldEncoder).omitempty
binding.Encoder = &optionalEncoder{binding.Encoder}
binding.Encoder = &OptionalEncoder{binding.Encoder}
binding.Encoder = &structFieldEncoder{&field, binding.Encoder, omitempty}
binding.Decoder = &deferenceDecoder{field.Type.Elem(), binding.Decoder}
binding.Decoder = &structFieldDecoder{&field, binding.Decoder}
@@ -269,7 +287,7 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err
if decoder == nil {
var err error
decoder, err = decoderOfType(cfg, field.Type)
if err != nil {
if len(fieldNames) > 0 && err != nil {
return nil, err
}
}
@@ -277,12 +295,13 @@ func describeStruct(cfg *frozenConfig, typ reflect.Type) (*StructDescriptor, err
if encoder == nil {
var err error
encoder, err = encoderOfType(cfg, field.Type)
if err != nil {
if len(fieldNames) > 0 && err != nil {
return nil, err
}
// map is stored as pointer in the struct
if field.Type.Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
// map is stored as pointer in the struct,
// and treat nil or empty map as empty field
if encoder != nil && field.Type.Kind() == reflect.Map {
encoder = &optionalMapEncoder{encoder}
}
}
binding := &Binding{
@@ -323,6 +342,9 @@ func createStructDescriptor(cfg *frozenConfig, typ reflect.Type, bindings []*Bin
for _, extension := range extensions {
extension.UpdateStructDescriptor(structDescriptor)
}
for _, extension := range cfg.extensions {
extension.UpdateStructDescriptor(structDescriptor)
}
processTags(structDescriptor, cfg)
// merge normal & embedded bindings & sort with original order
allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...))

View File

@@ -4,6 +4,7 @@ import (
"encoding"
"encoding/base64"
"encoding/json"
"reflect"
"unsafe"
)
@@ -31,7 +32,9 @@ type intCodec struct {
}
func (codec *intCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int)(ptr)) = iter.ReadInt()
if !iter.ReadNil() {
*((*int)(ptr)) = iter.ReadInt()
}
}
func (codec *intCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -50,7 +53,9 @@ type uintptrCodec struct {
}
func (codec *uintptrCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64())
if !iter.ReadNil() {
*((*uintptr)(ptr)) = uintptr(iter.ReadUint64())
}
}
func (codec *uintptrCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -69,7 +74,9 @@ type int8Codec struct {
}
func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int8)(ptr)) = iter.ReadInt8()
if !iter.ReadNil() {
*((*int8)(ptr)) = iter.ReadInt8()
}
}
func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -88,7 +95,9 @@ type int16Codec struct {
}
func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int16)(ptr)) = iter.ReadInt16()
if !iter.ReadNil() {
*((*int16)(ptr)) = iter.ReadInt16()
}
}
func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -107,7 +116,9 @@ type int32Codec struct {
}
func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int32)(ptr)) = iter.ReadInt32()
if !iter.ReadNil() {
*((*int32)(ptr)) = iter.ReadInt32()
}
}
func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -126,7 +137,9 @@ type int64Codec struct {
}
func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*int64)(ptr)) = iter.ReadInt64()
if !iter.ReadNil() {
*((*int64)(ptr)) = iter.ReadInt64()
}
}
func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -145,7 +158,10 @@ type uintCodec struct {
}
func (codec *uintCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint)(ptr)) = iter.ReadUint()
if !iter.ReadNil() {
*((*uint)(ptr)) = iter.ReadUint()
return
}
}
func (codec *uintCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -164,7 +180,9 @@ type uint8Codec struct {
}
func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint8)(ptr)) = iter.ReadUint8()
if !iter.ReadNil() {
*((*uint8)(ptr)) = iter.ReadUint8()
}
}
func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -183,7 +201,9 @@ type uint16Codec struct {
}
func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint16)(ptr)) = iter.ReadUint16()
if !iter.ReadNil() {
*((*uint16)(ptr)) = iter.ReadUint16()
}
}
func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -202,7 +222,9 @@ type uint32Codec struct {
}
func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint32)(ptr)) = iter.ReadUint32()
if !iter.ReadNil() {
*((*uint32)(ptr)) = iter.ReadUint32()
}
}
func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -221,7 +243,9 @@ type uint64Codec struct {
}
func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*uint64)(ptr)) = iter.ReadUint64()
if !iter.ReadNil() {
*((*uint64)(ptr)) = iter.ReadUint64()
}
}
func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -240,7 +264,9 @@ type float32Codec struct {
}
func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*float32)(ptr)) = iter.ReadFloat32()
if !iter.ReadNil() {
*((*float32)(ptr)) = iter.ReadFloat32()
}
}
func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -259,7 +285,9 @@ type float64Codec struct {
}
func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*float64)(ptr)) = iter.ReadFloat64()
if !iter.ReadNil() {
*((*float64)(ptr)) = iter.ReadFloat64()
}
}
func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -278,7 +306,9 @@ type boolCodec struct {
}
func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*bool)(ptr)) = iter.ReadBool()
if !iter.ReadNil() {
*((*bool)(ptr)) = iter.ReadBool()
}
}
func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -297,7 +327,42 @@ type emptyInterfaceCodec struct {
}
func (codec *emptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*interface{})(ptr)) = iter.Read()
existing := *((*interface{})(ptr))
// Checking for both typed and untyped nil pointers.
if existing != nil &&
reflect.TypeOf(existing).Kind() == reflect.Ptr &&
!reflect.ValueOf(existing).IsNil() {
var ptrToExisting interface{}
for {
elem := reflect.ValueOf(existing).Elem()
if elem.Kind() != reflect.Ptr || elem.IsNil() {
break
}
ptrToExisting = existing
existing = elem.Interface()
}
if iter.ReadNil() {
if ptrToExisting != nil {
nilPtr := reflect.Zero(reflect.TypeOf(ptrToExisting).Elem())
reflect.ValueOf(ptrToExisting).Elem().Set(nilPtr)
} else {
*((*interface{})(ptr)) = nil
}
} else {
iter.ReadVal(existing)
}
return
}
if iter.ReadNil() {
*((*interface{})(ptr)) = nil
} else {
*((*interface{})(ptr)) = iter.Read()
}
}
func (codec *emptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -309,7 +374,8 @@ func (codec *emptyInterfaceCodec) EncodeInterface(val interface{}, stream *Strea
}
func (codec *emptyInterfaceCodec) IsEmpty(ptr unsafe.Pointer) bool {
return ptr == nil
emptyInterface := (*emptyInterface)(ptr)
return emptyInterface.typ == nil
}
type nonEmptyInterfaceCodec struct {
@@ -326,15 +392,20 @@ func (codec *nonEmptyInterfaceCodec) Decode(ptr unsafe.Pointer, iter *Iterator)
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
iter.ReadVal(&i)
if e.word == nil {
nonEmptyInterface.itab = nil
}
nonEmptyInterface.word = e.word
}
func (codec *nonEmptyInterfaceCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
nonEmptyInterface := (*nonEmptyInterface)(ptr)
var i interface{}
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
if nonEmptyInterface.itab != nil {
e := (*emptyInterface)(unsafe.Pointer(&i))
e.typ = nonEmptyInterface.itab.typ
e.word = nonEmptyInterface.word
}
stream.WriteVal(i)
}
@@ -370,7 +441,15 @@ type jsonNumberCodec struct {
}
func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
switch iter.WhatIsNext() {
case StringValue:
*((*json.Number)(ptr)) = json.Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*json.Number)(ptr)) = ""
default:
*((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString()))
}
}
func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -389,7 +468,15 @@ type jsoniterNumberCodec struct {
}
func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) {
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
switch iter.WhatIsNext() {
case StringValue:
*((*Number)(ptr)) = Number(iter.ReadString())
case NilValue:
iter.skipFourBytes('n', 'u', 'l', 'l')
*((*Number)(ptr)) = ""
default:
*((*Number)(ptr)) = Number([]byte(iter.readNumberAsString()))
}
}
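Both Number codecs now branch on the next token kind, so quoted numbers and nulls decode without error. A small sketch of the three cases:

```go
package main

import (
	"encoding/json"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var n json.Number

	_ = jsoniter.Unmarshal([]byte(`12.5`), &n)
	fmt.Println(n) // 12.5 (bare number, the pre-existing path)

	_ = jsoniter.Unmarshal([]byte(`"12.5"`), &n)
	fmt.Println(n) // 12.5 (quoted numbers are now accepted)

	_ = jsoniter.Unmarshal([]byte(`null`), &n)
	fmt.Println(n == "") // true (null now yields the empty Number)
}
```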
func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) {
@@ -521,7 +608,7 @@ type stringModeNumberDecoder struct {
func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) {
c := iter.nextToken()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect "`)
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
}
decoder.elemDecoder.Decode(ptr, iter)
@@ -530,7 +617,7 @@ func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterato
}
c = iter.readByte()
if c != '"' {
iter.ReportError("stringModeNumberDecoder", `expect "`)
iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c}))
return
}
}
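stringModeNumberDecoder backs the `,string` struct-tag option, and the new error text names the offending byte. A sketch of what it accepts and rejects:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

type order struct {
	ID int64 `json:"id,string"` // handled by stringModeNumberDecoder
}

func main() {
	var o order
	_ = jsoniter.Unmarshal([]byte(`{"id":"42"}`), &o)
	fmt.Println(o.ID) // 42

	err := jsoniter.Unmarshal([]byte(`{"id":42}`), &o)
	fmt.Println(err) // now reports: ...expect ", but found 4...
}
```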
@@ -595,7 +682,12 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) {
templateInterface := encoder.templateInterface
templateInterface.word = ptr
realInterface := (*interface{})(unsafe.Pointer(&templateInterface))
marshaler := (*realInterface).(json.Marshaler)
marshaler, ok := (*realInterface).(json.Marshaler)
if !ok {
stream.WriteVal(nil)
return
}
bytes, err := marshaler.MarshalJSON()
if err != nil {
stream.Error = err
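The comma-ok assertion is the whole fix: the one-result form panics when the value behind ptr turns out not to implement json.Marshaler. A standalone sketch of the pattern (encodeMaybeMarshaler is illustrative, not part of the diff):

```go
package sketch

import (
	"encoding/json"

	jsoniter "github.com/json-iterator/go"
)

func encodeMaybeMarshaler(v interface{}, stream *jsoniter.Stream) {
	m, ok := v.(json.Marshaler) // comma-ok form: no panic on failure
	if !ok {
		stream.WriteVal(nil) // degrade to null
		return
	}
	data, err := m.MarshalJSON()
	if err != nil {
		stream.Error = err
		return
	}
	stream.WriteRaw(string(data)) // simplified pass-through for the sketch
}
```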

View File

@@ -21,7 +21,7 @@ func encoderOfSlice(cfg *frozenConfig, typ reflect.Type) (ValEncoder, error) {
return nil, err
}
if typ.Elem().Kind() == reflect.Map {
encoder = &optionalEncoder{encoder}
encoder = &OptionalEncoder{encoder}
}
return &sliceEncoder{typ, typ.Elem(), encoder}, nil
}
@@ -127,12 +127,10 @@ func growOne(slice *sliceHeader, sliceType reflect.Type, elementType reflect.Typ
newVal := reflect.MakeSlice(sliceType, newLen, newCap)
dst := unsafe.Pointer(newVal.Pointer())
// copy old array into new array
originalBytesCount := uintptr(slice.Len) * elementType.Size()
srcPtr := (*[1 << 30]byte)(slice.Data)
dstPtr := (*[1 << 30]byte)(dst)
for i := uintptr(0); i < originalBytesCount; i++ {
dstPtr[i] = srcPtr[i]
}
originalBytesCount := slice.Len * int(elementType.Size())
srcSliceHeader := (unsafe.Pointer)(&sliceHeader{slice.Data, originalBytesCount, originalBytesCount})
dstSliceHeader := (unsafe.Pointer)(&sliceHeader{dst, originalBytesCount, originalBytesCount})
copy(*(*[]byte)(dstSliceHeader), *(*[]byte)(srcSliceHeader))
slice.Data = dst
slice.Len = newLen
slice.Cap = newCap
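The rewrite replaces a manual byte loop over fake `[1 << 30]byte` arrays with a single `copy`, which the runtime turns into a memmove. A self-contained sketch of the header trick, assuming a `sliceHeader` whose Data field is an unsafe.Pointer (unlike reflect.SliceHeader's uintptr):

```go
package main

import (
	"fmt"
	"unsafe"
)

// sliceHeader mirrors the runtime layout of a slice; because Data is an
// unsafe.Pointer, a fabricated header can be reinterpreted as a []byte.
type sliceHeader struct {
	Data unsafe.Pointer
	Len  int
	Cap  int
}

func memcopy(dst, src unsafe.Pointer, n int) {
	srcHdr := sliceHeader{Data: src, Len: n, Cap: n}
	dstHdr := sliceHeader{Data: dst, Len: n, Cap: n}
	copy(*(*[]byte)(unsafe.Pointer(&dstHdr)), *(*[]byte)(unsafe.Pointer(&srcHdr)))
}

func main() {
	src := []byte("hello")
	dst := make([]byte, len(src))
	memcopy(unsafe.Pointer(&dst[0]), unsafe.Pointer(&src[0]), len(src))
	fmt.Println(string(dst)) // hello
}
```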

View File

@@ -427,8 +427,18 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
if !iter.readObjectStart() {
return
}
fieldBytes := iter.readObjectFieldAsBytes()
field := *(*string)(unsafe.Pointer(&fieldBytes))
var fieldBytes []byte
var field string
if iter.cfg.objectFieldMustBeSimpleString {
fieldBytes = iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
} else {
field = iter.ReadString()
c := iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
}
fieldDecoder := decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip()
@@ -436,8 +446,16 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator)
fieldDecoder.Decode(ptr, iter)
}
for iter.nextToken() == ',' {
fieldBytes = iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
if iter.cfg.objectFieldMustBeSimpleString {
fieldBytes := iter.readObjectFieldAsBytes()
field = *(*string)(unsafe.Pointer(&fieldBytes))
} else {
field = iter.ReadString()
c := iter.nextToken()
if c != ':' {
iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c}))
}
}
fieldDecoder = decoder.fields[strings.ToLower(field)]
if fieldDecoder == nil {
iter.Skip()
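Both hunks branch on the objectFieldMustBeSimpleString config flag: the fast path reads the raw field bytes, while the general path goes through ReadString (handling escapes) and then checks for the ':' itself. A sketch of how the knob is exposed, assuming the matching jsoniter.Config field:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Opting in promises that object keys never contain escape sequences,
// which lets the decoder take the raw-bytes fast path shown above.
var fast = jsoniter.Config{ObjectFieldMustBeSimpleString: true}.Froze()

func main() {
	var m struct {
		Name string `json:"name"`
	}
	_ = fast.Unmarshal([]byte(`{"name":"x"}`), &m)
	fmt.Println(m.Name) // x

	// Under the default config, escaped keys such as {"nam\u0065":"x"}
	// are also decoded correctly, at the cost of the slower path.
}
```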

View File

@@ -4,15 +4,16 @@ import (
"io"
)
// Stream is an io.Writer-like object, with JSON-specific write functions.
// stream is an io.Writer-like object, with JSON-specific write functions.
// Error is not returned as a return value, but stored as the Error member on this stream instance.
type Stream struct {
cfg *frozenConfig
out io.Writer
buf []byte
n int
Error error
indention int
cfg *frozenConfig
out io.Writer
buf []byte
n int
Error error
indention int
Attachment interface{} // open for customized encoder
}
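The new exported Attachment field is an extension point: a value placed there travels with the stream and can be read back by custom ValEncoders during WriteVal. A sketch of the intended flow (ctx and payload are stand-ins, not part of the diff):

```go
package sketch

import jsoniter "github.com/json-iterator/go"

func encodeWith(ctx, payload interface{}) ([]byte, error) {
	stream := jsoniter.ConfigDefault.BorrowStream(nil)
	defer jsoniter.ConfigDefault.ReturnStream(stream)

	stream.Attachment = ctx // custom encoders can inspect stream.Attachment
	stream.WriteVal(payload)
	if stream.Error != nil {
		return nil, stream.Error
	}
	return append([]byte(nil), stream.Buffer()...), nil
}
```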
// NewStream creates a new stream instance.
@@ -191,6 +192,9 @@ func (stream *Stream) ensure(minimal int) {
func (stream *Stream) growAtLeast(minimal int) {
if stream.out != nil {
stream.Flush()
if stream.Available() >= minimal {
return
}
}
toGrow := len(stream.buf)
if toGrow < minimal {
@@ -280,8 +284,7 @@ func (stream *Stream) WriteArrayStart() {
// WriteEmptyArray write []
func (stream *Stream) WriteEmptyArray() {
stream.writeByte('[')
stream.writeByte(']')
stream.writeTwoBytes('[', ']')
}
// WriteArrayEnd write ] with possible indention

View File

@@ -1,191 +0,0 @@
All files in this repository are licensed as follows. If you contribute
to this repository, it is assumed that you license your contribution
under the same license unless you state otherwise.
All files Copyright (C) 2015 Canonical Ltd. unless otherwise specified in the file.
This software is licensed under the LGPLv3, included below.
As a special exception to the GNU Lesser General Public License version 3
("LGPL3"), the copyright holders of this Library give you permission to
convey to a third party a Combined Work that links statically or dynamically
to this Library without providing any Minimal Corresponding Source or
Minimal Application Code as set out in 4d or providing the installation
information set out in section 4e, provided that you comply with the other
provisions of LGPL3 and provided that you meet, for the Application the
terms and conditions of the license(s) which apply to the Application.
Except as stated in this special exception, the provisions of LGPL3 will
continue to comply in full to this Library. If you modify this Library, you
may apply this exception to your version of this Library, but you are not
obliged to do so. If you do not wish to do so, delete this exception
statement from your version. This exception does not (and cannot) modify any
license terms which apply to the Application, with which you must still
comply.
GNU LESSER GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
This version of the GNU Lesser General Public License incorporates
the terms and conditions of version 3 of the GNU General Public
License, supplemented by the additional permissions listed below.
0. Additional Definitions.
As used herein, "this License" refers to version 3 of the GNU Lesser
General Public License, and the "GNU GPL" refers to version 3 of the GNU
General Public License.
"The Library" refers to a covered work governed by this License,
other than an Application or a Combined Work as defined below.
An "Application" is any work that makes use of an interface provided
by the Library, but which is not otherwise based on the Library.
Defining a subclass of a class defined by the Library is deemed a mode
of using an interface provided by the Library.
A "Combined Work" is a work produced by combining or linking an
Application with the Library. The particular version of the Library
with which the Combined Work was made is also called the "Linked
Version".
The "Minimal Corresponding Source" for a Combined Work means the
Corresponding Source for the Combined Work, excluding any source code
for portions of the Combined Work that, considered in isolation, are
based on the Application, and not on the Linked Version.
The "Corresponding Application Code" for a Combined Work means the
object code and/or source code for the Application, including any data
and utility programs needed for reproducing the Combined Work from the
Application, but excluding the System Libraries of the Combined Work.
1. Exception to Section 3 of the GNU GPL.
You may convey a covered work under sections 3 and 4 of this License
without being bound by section 3 of the GNU GPL.
2. Conveying Modified Versions.
If you modify a copy of the Library, and, in your modifications, a
facility refers to a function or data to be supplied by an Application
that uses the facility (other than as an argument passed when the
facility is invoked), then you may convey a copy of the modified
version:
a) under this License, provided that you make a good faith effort to
ensure that, in the event an Application does not supply the
function or data, the facility still operates, and performs
whatever part of its purpose remains meaningful, or
b) under the GNU GPL, with none of the additional permissions of
this License applicable to that copy.
3. Object Code Incorporating Material from Library Header Files.
The object code form of an Application may incorporate material from
a header file that is part of the Library. You may convey such object
code under terms of your choice, provided that, if the incorporated
material is not limited to numerical parameters, data structure
layouts and accessors, or small macros, inline functions and templates
(ten or fewer lines in length), you do both of the following:
a) Give prominent notice with each copy of the object code that the
Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the object code with a copy of the GNU GPL and this license
document.
4. Combined Works.
You may convey a Combined Work under terms of your choice that,
taken together, effectively do not restrict modification of the
portions of the Library contained in the Combined Work and reverse
engineering for debugging such modifications, if you also do each of
the following:
a) Give prominent notice with each copy of the Combined Work that
the Library is used in it and that the Library and its use are
covered by this License.
b) Accompany the Combined Work with a copy of the GNU GPL and this license
document.
c) For a Combined Work that displays copyright notices during
execution, include the copyright notice for the Library among
these notices, as well as a reference directing the user to the
copies of the GNU GPL and this license document.
d) Do one of the following:
0) Convey the Minimal Corresponding Source under the terms of this
License, and the Corresponding Application Code in a form
suitable for, and under terms that permit, the user to
recombine or relink the Application with a modified version of
the Linked Version to produce a modified Combined Work, in the
manner specified by section 6 of the GNU GPL for conveying
Corresponding Source.
1) Use a suitable shared library mechanism for linking with the
Library. A suitable mechanism is one that (a) uses at run time
a copy of the Library already present on the user's computer
system, and (b) will operate properly with a modified version
of the Library that is interface-compatible with the Linked
Version.
e) Provide Installation Information, but only if you would otherwise
be required to provide such information under section 6 of the
GNU GPL, and only to the extent that such information is
necessary to install and execute a modified version of the
Combined Work produced by recombining or relinking the
Application with a modified version of the Linked Version. (If
you use option 4d0, the Installation Information must accompany
the Minimal Corresponding Source and Corresponding Application
Code. If you use option 4d1, you must provide the Installation
Information in the manner specified by section 6 of the GNU GPL
for conveying Corresponding Source.)
5. Combined Libraries.
You may place library facilities that are a work based on the
Library side by side in a single library together with other library
facilities that are not Applications and are not covered by this
License, and convey such a combined library under terms of your
choice, if you do both of the following:
a) Accompany the combined library with a copy of the same work based
on the Library, uncombined with any other library facilities,
conveyed under the terms of this License.
b) Give prominent notice with the combined library that part of it
is a work based on the Library, and explaining where to find the
accompanying uncombined form of the same work.
6. Revised Versions of the GNU Lesser General Public License.
The Free Software Foundation may publish revised and/or new versions
of the GNU Lesser General Public License from time to time. Such new
versions will be similar in spirit to the present version, but may
differ in detail to address new problems or concerns.
Each version is given a distinguishing version number. If the
Library as you received it specifies that a certain numbered version
of the GNU Lesser General Public License "or any later version"
applies to it, you have the option of following the terms and
conditions either of that published version or of any later version
published by the Free Software Foundation. If the Library as you
received it does not specify a version number of the GNU Lesser
General Public License, you may choose any version of the GNU Lesser
General Public License ever published by the Free Software Foundation.
If the Library as you received it specifies that a proxy can decide
whether future versions of the GNU Lesser General Public License shall
apply, that proxy's public statement of acceptance of any version is
permanent authorization for you to choose that version for the
Library.

View File

@@ -1,117 +0,0 @@
# ratelimit
--
import "github.com/juju/ratelimit"
The ratelimit package provides an efficient token bucket implementation. See
http://en.wikipedia.org/wiki/Token_bucket.
## Usage
#### func Reader
```go
func Reader(r io.Reader, bucket *Bucket) io.Reader
```
Reader returns a reader that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
#### func Writer
```go
func Writer(w io.Writer, bucket *Bucket) io.Writer
```
Writer returns a writer that is rate limited by the given token bucket. Each
token in the bucket represents one byte.
#### type Bucket
```go
type Bucket struct {
}
```
Bucket represents a token bucket that fills at a predetermined rate. Methods on
Bucket may be called concurrently.
#### func NewBucket
```go
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket
```
NewBucket returns a new token bucket that fills at the rate of one token every
fillInterval, up to the given maximum capacity. Both arguments must be positive.
The bucket is initially full.
#### func NewBucketWithQuantum
```go
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket
```
NewBucketWithQuantum is similar to NewBucket, but allows the specification of
the quantum size - quantum tokens are added every fillInterval.
#### func NewBucketWithRate
```go
func NewBucketWithRate(rate float64, capacity int64) *Bucket
```
NewBucketWithRate returns a token bucket that fills the bucket at the rate of
rate tokens per second up to the given maximum capacity. Because of limited
clock resolution, at high rates, the actual rate may be up to 1% different from
the specified rate.
#### func (*Bucket) Rate
```go
func (tb *Bucket) Rate() float64
```
Rate returns the fill rate of the bucket, in tokens per second.
#### func (*Bucket) Take
```go
func (tb *Bucket) Take(count int64) time.Duration
```
Take takes count tokens from the bucket without blocking. It returns the time
that the caller should wait until the tokens are actually available.
Note that the request is irrevocable - there is no way to return tokens to
the bucket once this method commits us to taking them.
#### func (*Bucket) TakeAvailable
```go
func (tb *Bucket) TakeAvailable(count int64) int64
```
TakeAvailable takes up to count immediately available tokens from the bucket. It
returns the number of tokens removed, or zero if there are no available tokens.
It does not block.
#### func (*Bucket) TakeMaxDuration
```go
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool)
```
TakeMaxDuration is like Take, except that it will only take tokens from the
bucket if the wait time for the tokens is no greater than maxWait.
If it would take longer than maxWait for the tokens to become available, it does
nothing and reports false, otherwise it returns the time that the caller should
wait until the tokens are actually available, and reports true.
#### func (*Bucket) Wait
```go
func (tb *Bucket) Wait(count int64)
```
Wait takes count tokens from the bucket, waiting until they are available.
#### func (*Bucket) WaitMaxDuration
```go
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool
```
WaitMaxDuration is like Wait except that it will only take tokens from the
bucket if it needs to wait for no greater than maxWait. It reports whether any
tokens have been removed from the bucket. If no tokens have been removed, it
returns immediately.

View File

@@ -1,284 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
// Package ratelimit provides an efficient token bucket implementation
// that can be used to limit the rate of arbitrary things.
// See http://en.wikipedia.org/wiki/Token_bucket.
package ratelimit
import (
"math"
"strconv"
"sync"
"time"
)
// Bucket represents a token bucket that fills at a predetermined rate.
// Methods on Bucket may be called concurrently.
type Bucket struct {
startTime time.Time
capacity int64
quantum int64
fillInterval time.Duration
clock Clock
// The mutex guards the fields following it.
mu sync.Mutex
// avail holds the number of available tokens
// in the bucket, as of availTick ticks from startTime.
// It will be negative when there are consumers
// waiting for tokens.
avail int64
availTick int64
}
// Clock is used to inject testable fakes.
type Clock interface {
Now() time.Time
Sleep(d time.Duration)
}
// realClock implements Clock in terms of standard time functions.
type realClock struct{}
// Now is identical to time.Now.
func (realClock) Now() time.Time {
return time.Now()
}
// Sleep is identical to time.Sleep.
func (realClock) Sleep(d time.Duration) {
time.Sleep(d)
}
// NewBucket returns a new token bucket that fills at the
// rate of one token every fillInterval, up to the given
// maximum capacity. Both arguments must be
// positive. The bucket is initially full.
func NewBucket(fillInterval time.Duration, capacity int64) *Bucket {
return NewBucketWithClock(fillInterval, capacity, realClock{})
}
// NewBucketWithClock is identical to NewBucket but injects a testable clock
// interface.
func NewBucketWithClock(fillInterval time.Duration, capacity int64, clock Clock) *Bucket {
return NewBucketWithQuantumAndClock(fillInterval, capacity, 1, clock)
}
// rateMargin specifies the allowed variance of actual
// rate from specified rate. 1% seems reasonable.
const rateMargin = 0.01
// NewBucketWithRate returns a token bucket that fills the bucket
// at the rate of rate tokens per second up to the given
// maximum capacity. Because of limited clock resolution,
// at high rates, the actual rate may be up to 1% different from the
// specified rate.
func NewBucketWithRate(rate float64, capacity int64) *Bucket {
return NewBucketWithRateAndClock(rate, capacity, realClock{})
}
// NewBucketWithRateAndClock is identical to NewBucketWithRate but injects a
// testable clock interface.
func NewBucketWithRateAndClock(rate float64, capacity int64, clock Clock) *Bucket {
for quantum := int64(1); quantum < 1<<50; quantum = nextQuantum(quantum) {
fillInterval := time.Duration(1e9 * float64(quantum) / rate)
if fillInterval <= 0 {
continue
}
tb := NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, clock)
if diff := math.Abs(tb.Rate() - rate); diff/rate <= rateMargin {
return tb
}
}
panic("cannot find suitable quantum for " + strconv.FormatFloat(rate, 'g', -1, 64))
}
// nextQuantum returns the next quantum to try after q.
// We grow the quantum exponentially, but slowly, so we
// get a good fit in the lower numbers.
func nextQuantum(q int64) int64 {
q1 := q * 11 / 10
if q1 == q {
q1++
}
return q1
}
// NewBucketWithQuantum is similar to NewBucket, but allows
// the specification of the quantum size - quantum tokens
// are added every fillInterval.
func NewBucketWithQuantum(fillInterval time.Duration, capacity, quantum int64) *Bucket {
return NewBucketWithQuantumAndClock(fillInterval, capacity, quantum, realClock{})
}
// NewBucketWithQuantumAndClock is identical to NewBucketWithQuantum but injects
// a testable clock interface.
func NewBucketWithQuantumAndClock(fillInterval time.Duration, capacity, quantum int64, clock Clock) *Bucket {
if fillInterval <= 0 {
panic("token bucket fill interval is not > 0")
}
if capacity <= 0 {
panic("token bucket capacity is not > 0")
}
if quantum <= 0 {
panic("token bucket quantum is not > 0")
}
return &Bucket{
clock: clock,
startTime: clock.Now(),
capacity: capacity,
quantum: quantum,
avail: capacity,
fillInterval: fillInterval,
}
}
// Wait takes count tokens from the bucket, waiting until they are
// available.
func (tb *Bucket) Wait(count int64) {
if d := tb.Take(count); d > 0 {
tb.clock.Sleep(d)
}
}
// WaitMaxDuration is like Wait except that it will
// only take tokens from the bucket if it needs to wait
// for no greater than maxWait. It reports whether
// any tokens have been removed from the bucket.
// If no tokens have been removed, it returns immediately.
func (tb *Bucket) WaitMaxDuration(count int64, maxWait time.Duration) bool {
d, ok := tb.TakeMaxDuration(count, maxWait)
if d > 0 {
tb.clock.Sleep(d)
}
return ok
}
const infinityDuration time.Duration = 0x7fffffffffffffff
// Take takes count tokens from the bucket without blocking. It returns
// the time that the caller should wait until the tokens are actually
// available.
//
// Note that the request is irrevocable - there is no way to return
// tokens to the bucket once this method commits us to taking them.
func (tb *Bucket) Take(count int64) time.Duration {
d, _ := tb.take(tb.clock.Now(), count, infinityDuration)
return d
}
// TakeMaxDuration is like Take, except that
// it will only take tokens from the bucket if the wait
// time for the tokens is no greater than maxWait.
//
// If it would take longer than maxWait for the tokens
// to become available, it does nothing and reports false,
// otherwise it returns the time that the caller should
// wait until the tokens are actually available, and reports
// true.
func (tb *Bucket) TakeMaxDuration(count int64, maxWait time.Duration) (time.Duration, bool) {
return tb.take(tb.clock.Now(), count, maxWait)
}
// TakeAvailable takes up to count immediately available tokens from the
// bucket. It returns the number of tokens removed, or zero if there are
// no available tokens. It does not block.
func (tb *Bucket) TakeAvailable(count int64) int64 {
return tb.takeAvailable(tb.clock.Now(), count)
}
// takeAvailable is the internal version of TakeAvailable - it takes the
// current time as an argument to enable easy testing.
func (tb *Bucket) takeAvailable(now time.Time, count int64) int64 {
if count <= 0 {
return 0
}
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
if tb.avail <= 0 {
return 0
}
if count > tb.avail {
count = tb.avail
}
tb.avail -= count
return count
}
// Available returns the number of available tokens. It will be negative
// when there are consumers waiting for tokens. Note that if this
// returns greater than zero, it does not guarantee that calls that take
// tokens from the bucket will succeed, as the number of available
// tokens could have changed in the meantime. This method is intended
// primarily for metrics reporting and debugging.
func (tb *Bucket) Available() int64 {
return tb.available(tb.clock.Now())
}
// available is the internal version of available - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) available(now time.Time) int64 {
tb.mu.Lock()
defer tb.mu.Unlock()
tb.adjust(now)
return tb.avail
}
// Capacity returns the capacity that the bucket was created with.
func (tb *Bucket) Capacity() int64 {
return tb.capacity
}
// Rate returns the fill rate of the bucket, in tokens per second.
func (tb *Bucket) Rate() float64 {
return 1e9 * float64(tb.quantum) / float64(tb.fillInterval)
}
// take is the internal version of Take - it takes the current time as
// an argument to enable easy testing.
func (tb *Bucket) take(now time.Time, count int64, maxWait time.Duration) (time.Duration, bool) {
if count <= 0 {
return 0, true
}
tb.mu.Lock()
defer tb.mu.Unlock()
currentTick := tb.adjust(now)
avail := tb.avail - count
if avail >= 0 {
tb.avail = avail
return 0, true
}
// Round up the missing tokens to the nearest multiple
// of quantum - the tokens won't be available until
// that tick.
endTick := currentTick + (-avail+tb.quantum-1)/tb.quantum
endTime := tb.startTime.Add(time.Duration(endTick) * tb.fillInterval)
waitTime := endTime.Sub(now)
if waitTime > maxWait {
return 0, false
}
tb.avail = avail
return waitTime, true
}
// adjust adjusts the current bucket capacity based on the current time.
// It returns the current tick.
func (tb *Bucket) adjust(now time.Time) (currentTick int64) {
currentTick = int64(now.Sub(tb.startTime) / tb.fillInterval)
if tb.avail >= tb.capacity {
return
}
tb.avail += (currentTick - tb.availTick) * tb.quantum
if tb.avail > tb.capacity {
tb.avail = tb.capacity
}
tb.availTick = currentTick
return
}
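For reference, a usage sketch built only from the API defined above (the package is being dropped from vendor/):

```go
package main

import (
	"fmt"
	"time"

	"github.com/juju/ratelimit"
)

func main() {
	// One token per 100ms, burst capacity 10; the bucket starts full.
	tb := ratelimit.NewBucket(100*time.Millisecond, 10)

	tb.Wait(1)               // block until a token is available
	n := tb.TakeAvailable(5) // take up to 5 tokens without blocking
	fmt.Println("got", n, "tokens")

	if d, ok := tb.TakeMaxDuration(3, time.Second); ok {
		time.Sleep(d) // the take is committed; sleep out the deficit
	}
}
```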

View File

@@ -1,51 +0,0 @@
// Copyright 2014 Canonical Ltd.
// Licensed under the LGPLv3 with static-linking exception.
// See LICENCE file for details.
package ratelimit
import "io"
type reader struct {
r io.Reader
bucket *Bucket
}
// Reader returns a reader that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Reader(r io.Reader, bucket *Bucket) io.Reader {
return &reader{
r: r,
bucket: bucket,
}
}
func (r *reader) Read(buf []byte) (int, error) {
n, err := r.r.Read(buf)
if n <= 0 {
return n, err
}
r.bucket.Wait(int64(n))
return n, err
}
type writer struct {
w io.Writer
bucket *Bucket
}
// Writer returns a writer that is rate limited by
// the given token bucket. Each token in the bucket
// represents one byte.
func Writer(w io.Writer, bucket *Bucket) io.Writer {
return &writer{
w: w,
bucket: bucket,
}
}
func (w *writer) Write(buf []byte) (int, error) {
w.bucket.Wait(int64(len(buf)))
return w.w.Write(buf)
}

View File

@@ -1,19 +0,0 @@
Copyright (c) 2011-2012 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,141 +0,0 @@
# What is diskv?
Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.
[![Build Status][1]][2]
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest
# Installing
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,
```bash
$ go get github.com/peterbourgon/diskv
```
[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install
# Usage
```go
package main
import (
"fmt"
"github.com/peterbourgon/diskv"
)
func main() {
// Simplest transform function: put all the data files into the base dir.
flatTransform := func(s string) []string { return []string{} }
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
d := diskv.New(diskv.Options{
BasePath: "my-data-dir",
Transform: flatTransform,
CacheSizeMax: 1024 * 1024,
})
// Write three bytes to the key "alpha".
key := "alpha"
d.Write(key, []byte{'1', '2', '3'})
// Read the value back out of the store.
value, _ := d.Read(key)
fmt.Printf("%v\n", value)
// Erase the key+value from the store (and the disk).
d.Erase(key)
}
```
More complex examples can be found in the "examples" subdirectory.
# Theory
## Basic idea
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,
```go
func SimpleTransform(key string) []string {
return []string{}
}
```
will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then
```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
```
This will be addressed in an upcoming version of diskv.
Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.
## Adding a cache
An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.
## Adding order
diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.
[7]: https://github.com/google/btree
## Adding compression
Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
## Streaming
diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.
# Future plans
* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of

View File

@@ -1,64 +0,0 @@
package diskv
import (
"compress/flate"
"compress/gzip"
"compress/zlib"
"io"
)
// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
Writer(dst io.Writer) (io.WriteCloser, error)
Reader(src io.Reader) (io.ReadCloser, error)
}
// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
return NewGzipCompressionLevel(flate.DefaultCompression)
}
// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
return &genericCompression{
wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
}
}
// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
return NewZlibCompressionLevel(flate.DefaultCompression)
}
// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
return NewZlibCompressionLevelDict(level, nil)
}
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
return &genericCompression{
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
}
}
type genericCompression struct {
wf func(w io.Writer) (io.WriteCloser, error)
rf func(r io.Reader) (io.ReadCloser, error)
}
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
return g.wf(dst)
}
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
return g.rf(src)
}
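Plugging one of these constructors into the diskv Options shown earlier wires every Write and Read through the compression pipeline; a short sketch:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Note the cache stores compressed bytes, so each Read pays for
	// decompression (see the README section above).
	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		CacheSizeMax: 1024 * 1024,
		Compression:  diskv.NewGzipCompression(),
	})
	if err := d.Write("alpha", []byte("some payload")); err != nil {
		panic(err)
	}
	val, _ := d.Read("alpha") // transparently decompressed
	fmt.Println(string(val))
}
```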

View File

@@ -1,624 +0,0 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
const (
defaultBasePath = "diskv"
defaultFilePerm os.FileMode = 0666
defaultPathPerm os.FileMode = 0777
)
var (
defaultTransform = func(s string) []string { return []string{} }
errCanceled = errors.New("canceled")
errEmptyKey = errors.New("empty key")
errBadKey = errors.New("bad key")
errImportDirectory = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
BasePath string
Transform TransformFunction
CacheSizeMax uint64 // bytes
PathPerm os.FileMode
FilePerm os.FileMode
// If TempDir is set, it will enable filesystem atomic writes by
// writing temporary files to that location before being moved
// to BasePath.
// Note that TempDir MUST be on the same device/partition as
// BasePath.
TempDir string
Index Index
IndexLess LessFunction
Compression Compression
}
// Diskv is the core key-value store type. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
Options
mu sync.RWMutex
cache map[string][]byte
cacheSize uint64
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
if o.BasePath == "" {
o.BasePath = defaultBasePath
}
if o.Transform == nil {
o.Transform = defaultTransform
}
if o.PathPerm == 0 {
o.PathPerm = defaultPathPerm
}
if o.FilePerm == 0 {
o.FilePerm = defaultFilePerm
}
d := &Diskv{
Options: o,
cache: map[string][]byte{},
cacheSize: 0,
}
if d.Index != nil && d.IndexLess != nil {
d.Index.Initialize(d.IndexLess, d.Keys(nil))
}
return d
}
// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
return d.WriteStream(key, bytes.NewBuffer(val), false)
}
// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
if len(key) <= 0 {
return errEmptyKey
}
d.mu.Lock()
defer d.mu.Unlock()
return d.writeStreamWithLock(key, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
if d.TempDir != "" {
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
return nil, fmt.Errorf("temp mkdir: %s", err)
}
f, err := ioutil.TempFile(d.TempDir, "")
if err != nil {
return nil, fmt.Errorf("temp file: %s", err)
}
if err := f.Chmod(d.FilePerm); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return nil, fmt.Errorf("chmod: %s", err)
}
return f, nil
}
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
if err != nil {
return nil, fmt.Errorf("open file: %s", err)
}
return f, nil
}
// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
if err := d.ensurePathWithLock(key); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
f, err := d.createKeyFileWithLock(key)
if err != nil {
return fmt.Errorf("create key file: %s", err)
}
wc := io.WriteCloser(&nopWriteCloser{f})
if d.Compression != nil {
wc, err = d.Compression.Writer(f)
if err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression writer: %s", err)
}
}
if _, err := io.Copy(wc, r); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("i/o copy: %s", err)
}
if err := wc.Close(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression close: %s", err)
}
if sync {
if err := f.Sync(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("file sync: %s", err)
}
}
if err := f.Close(); err != nil {
return fmt.Errorf("file close: %s", err)
}
if f.Name() != d.completeFilename(key) {
if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("rename: %s", err)
}
}
if d.Index != nil {
d.Index.Insert(key)
}
d.bustCacheWithLock(key) // cache only on read
return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
if dstKey == "" {
return errEmptyKey
}
if fi, err := os.Stat(srcFilename); err != nil {
return err
} else if fi.IsDir() {
return errImportDirectory
}
d.mu.Lock()
defer d.mu.Unlock()
if err := d.ensurePathWithLock(dstKey); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
if move {
if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
d.bustCacheWithLock(dstKey)
return nil
} else if err != syscall.EXDEV {
// The rename failed for a reason other than crossing devices; give up.
// (A cross-device failure, EXDEV, falls through to the copy path below.)
return err
}
}
f, err := os.Open(srcFilename)
if err != nil {
return err
}
defer f.Close()
err = d.writeStreamWithLock(dstKey, f, false)
if err == nil && move {
err = os.Remove(srcFilename)
}
return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
rc, err := d.ReadStream(key, false)
if err != nil {
return []byte{}, err
}
defer rc.Close()
return ioutil.ReadAll(rc)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
d.mu.RLock()
defer d.mu.RUnlock()
if val, ok := d.cache[key]; ok {
if !direct {
buf := bytes.NewBuffer(val)
if d.Compression != nil {
return d.Compression.Reader(buf)
}
return ioutil.NopCloser(buf), nil
}
go func() {
d.mu.Lock()
defer d.mu.Unlock()
d.uncacheWithLock(key, uint64(len(val)))
}()
}
return d.readWithRLock(key)
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
filename := d.completeFilename(key)
fi, err := os.Stat(filename)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, os.ErrNotExist
}
f, err := os.Open(filename)
if err != nil {
return nil, err
}
var r io.Reader
if d.CacheSizeMax > 0 {
r = newSiphon(f, d, key)
} else {
r = &closingReader{f}
}
var rc = io.ReadCloser(ioutil.NopCloser(r))
if d.Compression != nil {
rc, err = d.Compression.Reader(r)
if err != nil {
return nil, err
}
}
return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
rc io.ReadCloser
}
func (cr closingReader) Read(p []byte) (int, error) {
n, err := cr.rc.Read(p)
if err == io.EOF {
if closeErr := cr.rc.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
}
return n, err
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
f *os.File
d *Diskv
key string
buf *bytes.Buffer
}
// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
return &siphon{
f: f,
d: d,
key: key,
buf: &bytes.Buffer{},
}
}
// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
n, err := s.f.Read(p)
if err == nil {
return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
}
if err == io.EOF {
s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
if closeErr := s.f.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
return n, err
}
return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
d.mu.Lock()
defer d.mu.Unlock()
d.bustCacheWithLock(key)
// erase from index
if d.Index != nil {
d.Index.Delete(key)
}
// erase from disk
filename := d.completeFilename(key)
if s, err := os.Stat(filename); err == nil {
if s.IsDir() {
return errBadKey
}
if err = os.Remove(filename); err != nil {
return err
}
} else {
// Return err as-is so caller can do os.IsNotExist(err).
return err
}
// clean up and return
d.pruneDirsWithLock(key)
return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
d.mu.Lock()
defer d.mu.Unlock()
d.cache = make(map[string][]byte)
d.cacheSize = 0
if d.TempDir != "" {
os.RemoveAll(d.TempDir) // errors ignored
}
return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
d.mu.Lock()
defer d.mu.Unlock()
if _, ok := d.cache[key]; ok {
return true
}
filename := d.completeFilename(key)
s, err := os.Stat(filename)
if err != nil {
return false
}
if s.IsDir() {
return false
}
return true
}
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
return d.KeysPrefix("", cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
var prepath string
if prefix == "" {
prepath = d.BasePath
} else {
prepath = d.pathFor(prefix)
}
c := make(chan string)
go func() {
filepath.Walk(prepath, walker(c, prefix, cancel))
close(c)
}()
return c
}
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
return nil // "pass"
}
select {
case c <- info.Name():
case <-cancel:
return errCanceled
}
return nil
}
}
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
return os.MkdirAll(d.pathFor(key), d.PathPerm)
}
// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
return filepath.Join(d.pathFor(key), key)
}
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
valueSize := uint64(len(val))
if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
return fmt.Errorf("%s; not caching", err)
}
// be very strict about memory guarantees
if (d.cacheSize + valueSize) > d.CacheSizeMax {
panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
}
d.cache[key] = val
d.cacheSize += valueSize
return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.cacheWithLock(key, val)
}
func (d *Diskv) bustCacheWithLock(key string) {
if val, ok := d.cache[key]; ok {
d.uncacheWithLock(key, uint64(len(val)))
}
}
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
d.cacheSize -= sz
delete(d.cache, key)
}
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
pathlist := d.Transform(key)
for i := range pathlist {
dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
// thanks to Steven Blenkinsop for this snippet
switch fi, err := os.Stat(dir); true {
case err != nil:
return err
case !fi.IsDir():
panic(fmt.Sprintf("corrupt dirstate at %s", dir))
}
nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
if err != nil {
return err
} else if len(nlinks) > 0 {
return nil // has subdirs -- do not prune
}
if err = os.Remove(dir); err != nil {
return err
}
}
return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
if valueSize > d.CacheSizeMax {
return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
}
safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
for key, val := range d.cache {
if safe() {
break
}
d.uncacheWithLock(key, uint64(len(val)))
}
if !safe() {
panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
}
return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }

View File

@@ -1,115 +0,0 @@
package diskv
import (
"sync"
"github.com/google/btree"
)
// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
Initialize(less LessFunction, keys <-chan string)
Insert(key string)
Delete(key string)
Keys(from string, n int) []string
}
// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool
// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
s string
l LessFunction
}
// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
return s.l(s.s, i.(btreeString).s)
}
// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
sync.RWMutex
LessFunction
*btree.BTree
}
// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
i.Lock()
defer i.Unlock()
i.LessFunction = less
i.BTree = rebuild(less, keys)
}
// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}
// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
i.RLock()
defer i.RUnlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
if i.BTree.Len() <= 0 {
return []string{}
}
btreeFrom := btreeString{s: from, l: i.LessFunction}
skipFirst := true
if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
// no such key, so fabricate an always-smallest item
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
skipFirst = false
}
keys := []string{}
iterator := func(i btree.Item) bool {
keys = append(keys, i.(btreeString).s)
return len(keys) < n
}
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
if skipFirst && len(keys) > 0 {
keys = keys[1:]
}
return keys
}
// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
tree := btree.New(2)
for key := range keys {
tree.ReplaceOrInsert(btreeString{s: key, l: less})
}
return tree
}
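A usage sketch tying BTreeIndex into the Options from diskv.go above:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:  "my-data-dir",
		Index:     &diskv.BTreeIndex{},
		IndexLess: func(a, b string) bool { return a < b },
	})
	_ = d.Write("a1", []byte("x"))
	_ = d.Write("a2", []byte("y"))

	fmt.Println(d.Index.Keys("", 10))   // up to 10 smallest keys, in order
	fmt.Println(d.Index.Keys("a1", 10)) // keys strictly after "a1"
}
```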

View File

@@ -3,18 +3,19 @@ sudo: false
language: go
go:
- 1.6.3
- 1.7.3
- tip
- 1.7.3
- 1.8.1
- tip
matrix:
allow_failures:
- go: tip
allow_failures:
- go: tip
install:
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
- go install ./...
- go get github.com/golang/lint/golint
- export PATH=$GOPATH/bin:$PATH
- go install ./...
script:
- verify/all.sh -v
- go test ./...
- verify/all.sh -v
- go test ./...

View File

@@ -246,6 +246,25 @@ It is possible to mark a flag as hidden, meaning it will still function as norma
flags.MarkHidden("secretFlag")
```
## Disable sorting of flags
`pflag` allows you to disable sorting of flags for help and usage messages.
**Example**:
```go
flags.BoolP("verbose", "v", false, "verbose output")
flags.String("coolflag", "yeaah", "it's really cool flag")
flags.Int("usefulflag", 777, "sometimes it's very useful")
flags.SortFlags = false
flags.PrintDefaults()
```
**Output**:
```
-v, --verbose verbose output
--coolflag string it's really cool flag (default "yeaah")
--usefulflag int sometimes it's very useful (default 777)
```
## Supporting Go flags when using pflag
In order to support flags defined using Go's `flag` package, they must be added to the `pflag` flagset. This is usually necessary
to support flags defined by third-party dependencies (e.g. `golang/glog`).
@@ -270,8 +289,8 @@ func main() {
You can see the full reference documentation of the pflag package
[at godoc.org][3], or through go's standard documentation system by
running `godoc -http=:6060` and browsing to
[http://localhost:6060/pkg/github.com/ogier/pflag][2] after
[http://localhost:6060/pkg/github.com/spf13/pflag][2] after
installation.
[2]: http://localhost:6060/pkg/github.com/ogier/pflag
[3]: http://godoc.org/github.com/ogier/pflag
[2]: http://localhost:6060/pkg/github.com/spf13/pflag
[3]: http://godoc.org/github.com/spf13/pflag

View File

@@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue {
}
func (i *countValue) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 64)
// -1 means that no specific value was passed, so increment
if v == -1 {
// "+1" means that no specific value was passed, so increment
if s == "+1" {
*i = countValue(*i + 1)
} else {
*i = countValue(v)
return nil
}
v, err := strconv.ParseInt(s, 0, 0)
*i = countValue(v)
return err
}
@@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) {
// CountVarP is like CountVar only take a shorthand for the flag name.
func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) {
flag := f.VarPF(newCountValue(0, p), name, shorthand, usage)
flag.NoOptDefVal = "-1"
flag.NoOptDefVal = "+1"
}
// CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set
@@ -83,7 +83,9 @@ func (f *FlagSet) CountP(name, shorthand string, usage string) *int {
return p
}
// Count like Count only the flag is placed on the CommandLine isntead of a given flag set
// Count defines a count flag with specified name, default value, and usage string.
// The return value is the address of an int variable that stores the value of the flag.
// A count flag will add 1 to its value every time it is found on the command line
func Count(name string, usage string) *int {
return CommandLine.CountP(name, "", usage)
}
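A short sketch of the behavior this change targets: with NoOptDefVal now "+1", each bare occurrence of the flag increments the counter rather than passing the old -1 sentinel (the flag name is illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	verbose := fs.CountP("verbose", "v", "verbosity level")

	// Each bare -v routes "+1" through Set, so -vvv counts to 3;
	// an explicit --verbose=2 would still parse as the literal value.
	_ = fs.Parse([]string{"-vvv"})
	fmt.Println(*verbose) // 3
}
```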

vendor/github.com/spf13/pflag/flag.go generated vendored (286 lines changed)

@@ -16,9 +16,9 @@ pflag is a drop-in replacement of Go's native flag package. If you import
pflag under the name "flag" then all code should continue to function
with no changes.
import flag "github.com/ogier/pflag"
import flag "github.com/spf13/pflag"
There is one exception to this: if you directly instantiate the Flag struct
There is one exception to this: if you directly instantiate the Flag struct
there is one more field "Shorthand" that you will need to set.
Most code never instantiates this struct directly, and instead uses
functions such as String(), BoolVar(), and Var(), and is therefore
@@ -134,14 +134,21 @@ type FlagSet struct {
// a custom error handler.
Usage func()
// SortFlags is used to indicate, if user wants to have sorted flags in
// help/usage messages.
SortFlags bool
name string
parsed bool
actual map[NormalizedName]*Flag
orderedActual []*Flag
sortedActual []*Flag
formal map[NormalizedName]*Flag
orderedFormal []*Flag
sortedFormal []*Flag
shorthands map[byte]*Flag
args []string // arguments after flags
argsLenAtDash int // len(args) when a '--' was located when parsing, or -1 if no --
exitOnError bool // does the program exit if there's an error?
errorHandling ErrorHandling
output io.Writer // nil means stderr; use out() accessor
interspersed bool // allow interspersed option/non-option args
@@ -156,7 +163,7 @@ type Flag struct {
Value Value // value as set
DefValue string // default value (as text); for usage message
Changed bool // If the user set the value (or if left to default)
NoOptDefVal string //default value (as text); if the flag is on the command line without any options
NoOptDefVal string // default value (as text); if the flag is on the command line without any options
Deprecated string // If this flag is deprecated, this string is the new or now thing to use
Hidden bool // used by cobra.Command to allow flags to be hidden from help/usage text
ShorthandDeprecated string // If the shorthand of this flag is deprecated, this string is the new or now thing to use
@@ -194,11 +201,19 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag {
// "--getUrl" which may also be translated to "geturl" and everything will work.
func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) {
f.normalizeNameFunc = n
for k, v := range f.formal {
delete(f.formal, k)
nname := f.normalizeFlagName(string(k))
f.formal[nname] = v
v.Name = string(nname)
f.sortedFormal = f.sortedFormal[:0]
for fname, flag := range f.formal {
nname := f.normalizeFlagName(flag.Name)
if fname == nname {
continue
}
flag.Name = string(nname)
delete(f.formal, fname)
f.formal[nname] = flag
if _, set := f.actual[fname]; set {
delete(f.actual, fname)
f.actual[nname] = flag
}
}
}
@@ -229,10 +244,25 @@ func (f *FlagSet) SetOutput(output io.Writer) {
f.output = output
}
// VisitAll visits the flags in lexicographical order, calling fn for each.
// VisitAll visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func (f *FlagSet) VisitAll(fn func(*Flag)) {
for _, flag := range sortFlags(f.formal) {
if len(f.formal) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.formal) != len(f.sortedFormal) {
f.sortedFormal = sortFlags(f.formal)
}
flags = f.sortedFormal
} else {
flags = f.orderedFormal
}
for _, flag := range flags {
fn(flag)
}
}
@@ -253,22 +283,39 @@ func (f *FlagSet) HasAvailableFlags() bool {
return false
}
// VisitAll visits the command-line flags in lexicographical order, calling
// fn for each. It visits all flags, even those not set.
// VisitAll visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits all flags, even those not set.
func VisitAll(fn func(*Flag)) {
CommandLine.VisitAll(fn)
}
// Visit visits the flags in lexicographical order, calling fn for each.
// Visit visits the flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func (f *FlagSet) Visit(fn func(*Flag)) {
for _, flag := range sortFlags(f.actual) {
if len(f.actual) == 0 {
return
}
var flags []*Flag
if f.SortFlags {
if len(f.actual) != len(f.sortedActual) {
f.sortedActual = sortFlags(f.actual)
}
flags = f.sortedActual
} else {
flags = f.orderedActual
}
for _, flag := range flags {
fn(flag)
}
}
// Visit visits the command-line flags in lexicographical order, calling fn
// for each. It visits only those flags that have been set.
// Visit visits the command-line flags in lexicographical order or
// in primordial order if f.SortFlags is false, calling fn for each.
// It visits only those flags that have been set.
func Visit(fn func(*Flag)) {
CommandLine.Visit(fn)
}
@@ -278,6 +325,22 @@ func (f *FlagSet) Lookup(name string) *Flag {
return f.lookup(f.normalizeFlagName(name))
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
// It panics, if len(name) > 1.
func (f *FlagSet) ShorthandLookup(name string) *Flag {
if name == "" {
return nil
}
if len(name) > 1 {
msg := fmt.Sprintf("can not look up shorthand which is more than one ASCII character: %q", name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
c := name[0]
return f.shorthands[c]
}
// lookup returns the Flag structure of the named flag, returning nil if none exists.
func (f *FlagSet) lookup(name NormalizedName) *Flag {
return f.formal[name]
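A minimal sketch of how the new ShorthandLookup is called; anything longer than one ASCII character panics, per the check above (the flag names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.BoolP("verbose", "v", false, "verbose output")

	// Look a flag up by its one-character shorthand.
	if f := fs.ShorthandLookup("v"); f != nil {
		fmt.Println(f.Name) // verbose
	}
}
```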
@@ -319,7 +382,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error {
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.Deprecated = usageMessage
@@ -334,7 +397,7 @@ func (f *FlagSet) MarkShorthandDeprecated(name string, usageMessage string) erro
if flag == nil {
return fmt.Errorf("flag %q does not exist", name)
}
if len(usageMessage) == 0 {
if usageMessage == "" {
return fmt.Errorf("deprecated message for flag %q must be set", name)
}
flag.ShorthandDeprecated = usageMessage
@@ -358,6 +421,12 @@ func Lookup(name string) *Flag {
return CommandLine.Lookup(name)
}
// ShorthandLookup returns the Flag structure of the short handed flag,
// returning nil if none exists.
func ShorthandLookup(name string) *Flag {
return CommandLine.ShorthandLookup(name)
}
// Set sets the value of the named flag.
func (f *FlagSet) Set(name, value string) error {
normalName := f.normalizeFlagName(name)
@@ -365,17 +434,30 @@ func (f *FlagSet) Set(name, value string) error {
if !ok {
return fmt.Errorf("no such flag -%v", name)
}
err := flag.Value.Set(value)
if err != nil {
return err
var flagName string
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
flagName = fmt.Sprintf("-%s, --%s", flag.Shorthand, flag.Name)
} else {
flagName = fmt.Sprintf("--%s", flag.Name)
}
return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err)
}
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
if !flag.Changed {
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[normalName] = flag
f.orderedActual = append(f.orderedActual, flag)
flag.Changed = true
}
f.actual[normalName] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
if flag.Deprecated != "" {
fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
return nil
}
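For illustration, the friendlier error the rewritten Set produces when a value fails to parse (the flag name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.IntP("retries", "r", 0, "retry count")

	// Set wraps the Value.Set parse failure with the flag's spelling.
	err := fs.Set("retries", "many")
	fmt.Println(err)
	// invalid argument "many" for "-r, --retries" flag: strconv.ParseInt: parsing "many": invalid syntax
}
```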
@@ -482,6 +564,14 @@ func UnquoteUsage(flag *Flag) (name string, usage string) {
name = "int"
case "uint64":
name = "uint"
case "stringSlice":
name = "strings"
case "intSlice":
name = "ints"
case "uintSlice":
name = "uints"
case "boolSlice":
name = "bools"
}
return
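The new cases give slice-typed flags friendlier placeholders in help text; a quick sketch of the effect (the flag name is illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.StringSlice("labels", nil, "labels to apply")

	// UnquoteUsage maps the "stringSlice" type to "strings" in usage output:
	//       --labels strings   labels to apply
	fmt.Print(fs.FlagUsages())
}
```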
@@ -557,28 +647,28 @@ func wrap(i, w int, s string) string {
// for all flags in the FlagSet. Wrapped to `cols` columns (0 for no
// wrapping)
func (f *FlagSet) FlagUsagesWrapped(cols int) string {
x := new(bytes.Buffer)
buf := new(bytes.Buffer)
lines := make([]string, 0, len(f.formal))
maxlen := 0
f.VisitAll(func(flag *Flag) {
if len(flag.Deprecated) > 0 || flag.Hidden {
if flag.Deprecated != "" || flag.Hidden {
return
}
line := ""
if len(flag.Shorthand) > 0 && len(flag.ShorthandDeprecated) == 0 {
if flag.Shorthand != "" && flag.ShorthandDeprecated == "" {
line = fmt.Sprintf(" -%s, --%s", flag.Shorthand, flag.Name)
} else {
line = fmt.Sprintf(" --%s", flag.Name)
}
varname, usage := UnquoteUsage(flag)
if len(varname) > 0 {
if varname != "" {
line += " " + varname
}
if len(flag.NoOptDefVal) > 0 {
if flag.NoOptDefVal != "" {
switch flag.Value.Type() {
case "string":
line += fmt.Sprintf("[=\"%s\"]", flag.NoOptDefVal)
@@ -586,6 +676,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
if flag.NoOptDefVal != "true" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
case "count":
if flag.NoOptDefVal != "+1" {
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
default:
line += fmt.Sprintf("[=%s]", flag.NoOptDefVal)
}
@@ -601,7 +695,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
line += usage
if !flag.defaultIsZeroValue() {
if flag.Value.Type() == "string" {
line += fmt.Sprintf(" (default \"%s\")", flag.DefValue)
line += fmt.Sprintf(" (default %q)", flag.DefValue)
} else {
line += fmt.Sprintf(" (default %s)", flag.DefValue)
}
@@ -614,10 +708,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string {
sidx := strings.Index(line, "\x00")
spacing := strings.Repeat(" ", maxlen-sidx)
// maxlen + 2 comes from + 1 for the \x00 and + 1 for the (deliberate) off-by-one in maxlen-sidx
fmt.Fprintln(x, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
fmt.Fprintln(buf, line[:sidx], spacing, wrap(maxlen+2, cols, line[sidx+1:]))
}
return x.String()
return buf.String()
}
// FlagUsages returns a string containing the usage information for all flags in
@@ -714,11 +808,10 @@ func (f *FlagSet) VarP(value Value, name, shorthand, usage string) {
// AddFlag will add the flag to the FlagSet
func (f *FlagSet) AddFlag(flag *Flag) {
// Call normalizeFlagName function only once
normalizedFlagName := f.normalizeFlagName(flag.Name)
_, alreadythere := f.formal[normalizedFlagName]
if alreadythere {
_, alreadyThere := f.formal[normalizedFlagName]
if alreadyThere {
msg := fmt.Sprintf("%s flag redefined: %s", f.name, flag.Name)
fmt.Fprintln(f.out(), msg)
panic(msg) // Happens only if flags are declared with identical names
@@ -729,28 +822,31 @@ func (f *FlagSet) AddFlag(flag *Flag) {
flag.Name = string(normalizedFlagName)
f.formal[normalizedFlagName] = flag
f.orderedFormal = append(f.orderedFormal, flag)
if len(flag.Shorthand) == 0 {
if flag.Shorthand == "" {
return
}
if len(flag.Shorthand) > 1 {
fmt.Fprintf(f.out(), "%s shorthand more than ASCII character: %s\n", f.name, flag.Shorthand)
panic("shorthand is more than one character")
msg := fmt.Sprintf("%q shorthand is more than one ASCII character", flag.Shorthand)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
if f.shorthands == nil {
f.shorthands = make(map[byte]*Flag)
}
c := flag.Shorthand[0]
old, alreadythere := f.shorthands[c]
if alreadythere {
fmt.Fprintf(f.out(), "%s shorthand reused: %q for %s already used for %s\n", f.name, c, flag.Name, old.Name)
panic("shorthand redefinition")
used, alreadyThere := f.shorthands[c]
if alreadyThere {
msg := fmt.Sprintf("unable to redefine %q shorthand in %q flagset: it's already used for %q flag", c, f.name, used.Name)
fmt.Fprintf(f.out(), msg)
panic(msg)
}
f.shorthands[c] = flag
}
// AddFlagSet adds one FlagSet to another. If a flag is already present in f
// the flag from newSet will be ignored
// the flag from newSet will be ignored.
func (f *FlagSet) AddFlagSet(newSet *FlagSet) {
if newSet == nil {
return
@@ -781,8 +877,10 @@ func VarP(value Value, name, shorthand, usage string) {
// returns the error.
func (f *FlagSet) failf(format string, a ...interface{}) error {
err := fmt.Errorf(format, a...)
fmt.Fprintln(f.out(), err)
f.usage()
if f.errorHandling != ContinueOnError {
fmt.Fprintln(f.out(), err)
f.usage()
}
return err
}
@@ -798,34 +896,6 @@ func (f *FlagSet) usage() {
}
}
func (f *FlagSet) setFlag(flag *Flag, value string, origArg string) error {
if err := flag.Value.Set(value); err != nil {
return f.failf("invalid argument %q for %s: %v", value, origArg, err)
}
// mark as visited for Visit()
if f.actual == nil {
f.actual = make(map[NormalizedName]*Flag)
}
f.actual[f.normalizeFlagName(flag.Name)] = flag
flag.Changed = true
if len(flag.Deprecated) > 0 {
fmt.Fprintf(os.Stderr, "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated)
}
if len(flag.ShorthandDeprecated) > 0 && containsShorthand(origArg, flag.Shorthand) {
fmt.Fprintf(os.Stderr, "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
return nil
}
func containsShorthand(arg, shorthand string) bool {
// filter out flags --<flag_name>
if strings.HasPrefix(arg, "-") {
return false
}
arg = strings.SplitN(arg, "=", 2)[0]
return strings.Contains(arg, shorthand)
}
func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) {
a = args
name := s[2:]
@@ -833,10 +903,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("bad flag syntax: %s", s)
return
}
split := strings.SplitN(name, "=", 2)
name = split[0]
flag, alreadythere := f.formal[f.normalizeFlagName(name)]
if !alreadythere {
flag, exists := f.formal[f.normalizeFlagName(name)]
if !exists {
if name == "help" { // special case for nice help message.
f.usage()
return a, ErrHelp
@@ -844,11 +915,12 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("unknown flag: --%s", name)
return
}
var value string
if len(split) == 2 {
// '--flag=arg'
value = split[1]
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '--flag' (arg was optional)
value = flag.NoOptDefVal
} else if len(a) > 0 {
@@ -860,7 +932,11 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin
err = f.failf("flag needs an argument: %s", s)
return
}
err = fn(flag, value, s)
err = fn(flag, value)
if err != nil {
f.failf(err.Error())
}
return
}
@@ -868,38 +944,52 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse
if strings.HasPrefix(shorthands, "test.") {
return
}
outArgs = args
outShorts = shorthands[1:]
c := shorthands[0]
flag, alreadythere := f.shorthands[c]
if !alreadythere {
flag, exists := f.shorthands[c]
if !exists {
if c == 'h' { // special case for nice help message.
f.usage()
err = ErrHelp
return
}
//TODO continue on error
err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands)
return
}
var value string
if len(shorthands) > 2 && shorthands[1] == '=' {
// '-f=arg'
value = shorthands[2:]
outShorts = ""
} else if len(flag.NoOptDefVal) > 0 {
} else if flag.NoOptDefVal != "" {
// '-f' (arg was optional)
value = flag.NoOptDefVal
} else if len(shorthands) > 1 {
// '-farg'
value = shorthands[1:]
outShorts = ""
} else if len(args) > 0 {
// '-f arg'
value = args[0]
outArgs = args[1:]
} else {
// '-f' (arg was required)
err = f.failf("flag needs an argument: %q in -%s", c, shorthands)
return
}
err = fn(flag, value, shorthands)
if flag.ShorthandDeprecated != "" {
fmt.Fprintf(f.out(), "Flag shorthand -%s has been deprecated, %s\n", flag.Shorthand, flag.ShorthandDeprecated)
}
err = fn(flag, value)
if err != nil {
f.failf(err.Error())
}
return
}
@@ -907,6 +997,7 @@ func (f *FlagSet) parseShortArg(s string, args []string, fn parseFunc) (a []stri
a = args
shorthands := s[1:]
// "shorthands" can be a series of shorthand letters of flags (e.g. "-vvv").
for len(shorthands) > 0 {
shorthands, a, err = f.parseSingleShortArg(shorthands, args, fn)
if err != nil {
@@ -954,18 +1045,24 @@ func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) {
// The return value will be ErrHelp if -help was set but not defined.
func (f *FlagSet) Parse(arguments []string) error {
f.parsed = true
f.args = make([]string, 0, len(arguments))
assign := func(flag *Flag, value, origArg string) error {
return f.setFlag(flag, value, origArg)
if len(arguments) < 0 {
return nil
}
err := f.parseArgs(arguments, assign)
f.args = make([]string, 0, len(arguments))
set := func(flag *Flag, value string) error {
return f.Set(flag.Name, value)
}
err := f.parseArgs(arguments, set)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
return err
case ExitOnError:
fmt.Println(err)
os.Exit(2)
case PanicOnError:
panic(err)
@@ -974,7 +1071,7 @@ func (f *FlagSet) Parse(arguments []string) error {
return nil
}
type parseFunc func(flag *Flag, value, origArg string) error
type parseFunc func(flag *Flag, value string) error
// ParseAll parses flag definitions from the argument list, which should not
// include the command name. The arguments for fn are flag and value. Must be
@@ -985,11 +1082,7 @@ func (f *FlagSet) ParseAll(arguments []string, fn func(flag *Flag, value string)
f.parsed = true
f.args = make([]string, 0, len(arguments))
assign := func(flag *Flag, value, origArg string) error {
return fn(flag, value)
}
err := f.parseArgs(arguments, assign)
err := f.parseArgs(arguments, fn)
if err != nil {
switch f.errorHandling {
case ContinueOnError:
@@ -1036,14 +1129,15 @@ func Parsed() bool {
// CommandLine is the default set of command-line flags, parsed from os.Args.
var CommandLine = NewFlagSet(os.Args[0], ExitOnError)
// NewFlagSet returns a new, empty flag set with the specified name and
// error handling property.
// NewFlagSet returns a new, empty flag set with the specified name,
// error handling property and SortFlags set to true.
func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet {
f := &FlagSet{
name: name,
errorHandling: errorHandling,
argsLenAtDash: -1,
interspersed: true,
SortFlags: true,
}
return f
}
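Taken together, a sketch of the ordering behavior these hunks introduce: NewFlagSet defaults SortFlags to true, and VisitAll walks flags in primordial (declaration) order once it is disabled (the flag names are illustrative):

```go
package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
	fs.Bool("zeta", false, "declared first")
	fs.Bool("alpha", false, "declared second")

	fs.SortFlags = false
	fs.VisitAll(func(f *pflag.Flag) { fmt.Print(f.Name, " ") }) // zeta alpha
	fmt.Println()

	fs.SortFlags = true // the NewFlagSet default
	fs.VisitAll(func(f *pflag.Flag) { fmt.Print(f.Name, " ") }) // alpha zeta
	fmt.Println()
}
```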

vendor/github.com/spf13/pflag/int16.go generated vendored, new file (88 lines changed)

@@ -0,0 +1,88 @@
package pflag
import "strconv"
// -- int16 Value
type int16Value int16
func newInt16Value(val int16, p *int16) *int16Value {
*p = val
return (*int16Value)(p)
}
func (i *int16Value) Set(s string) error {
v, err := strconv.ParseInt(s, 0, 16)
*i = int16Value(v)
return err
}
func (i *int16Value) Type() string {
return "int16"
}
func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) }
func int16Conv(sval string) (interface{}, error) {
v, err := strconv.ParseInt(sval, 0, 16)
if err != nil {
return 0, err
}
return int16(v), nil
}
// GetInt16 returns the int16 value of a flag with the given name
func (f *FlagSet) GetInt16(name string) (int16, error) {
val, err := f.getFlagType(name, "int16", int16Conv)
if err != nil {
return 0, err
}
return val.(int16), nil
}
// Int16Var defines an int16 flag with specified name, default value, and usage string.
// The argument p points to an int16 variable in which to store the value of the flag.
func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) {
f.VarP(newInt16Value(value, p), name, "", usage)
}
// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
f.VarP(newInt16Value(value, p), name, shorthand, usage)
}
// Int16Var defines an int16 flag with specified name, default value, and usage string.
// The argument p points to an int16 variable in which to store the value of the flag.
func Int16Var(p *int16, name string, value int16, usage string) {
CommandLine.VarP(newInt16Value(value, p), name, "", usage)
}
// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash.
func Int16VarP(p *int16, name, shorthand string, value int16, usage string) {
CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage)
}
// Int16 defines an int16 flag with specified name, default value, and usage string.
// The return value is the address of an int16 variable that stores the value of the flag.
func (f *FlagSet) Int16(name string, value int16, usage string) *int16 {
p := new(int16)
f.Int16VarP(p, name, "", value, usage)
return p
}
// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 {
p := new(int16)
f.Int16VarP(p, name, shorthand, value, usage)
return p
}
// Int16 defines an int16 flag with specified name, default value, and usage string.
// The return value is the address of an int16 variable that stores the value of the flag.
func Int16(name string, value int16, usage string) *int16 {
return CommandLine.Int16P(name, "", value, usage)
}
// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash.
func Int16P(name, shorthand string, value int16, usage string) *int16 {
return CommandLine.Int16P(name, shorthand, value, usage)
}
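The new file follows the same template as pflag's other numeric types; a minimal usage sketch (the flag name and default are illustrative):

```go
package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Values outside the int16 range fail at parse time via ParseInt(s, 0, 16).
	port := flag.Int16("port", 8080, "listen port")
	flag.Parse()
	fmt.Println(*port)
}
```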

vendor/golang.org/x/time/AUTHORS generated vendored, new file (3 lines changed)

@@ -0,0 +1,3 @@
# This source code refers to The Go Authors for copyright purposes.
# The master list of authors is in the main Go distribution,
# visible at http://tip.golang.org/AUTHORS.

vendor/golang.org/x/time/CONTRIBUTORS generated vendored, new file (3 lines changed)

@@ -0,0 +1,3 @@
# This source code was written by the Go contributors.
# The master list of contributors is in the main Go distribution,
# visible at http://tip.golang.org/CONTRIBUTORS.

Some files were not shown because too many files have changed in this diff.