Compare commits


111 Commits

Author SHA1 Message Date
Kubernetes Publisher
d727f5b8b7 Fix Godeps.json to point to kubernetes-1.14.0-beta.1 tags 2019-03-07 17:38:17 +00:00
Kubernetes Publisher
8d298c8afa Merge remote-tracking branch 'origin/master' into release-1.14. Deleting CHANGELOG-1.10.md CHANGELOG-1.11.md CHANGELOG-1.12.md CHANGELOG-1.4.md CHANGELOG-1.7.md CHANGELOG-1.8.md CHANGELOG-1.9.md
Kubernetes-commit: 39dac9a88c72865d249d88ffbfa45ebaac0534a1
2019-02-25 09:44:56 +00:00
Kubernetes Publisher
6c37140c4f Merge pull request #74328 from daixiang0/delete-blank
delete all duplicate empty blanks

Kubernetes-commit: 8993fbc543c18e73668793b5d5e234c0a136735c
2019-03-07 17:36:03 +00:00
Kubernetes Publisher
3cfffd9ca8 Merge pull request #71896 from awly/client-go-keyutil
client-go: extract new keyutil package from util/cert

Kubernetes-commit: b5566c781843a1a8c19993632700e476708a9cee
2019-03-07 17:35:40 +00:00
Kubernetes Publisher
3869957bf2 Merge pull request #73555 from bsalamat/priority_to_ga
Graduate PriorityClass API to GA

Kubernetes-commit: 3afa003126ff50092954839efbe10d584c2511ff
2019-03-07 17:35:18 +00:00
Kubernetes Publisher
0cc51f1e08 Merge remote-tracking branch 'origin/master' into release-1.14. Deleting CHANGELOG-1.8.md CHANGELOG-1.9.md
Kubernetes-commit: e203d7493d4fe5a14b997473493fe19fdc450da1
2019-02-22 09:29:28 +00:00
Kubernetes Publisher
c64ea3c60c Merge pull request #74348 from danielqsj/ku
update k8s.io/utils to fix keymutex issues

Kubernetes-commit: d9f3e96796e2d154b1d4caa156ba95ff9b01e5b2
2019-03-07 17:34:34 +00:00
Kubernetes Publisher
a59fbbfffb Merge pull request #74057 from liggitt/ingress-network-v1beta1
Ingress extensions/v1beta1 -> networking.k8s.io/v1beta1

Kubernetes-commit: 7d75b73e1d72cef7a0f0e2804f8e0a582ccb6b61
2019-03-07 17:34:12 +00:00
danielqsj
0312f5bab6 Update k/utils dependency in staging
Kubernetes-commit: b9ef1dd50b8db18fa3a2558289caa4e75f116260
2019-02-22 10:30:38 +08:00
Xiang Dai
13069f9575 delete all duplicate empty blanks
Signed-off-by: Xiang Dai <764524258@qq.com>

Kubernetes-commit: 36065c6dd717c14e0a90131041e20345a7e5e324
2019-02-22 09:43:51 +08:00
Bobby (Babak) Salamat
825f9540d3 generated files
Kubernetes-commit: 1dac6d03e3645ddcfdb00d84c158f7995cac94c8
2019-02-20 12:42:15 -08:00
Jordan Liggitt
c893169f47 generated files
Kubernetes-commit: f139218ac0711023a85db6ce43d59ad1775a9705
2019-02-14 00:28:24 -05:00
Andrew Lytvynov
8980536f0f Extract new keyutil package from client-go/util/cert
This package contains public/private key utilities copied directly from
client-go/util/cert. All imports were updated.

Future PRs will actually refactor the libraries.

Updates #71004

Kubernetes-commit: 18458392ca24c85c688e655aace1afd04f864cbd
2018-12-09 16:24:38 -08:00
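The commit above only says the key utilities were copied verbatim from client-go/util/cert into a new keyutil package. A hedged sketch of using it, assuming the helper names carried over unchanged (LoadOrGenerateKeyFile, ParsePrivateKeyPEM) and an illustrative key path:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/keyutil"
)

func main() {
	// Load the PEM-encoded private key at this path, generating one if absent.
	// (Path and error handling are illustrative only.)
	pemBytes, generated, err := keyutil.LoadOrGenerateKeyFile("/tmp/sample.key")
	if err != nil {
		panic(err)
	}
	fmt.Printf("generated a new key: %v\n", generated)

	// Parse the PEM data back into a crypto private key.
	key, err := keyutil.ParsePrivateKeyPEM(pemBytes)
	if err != nil {
		panic(err)
	}
	fmt.Printf("loaded key of type %T\n", key)
}
```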
Stavros Foteinopoulos
e124f1f28f Update vendor package github.com/hashicorp/golang-lru
Kubernetes-commit: df3fbf9295cb8d650d2e951ae46099d07e2130d2
2018-11-07 15:19:34 +02:00
Kubernetes Publisher
ce638bb1aa Merge pull request #74130 from sttts/sttts-sample-generated-pkg
sample-{apiserver,controller}: move generated file into pkg/generated

Kubernetes-commit: 4e54d7ba26a5404c5027297ae864e61012dd8eac
2019-02-16 01:51:19 +00:00
Kubernetes Publisher
1afb5d43d5 Merge pull request #73699 from pivotal-k8s/doc-links
Update deprecated and broken links

Kubernetes-commit: fbee96d733ad941e94cd2a4726c65ddb3c71a21d
2019-02-16 01:50:56 +00:00
Kubernetes Publisher
f232764143 Merge pull request #71512 from rkamudhan/patch-1
Update the go get to k8s.io/sample-controller

Kubernetes-commit: b0f65ca1cfd322128f7a3c1676f668acab551ffc
2019-02-15 17:44:40 +00:00
Dr. Stefan Schimanski
574892d56c Update generated code
Kubernetes-commit: 714b28cf22133d39bd637f8b0e6d5774a4eff415
2019-02-15 13:35:58 +01:00
Dr. Stefan Schimanski
84833d0138 sample-controller: adapt to package moves
Kubernetes-commit: b671a06e46ced532c76136474afa8e88a136e99f
2019-02-15 13:08:42 +01:00
Dr. Stefan Schimanski
8f7a02cc01 sample-controller: generate into pkg/generated package
Kubernetes-commit: 4f0e9467b96b88e8ec3b7bd8593700e99634d007
2019-02-15 13:09:16 +01:00
Kubernetes Publisher
2fb0948fde Merge pull request #73976 from jennybuckley/apply-errors
Make server-side apply's conflict errors more human readable

Kubernetes-commit: 62734d36705f3b0bb2a1a6fc9cf00b5206fede20
2019-02-15 04:20:42 +00:00
Kubernetes Publisher
347c2688d7 Merge pull request #72214 from caesarxuchao/move-discovery
Move cached discovery clients to their own packages

Kubernetes-commit: d8f014613865955b7e4ce6fefbf38a3c8fe97971
2019-02-14 07:56:42 +00:00
jennybuckley
fb0d8cffef Update generated
Kubernetes-commit: 7dba6fe90dde7724a0361a2a723c3255fdb23bcc
2019-02-12 15:16:43 -08:00
Ben Moss
ae6c030c2a Update deprecated links
Kubernetes-commit: 34ac4d9ee9fed65d770403fff4cb037253fc5d09
2019-02-04 13:28:31 -05:00
Chao Xu
e8f23bda6e generated
Kubernetes-commit: 1f2e2e61cf088cb0070fea4b88d9cefb7f8f2e3e
2018-12-19 13:52:12 -08:00
Kubernetes Publisher
713d159512 Merge pull request #71223 from sttts/sttts-openapi-aggreation-without-clone
openapi-aggregation: speed up merging from 1 sec to 50-100 ms

Kubernetes-commit: 6912bbb153ef53a0ece34e7dae74ca79bfc07b82
2019-02-11 21:33:01 +00:00
Dr. Stefan Schimanski
92637fbe63 Update staging godeps
Kubernetes-commit: 233178eec68573fc317523c8ce2b1409b9524e96
2019-02-09 17:49:59 +01:00
Kubernetes Publisher
717147646c Merge pull request #73540 from rlenferink/patch-5
Updated OWNERS files to include link to docs

Kubernetes-commit: b50c643be0cd528e438cd985384e229bd263d0e5
2019-02-08 17:30:43 +00:00
Kubernetes Publisher
765ad16233 Merge pull request #73713 from caesarxuchao/bump-json-patch-again
Importing the latest json patch and set the accumulated copy size limit

Kubernetes-commit: b00b5d4ac00cab50b50554ce2a9212e1cd689496
2019-02-07 09:31:55 +00:00
Kubernetes Publisher
cfd89cde21 Merge pull request #72947 from apelisse/wip-feature-serverside-apply-merge
Merge feature-serverside-apply back in master

Kubernetes-commit: 2a5a41a08b08075aa2960170c8342d974ccc2cd3
2019-02-05 05:30:45 +00:00
Chao Xu
e81d2102ff Importing latest json-patch.
Kubernetes-commit: b8911b8d79f7ca4030c6e2cdff9f873a47848021
2019-02-04 09:47:54 -08:00
Roy Lenferink
7a965bdb19 Updated OWNERS files to include link to docs
Kubernetes-commit: b43c04452f3b563473b5c2a765d4ac18cc0ff58f
2019-01-30 20:05:00 +01:00
Antoine Pelisse
3787f3c1f6 Run generation scripts
Kubernetes-commit: 5949154ec55e13be6877fb4aa17b89652b82c6f8
2019-01-29 14:26:39 -08:00
Kubernetes Publisher
192a614242 Merge pull request #73455 from danielqsj/keymutex
Migrate to k8s.io/utils/keymutex and k8s.io/utils/strings

Kubernetes-commit: 8f1082c6aff6df3cb5e103474d9d846b6f8ebf90
2019-01-31 09:56:19 -08:00
Kubernetes Publisher
6dfb0e8ec7 Merge branch 'master' into keymutex
Kubernetes-commit: e72b32558c8e9ed16690ef5a8e909c12fcc47f87
2019-02-01 15:25:31 +00:00
Kubernetes Prow Robot
f55470f1d9 sync: squashed up to merge 1f7e9fd9a2b31ee21babef2cbdd18caeb8c14cdc in e72b32558c8e9ed16690ef5a8e909c12fcc47f87 2019-02-01 15:25:01 +00:00
danielqsj
1fa7ebb984 Update vendor/k8s.io/utils
Kubernetes-commit: 257ae4da008964786bbc55e39cbd4c6121cebde6
2019-01-29 17:53:19 +08:00
Kubernetes Publisher
eb58fda54a Merge pull request #73212 from danielqsj/samplecontroller
fix shellcheck in sample-controller

Kubernetes-commit: 6ebe874d11dc04972d74904e0cf2b15506ea318a
2019-02-01 15:25:01 +00:00
Kubernetes Publisher
abcd26d668 Merge pull request #73308 from krzysied/reflector_trace2
Adding trace to reflector initialization

Kubernetes-commit: f5f5d9a54a6397012b780e1714abf7d8b4f5037c
2019-01-26 17:11:25 +00:00
Krzysztof Siedlecki
da16a67695 adding dependencies
Kubernetes-commit: 91d9f7f0c0052ee67253e7d1afe2a51a848cb5d6
2019-01-25 12:53:24 +01:00
Kubernetes Publisher
3095c2b11e Merge pull request #73243 from andrewsykim/update-utils-vendor
Update vendor k8s.io/utils and remove internal utils

Kubernetes-commit: 2cbb16bc8dd456c5db72c1667926abdbc87c32c7
2019-01-26 17:11:03 +00:00
Kubernetes Publisher
11d2fee50c Merge pull request #72972 from liggitt/remove-alpha-initializers
Remove use of alpha initializers

Kubernetes-commit: e28c757e8758638811130848abe7a47f760057c0
2019-01-26 17:10:41 +00:00
Kubernetes Publisher
955fe458eb Merge pull request #73217 from kubernetes/revert-73071-reflector_trace
Revert "Adding trace to reflector initialization"

Kubernetes-commit: a5d55f49b0f480b5bfe9fc40c9a07d9c04c117fd
2019-01-26 17:10:19 +00:00
Andrew Kim
5cd6257fee vendor k8s.io/utils to 8a87304934321b4b0ad72a7cb3cbc715d67d38c7
Kubernetes-commit: e321cdaee4bf22bce018011884af33ea16fa5b62
2019-01-23 21:19:50 -05:00
Wojciech Tyczynski
96c09e7697 Revert "Adding trace to reflector initialization"
Kubernetes-commit: c8d89b34cc692c4b477fe40ac4511be73a581629
2019-01-23 14:50:37 +01:00
danielqsj
7ec2c1043b fix shellcheck in sample-controller
Kubernetes-commit: 078115a604fb4e89123b4a4e12530fd823abeb88
2019-01-23 20:56:25 +08:00
Kubernetes Publisher
16271d1c70 Merge pull request #73071 from krzysied/reflector_trace
Adding trace to reflector initialization

Kubernetes-commit: fd0df59f5ba786cb25329e3a9d2793ad4227ed87
2019-01-22 05:15:55 -08:00
Krzysztof Siedlecki
bc3b41b4f2 adding dependency
Kubernetes-commit: e2a017327c1af628f4f0069cbd49865ad1e81975
2019-01-18 15:04:47 +01:00
Kubernetes Publisher
39f0f6c7c1 Merge pull request #73076 from yastij/refactor-events
refactor util functions for event recording

Kubernetes-commit: 193f659a1cd454b93cbe1e7b1f13b77c21783461
2019-01-19 02:39:10 +00:00
Kubernetes Publisher
59dd5ff32c Merge pull request #72138 from dims/switch-location-for-goautoneg
Switch location for goautoneg vendored code

Kubernetes-commit: ef2a5b948b0d6c422873a823755ee7d12284dcc3
2019-01-19 02:38:42 +00:00
Yassine TIJANI
049851c2f4 refactor util functions for event recording
Kubernetes-commit: 7296288928d347dfb1dcb990d779cdfce96ead1b
2019-01-18 15:28:37 +01:00
Jordan Liggitt
1c2764f53e Remove alpha InitializerConfiguration types, Initializers admission plugin
Kubernetes-commit: dc1fa870bff65c20f48a83ea3af54adb3f526e28
2019-01-16 10:19:44 -05:00
Kubernetes Publisher
c3a5aa93b2 Merge pull request #72239 from wojtek-t/v1_lease_api
Promote Lease API to v1

Kubernetes-commit: 5354f8bdfeeb14e52abb8bac4c24993f1addbfb1
2018-12-21 20:05:18 +00:00
wojtekt
994e8982d1 Autogenerated code
Kubernetes-commit: 9664779bdd99b84ff13b6dcd9d2dc72fd3831d7b
2018-12-20 11:56:54 +01:00
Kubernetes Publisher
9925b3e980 Merge pull request #72193 from kargakis/owners
Remove myself from OWNERS where I am not active

Kubernetes-commit: 544c49ab030a8930f3a6903de232473e0bf4e8e5
2018-12-19 19:16:19 +00:00
Michalis Kargakis
e029e15793 Remove myself from OWNERS where I am not active
Kubernetes-commit: c602a9ed4ffba92a229dcc45f1a9e3036596d90b
2018-12-19 10:19:22 +01:00
Davanum Srinivas
bc1f9127bf Switch location for goautoneg vendored code
Move to github.com/munnerz/goautoneg as bitbucket is flaky!

Change-Id: Iaa6e964ef0d6f308eea59bcc6f365ecd7dbf0784

Kubernetes-commit: 16fd72d6c91ba466a0e955a1d59a6c8d9e8791bc
2018-12-17 20:39:56 -05:00
Kubernetes Publisher
65d042cac5 Merge pull request #70995 from stewart-yu/stewart-sample-controller-redurabce-import
remove duplicated import

Kubernetes-commit: 163b54dc4d615bc34bf14c74e9230b733d8b4565
2018-11-29 23:25:01 +00:00
Kuralamudhan Ramakrishnan
bef4a2dcb7 Update the go get to k8s.io/sample-controller
Referring to the issue: https://github.com/kubernetes/sample-controller/issues/20#issuecomment-398716033

Kubernetes-commit: 6d631e32f281949e1b31e65e0cbd203be39a1c6f
2018-11-28 15:36:01 +00:00
Kubernetes Publisher
d17738308f Merge pull request #71296 from cblecker/json-iterator-bump
Update github.com/json-iterator/go to 1.1.4

Kubernetes-commit: 18619f0849b18944300cb22c224afe071d4317bd
2018-11-21 07:26:00 +00:00
Christoph Blecker
9cab99a527 Update github.com/json-iterator/go to 1.1.4
Kubernetes-commit: c7d39519279937693e654149eb6b67af46836135
2018-11-20 18:13:01 -08:00
Kubernetes Publisher
e943f6752e Merge pull request #70998 from deads2k/client-07-listwatchtimeout
update the client generator to set a client-side timeout

Kubernetes-commit: 9878253c3cb8fa4699615b41375578fe681b0f9a
2018-11-16 23:23:45 +00:00
David Eads
a5c672885d generated
Kubernetes-commit: 8f7edec615fb9cd722b7f8310dab3efa25351b7c
2018-11-16 08:38:57 -05:00
stewart-yu
4a9e8aba65 remove duplicated import
Kubernetes-commit: 217dbeafaf3293d193d6bdf3a8a220329d55dfe9
2018-11-13 20:08:39 +08:00
Kubernetes Publisher
ff6be62b4c Merge pull request #70889 from dims/update-cadvisor-and-other-repos-for-klog-take-2
Update cadvisor and other repos for klog

Kubernetes-commit: a3ccea9d8743f2ff82e41b6c2af6dc2c41dc7b10
2018-11-10 19:25:14 +00:00
Davanum Srinivas
ff201e3f3b Update all the staging Godeps.json
Change-Id: I64b30c68a606b4f5c095a66496a1e48c4d62ea88

Kubernetes-commit: 68ce375d0039738df5a2a837122215f3224f1fde
2018-11-09 16:41:26 -05:00
Davanum Srinivas
a508a6c07c Move from glog to klog
- Move from the old github.com/golang/glog to k8s.io/klog
- klog has explicit InitFlags() so we add them as necessary
- we update the other repositories that we vendor that made a similar
change from glog to klog
  * github.com/kubernetes/repo-infra
  * k8s.io/gengo/
  * k8s.io/kube-openapi/
  * github.com/google/cadvisor
- Entirely remove all references to glog
- Fix some tests by explicit InitFlags in their init() methods

Change-Id: I92db545ff36fcec83afe98f550c9e630098b3135

Kubernetes-commit: 954996e231074dc7429f7be1256a579bedd8344c
2018-11-09 13:49:10 -05:00
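A minimal sketch of what the migration above implies for consumers: unlike glog, klog does not register its flags automatically, so InitFlags is called explicitly. The program below is illustrative and not taken from the commit:

```go
package main

import (
	"flag"

	"k8s.io/klog"
)

func main() {
	// klog, unlike glog, does not register its flags in init(); do it explicitly.
	klog.InitFlags(nil) // nil registers the flags on the default flag.CommandLine set
	flag.Parse()
	defer klog.Flush()

	klog.Info("logging through klog instead of glog")
	klog.V(4).Info("verbose message, visible with -v=4")
}
```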
Kubernetes Publisher
af94dada8b Merge pull request #70598 from dims/switch-from-sigs.k8s.io/yaml-to-ghodss/yaml
Switch to sigs.k8s.io/yaml from ghodss/yaml

Kubernetes-commit: f212b9db236344d3121879e609d53b79f9f106f9
2018-11-08 20:14:56 +00:00
Kubernetes Publisher
a743781f59 Merge pull request #70718 from cblecker/godep-round-a-million
Fork godep to fix inconsistent abbreviation size

Kubernetes-commit: e998d6c2bc83385d98186a87e95a0f947e121ec1
2018-11-08 05:37:47 +00:00
Kubernetes Publisher
f0dafbb5a8 Merge pull request #70663 from wenjiaswe/gonet110
Upgrade golang.org/x/net image to release-branch.go1.10

Kubernetes-commit: 471aff6673ebe4ac9c9219a7579d23831e1146be
2018-11-07 21:46:35 +00:00
Christoph Blecker
b0de173bf4 Update godeps
Kubernetes-commit: d15da2c586ba27df895c22486b1b527852c6363d
2018-11-06 16:23:59 -08:00
Wenjia Zhang
fb81d7e3f2 update staging godeps for golang.org/x/net/... to release-branch.go1.10
Kubernetes-commit: adf155ee9f9dfa023069282ec195f9eb8d1ce0fe
2018-11-06 15:49:50 -08:00
Davanum Srinivas
c7ea123f93 Switch to sigs.k8s.io/yaml from ghodss/yaml
Change-Id: Ic72b5131bf441d159012d67a6a3d87088d0e6d31

Kubernetes-commit: 43f523d405b012fa8d90dd95b667f520e036f6bc
2018-11-02 16:41:57 -04:00
Kubernetes Publisher
8c6eeb11d7 Merge pull request #70260 from lavalamp/schema-fix
bump kube-openapi dep

Kubernetes-commit: 6f897af2daffd8cd3539f1ef279b9f9bc280d98f
2018-11-01 09:39:19 +00:00
Daniel Smith
a5ec744ada update generated deps
Kubernetes-commit: dcb10d81d18f4e8a58496ef61b62247ae93bbaef
2018-10-31 17:11:23 -07:00
Kubernetes Publisher
dcc8aa9d76 Merge pull request #70031 from nrfox/requeue-on-error
Sample Controller: requeue work item on syncHandler error

Kubernetes-commit: b7b0aae4358065b98a3db04411bec65a11eb166e
2018-10-27 10:54:09 +00:00
Kubernetes Publisher
7ee81ff4ae Merge pull request #69636 from p0lyn0mial/sample_controller_factory_start
fixes the way the informers are started in sample controller pkg

Kubernetes-commit: 8e7e226422aabc62f84a0e151c3cd73fa1da70d4
2018-10-26 15:32:11 +00:00
nrfox
94e9a85e76 Requeue work item on syncHandler error
Kubernetes-commit: 8f86654b8e23bbf960a286712df35c003f7389f8
2018-10-19 13:54:31 -04:00
Kubernetes Publisher
fe5d3e0825 Merge pull request #67547 from pbarker/audit-api
dynamic audit configuration api

Kubernetes-commit: 0652e098d03197aa4cc0a53440f62e425bf992c5
2018-10-18 01:54:19 +00:00
Kubernetes Publisher
f1200b1b60 Merge pull request #69269 from miguelbernadi/fix-golint-issues-68026
Fix undocumented golint errors

Kubernetes-commit: 31438712d6602ed16f0fd8311782178cec0bb774
2018-10-17 05:52:13 +00:00
Kubernetes Publisher
83a48d571f Merge pull request #69330 from vaikas-google/json-patch
Add support for JSON patch in fake client

Kubernetes-commit: 2f8b585d9c9815886e5563ed5ac75cd0bc94a8e8
2018-10-16 01:52:43 +00:00
Patrick Barker
eee7e4e176 adds dynamic audit api generated
Kubernetes-commit: b8e1250487f51bd27bd75f4bfb45c8635d6344ed
2018-10-16 00:20:30 +00:00
Kubernetes Publisher
e8109b7ca8 Merge pull request #69627 from dims/updating-ghodss-yaml-to-latest-version-2
Updating ghodss/yaml and gopkg.in/yaml.v2 to latest version 2

Kubernetes-commit: 3348f9ae23d6502218c6600bcee8d05e00ce5ee3
2018-10-13 01:29:34 +00:00
Kubernetes Publisher
76b83c1674 Merge pull request #69322 from jpbetz/etcd-client-3.3.9
Update etcd client to 3.3 for 1.13

Kubernetes-commit: a8c7a3fd5e707243af68b10a8a581c2c59248222
2018-10-11 07:15:43 +00:00
p0lyn0mial
3cc2958b99 fixes the way the informers are started in sample controller pkg
Kubernetes-commit: e55ca64dbd3b141f8a373d129b1a619db0e672f0
2018-10-10 20:54:36 +02:00
Davanum Srinivas
724b71bd9d Updating ghodss/yaml and gopkg.in/yaml.v2 to latest version
Change-Id: I1f1a10b68a2d3e796724c6ac26f0ed3260153588

Kubernetes-commit: 6364af128b0ca50e66501519f333e696a26801d9
2018-10-10 10:14:20 -04:00
Kubernetes Publisher
77db795851 sync: update godeps 2018-10-09 09:01:07 +00:00
Miguel Bernabeu
7bf6cd7d56 Fix golint errors when generating informer code
Kubernetes-commit: acf78cd6133de6faea9221d8c53b02ca6009b0bb
2018-10-03 21:39:26 +02:00
Ville Aikas
69ec0e4f12 Add support for JSON patch in fake client
Kubernetes-commit: a363b153851326ece7f81f4c1ae0a1ab8700a209
2018-10-02 13:30:52 +00:00
Joe Betz
bef5795b2d Update etcd client to 3.3.9
Kubernetes-commit: 4263c752115c3796ee5715c7de4cbc2e237809d3
2018-10-01 16:53:57 -07:00
Kubernetes Publisher
b8a1e69103 Merge pull request #68709 from krzyzacy/fix-sample-test
fix patch compare in sample-controller test

Kubernetes-commit: 1b2298cb75a1375083a275560c71f8a7479c43d0
2018-09-26 07:57:48 +00:00
Kubernetes Publisher
4f47c578d9 Merge pull request #68245 from jingyih/remove_tagName_in_goDoc
*: Remove comment tags in GoDoc

Kubernetes-commit: a67689dfcab0ed547e1d060c414eae7c81629cc9
2018-09-25 15:47:45 +00:00
Kubernetes Publisher
d71bd50edb Merge pull request #68238 from justinsb/update_reflect2_to_101
Update reflect2 to 1.0.1 (memory utilization fix)

Kubernetes-commit: a94ea824eb59e92188f166c302d7995ba9002667
2018-09-25 15:47:15 +00:00
Sen Lu
f98e21072d fix patch compare in test
Kubernetes-commit: 6e40cd846c1d68fed1315e37823ba600e9c761f2
2018-09-14 22:48:49 -07:00
Jingyi Hu
6d9191098e *: Remove comment tags in GoDoc
Adding a blank line between the comment tag and package name in doc.go, so
that comment tags such as '+k8s:deepcopy-gen=package' do not show up
in GoDoc.

Kubernetes-commit: 61117761cd4a1b2e6ad9ff2d7eb915f3d2739dc6
2018-09-04 14:08:32 -07:00
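The change amounts to detaching the machine-readable tags from the package doc comment with a blank line, as in this sketch (it mirrors the doc.go diff shown further down in this comparison):

```go
// +k8s:deepcopy-gen=package
// +groupName=samplecontroller.k8s.io

// Package v1alpha1 is the v1alpha1 version of the API.
// With the blank line above, godoc shows only this sentence and not the tags.
package v1alpha1
```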
Justin Santa Barbara
30a25c44dc Update reflect2 to 1.0.1 (memory utilization fix)
Picking up https://github.com/modern-go/reflect2/pull/2 which lazily
initializes a map of all types which we don't use in k8s, saving
memory & initialization time.

Kubernetes-commit: 970e4da4c6636b835175dc79a7492d22dc11ba33
2018-09-04 13:13:00 -04:00
Kubernetes Publisher
af8f21f6ee Merge pull request #67359 from mikedanese/reloadtoken
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions here: https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md.

client: periodically reload InClusterConfig token

/sig auth
/sig api-machinery

```release-note
NONE
```

Kubernetes-commit: 7b6647a418c660f2c87f183f706b297f1cb573ca
2018-09-02 08:53:46 +00:00
Kubernetes Publisher
9bec8a6a53 Merge pull request #64097 from damemi/hpa-metrics-specificity
Automatic merge from submit-queue (batch tested with PRs 67894, 64097). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

HPA metrics specificity improvements

**What this PR does / why we need it**:
Improves available specificity for HPA metrics by adding metric selector fields for metrics of Pods and Objects.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
Implements this KEP: https://github.com/kubernetes/community/pull/2055

**Special notes for your reviewer**:
Need to add/update tests?

**Release note**:

```release-note
Introduces autoscaling/v2beta2 and custom_metrics/v1beta2, which implement metric selectors for Object and Pods metrics, as well as allowing AverageValue targets on Objects, similar to External metrics.
```

/assign @DirectXMan12

Kubernetes-commit: fdb5707194d56cbbd0da11c5be3a2a5653e714c9
2018-08-28 00:29:15 +00:00
Kubernetes Publisher
a7ac24a6d0 Merge pull request #66971 from tnozicka/informer-watcher
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

#50102 Task 2: Add UntilWithSync

**What this PR does / why we need it**:
This is a split off from https://github.com/kubernetes/kubernetes/pull/50102 to go in smaller pieces.

Introduces UntilWithSync based on informer.

**Needs https://github.com/kubernetes/kubernetes/pull/66906 first**
/hold

**Release note**:
```release-note
NONE
```

/priority important-soon
/kind bug
(bug after the main PR which is this split from)

Kubernetes-commit: c4f355a2ad9692f5459541d4e4d94fcbc5f7d946
2018-08-23 16:44:54 +00:00
Kubernetes Publisher
be98dc6210 Merge pull request #67686 from imroc/update-sample-controller-readme
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

update sample-controller README

Kubernetes-commit: 405eecad6dde9ab24b770fe583da51b9fd7c6941
2018-08-22 12:50:00 +00:00
roc
5962ba9f56 update sample-controller README
remind that CustomResourceSubresources is beta in v1.11 and enabled by default

Kubernetes-commit: ac741a8412d3c73ba39224f2f2e849f9976acc5a
2018-08-22 13:42:47 +08:00
Kubernetes Publisher
8605b8ceff Merge pull request #67596 from nikhita/add-apimachinery-label-owners
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Add sig/api-machinery label to apimachinery OWNERS files

Inspired by https://github.com/kubernetes/kubernetes/pull/67548. List of OWNERS files taken from https://github.com/kubernetes/community/blob/master/sig-api-machinery/README.md#subprojects.

**Release note**:

```release-note
NONE
```

Kubernetes-commit: 8f4ab6fe7635983443ebef7fde5b9c8861bef5bb
2018-08-21 00:23:11 +00:00
Nikhita Raghunath
bc340e9483 Add sig/api-machinery label to apimachinery OWNERS files
Kubernetes-commit: 6e47ba1fded3dc9932bd62affb673d321089760f
2018-08-20 18:46:47 +05:30
Kubernetes Publisher
acc9f193ca Merge pull request #65779 from cblecker/mergo-update
Automatic merge from submit-queue. If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

update github.com/imdario/mergo to v0.3.5

**What this PR does / why we need it**:
Updates the github.com/imdario/mergo library to v0.3.5. We were pinned because of a functionality change in the dependency; however, a new function was introduced with similar functionality to the old.

There are apparently some Debian packaging issues (https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=878254) because of this. I'm still not clear what those are, unless they are forcing the library to update, as opposed to using our `vendor/`.

That said, this will allow for some of those vendor conflicts to resolve for anyone else who is using client-go, so that's at least worthwhile.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
fixes #27543, fixes https://github.com/kubernetes/client-go/issues/431

**Special notes for your reviewer**:

**Release note**:
```release-note
NONE
```

Kubernetes-commit: 6b4135267911b6c10ed536308d29d2a7c453eef6
2018-08-18 02:39:08 +00:00
Kubernetes Publisher
3e2e91bf31 Merge pull request #66906 from tnozicka/rename-until
Automatic merge from submit-queue (batch tested with PRs 67071, 66906, 66722, 67276, 67039). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

#50102 Task 1: Move apimachinery/pkg/watch.Until into client-go/tools/watch.UntilWithoutRetry

**What this PR does / why we need it**:
This is a split off from https://github.com/kubernetes/kubernetes/pull/50102 to go in smaller pieces.

Moves `apimachinery/pkg/watch.Until` into `client-go/tools/watch.UntilWithoutRetry` and adds context so it is cancelable.

**Release note**:
```release-note
NONE
```

**Dev release note**:
```dev-release-note
`apimachinery/pkg/watch.Until` has been moved to `client-go/tools/watch.UntilWithoutRetry`.
While switching please consider using the new `client-go/tools/watch.UntilWithSync` or `client-go/tools/watch.Until`.
```

/cc @smarterclayton @kubernetes/sig-api-machinery-pr-reviews
/milestone v1.12
/priority important-soon
/kind bug
(bug after the main PR which is this split from)

Kubernetes-commit: b6f0aed056ab94fef0b6f54e1ca1d66a5fc228b3
2018-08-16 15:23:25 +00:00
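Taken together, the two watch-related PRs above move the Until-style helpers into client-go/tools/watch. A hedged sketch of waiting for a Pod to reach Running with UntilWithSync, assuming the shape described in those PRs (a ListerWatcher source, an optional precondition, and condition functions); resource choice and timeout are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	watchtools "k8s.io/client-go/tools/watch"
)

// waitForPodRunning blocks until the named Pod is Running, the watch reports
// an error, or the timeout expires.
func waitForPodRunning(client kubernetes.Interface, namespace, name string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
	defer cancel()

	// ListWatch restricted to the single Pod we care about.
	lw := cache.NewListWatchFromClient(
		client.CoreV1().RESTClient(), "pods", namespace,
		fields.OneTermEqualSelector("metadata.name", name))

	// UntilWithSync lists first (so an already-running Pod is seen immediately),
	// then watches until a condition returns true or the context is done.
	_, err := watchtools.UntilWithSync(ctx, lw, &corev1.Pod{}, nil,
		func(event watch.Event) (bool, error) {
			pod, ok := event.Object.(*corev1.Pod)
			if !ok {
				return false, fmt.Errorf("unexpected object type %T", event.Object)
			}
			return pod.Status.Phase == corev1.PodRunning, nil
		})
	return err
}

func main() {} // placeholder so the sketch compiles on its own
```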
Kubernetes Publisher
b24841bd57 Merge pull request #67178 from cblecker/cfssl
Automatic merge from submit-queue (batch tested with PRs 66602, 67178, 67207, 67125, 66332). If you want to cherry-pick this change to another branch, please follow the instructions [here](https://github.com/kubernetes/community/blob/master/contributors/devel/cherry-picks.md).

Vendor cfssl/cfssljson utilities

**What this PR does / why we need it**:
Vendors the `cfssl` and `cfssljson` tools. Updates `kube::util::ensure-cfssl` to use them.

**Which issue(s) this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close the issue(s) when PR gets merged)*:
fixes #66995, fixes #60070

**Special notes for your reviewer**:
1. Add cfssl/cfssljson to the required bins for saving
2. Manually cloned/checked out the new dependencies to my gopath. `godep restore` doesn't pull them down because they aren't required or already in the `Godeps.json`. Used @BenTheElder's list here: https://github.com/kubernetes/kubernetes/issues/66995#issuecomment-410594532
3. `hack/godep-save.sh` to add the packages and dependencies to godep
4. Fixed two bugs when building:
  a. `golang.org/x/crypto` needed to be updated
  b. `github.com/cloudflare/cfssl` needed to be updated to 56268a613a so we can vendor their fork of `crypto/tls`, as we discard their modified vendored stdlib.
5. Update staging godeps
6. Update the `kube::util::ensure-cfssl` to install from vendor

**Release note**:
```release-note
NONE
```

Kubernetes-commit: 818e632c1fde5fb01bc8ccf9b9ee6201f33a28b4
2018-08-16 15:22:34 +00:00
Mike Danese
70ded76448 reload token file for InClusterConfig every 5 minutes
Kubernetes-commit: 287f6a564fb8c264f281056011f4a66f197b18f4
2018-08-13 16:47:17 -07:00
Tomas Nozicka
a4f7d75f32 Update Godeps
Kubernetes-commit: f6836df5dd104bde62d80be411582c5d08dcaa65
2018-08-07 20:15:14 +02:00
Tomas Nozicka
fb123e6c66 Update Godeps
Kubernetes-commit: 41f5c3dbf55c5b02a3067789b0b835c6eb6f3bd3
2018-08-06 14:49:19 +02:00
Christoph Blecker
72a1e10055 Update github.com/imdario/mergo to v0.3.5
Kubernetes-commit: 12b2e2c2b53ab987e956673bc778e040af22e304
2018-07-03 11:15:43 -07:00
Mike Dame
056e0354fd Generate files and modifications for autoscaling/v2beta2 and custom_metrics/v1beta2
Kubernetes-commit: 77d7f9cfa2b489b75b47fa1e1de5660139f83034
2018-06-28 14:35:50 -04:00
1016 changed files with 51679 additions and 94897 deletions


@@ -1,2 +1,2 @@
Sorry, we do not accept changes directly against this repository. Please see
Sorry, we do not accept changes directly against this repository. Please see
CONTRIBUTING.md for information on where and how to contribute instead.


@@ -2,6 +2,6 @@
Do not open pull requests directly against this repository, they will be ignored. Instead, please open pull requests against [kubernetes/kubernetes](https://git.k8s.io/kubernetes/). Please follow the same [contributing guide](https://git.k8s.io/kubernetes/CONTRIBUTING.md) you would follow for any other pull request made to kubernetes/kubernetes.
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
This repository is published from [kubernetes/kubernetes/staging/src/k8s.io/sample-controller](https://git.k8s.io/kubernetes/staging/src/k8s.io/sample-controller) by the [kubernetes publishing-bot](https://git.k8s.io/publishing-bot).
Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/staging.md) for more information
Please see [Staging Directory and Publishing](https://git.k8s.io/community/contributors/devel/sig-architecture/staging.md) for more information

Godeps/Godeps.json (generated): 642 lines changed (file diff suppressed because it is too large)

Godeps/OWNERS (generated): 2 lines changed

@@ -1,2 +1,4 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- dep-approvers

OWNERS: 3 lines changed

@@ -1,9 +1,10 @@
# See the OWNERS docs at https://go.k8s.io/owners
approvers:
- sttts
- munnerz
reviewers:
- gregory-m
- kargakis
- sttts
- munnerz
- nikhita


@@ -42,6 +42,8 @@ This is an example of how to build a kube-like controller with a single type.
```sh
# assumes you have a working kubeconfig, not required if operating in-cluster
$ go get k8s.io/sample-controller
$ cd $GOPATH/src/k8s.io/sample-controller
$ go build -o sample-controller .
$ ./sample-controller -kubeconfig=$HOME/.kube/config


@@ -20,13 +20,11 @@ import (
"fmt"
"time"
"github.com/golang/glog"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
appsinformers "k8s.io/client-go/informers/apps/v1"
@@ -37,12 +35,13 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
"k8s.io/klog"
samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
samplescheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
informers "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1"
listers "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
samplescheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
listers "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
)
const controllerAgentName = "sample-controller"
@@ -96,9 +95,9 @@ func NewController(
// Add sample-controller types to the default Kubernetes Scheme so Events can be
// logged for sample-controller types.
utilruntime.Must(samplescheme.AddToScheme(scheme.Scheme))
glog.V(4).Info("Creating event broadcaster")
klog.V(4).Info("Creating event broadcaster")
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventBroadcaster.StartLogging(klog.Infof)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: kubeclientset.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: controllerAgentName})
@@ -113,7 +112,7 @@ func NewController(
recorder: recorder,
}
glog.Info("Setting up event handlers")
klog.Info("Setting up event handlers")
// Set up an event handler for when Foo resources change
fooInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.enqueueFoo,
@@ -150,27 +149,27 @@ func NewController(
// is closed, at which point it will shutdown the workqueue and wait for
// workers to finish processing their current work items.
func (c *Controller) Run(threadiness int, stopCh <-chan struct{}) error {
defer runtime.HandleCrash()
defer utilruntime.HandleCrash()
defer c.workqueue.ShutDown()
// Start the informer factories to begin populating the informer caches
glog.Info("Starting Foo controller")
klog.Info("Starting Foo controller")
// Wait for the caches to be synced before starting workers
glog.Info("Waiting for informer caches to sync")
klog.Info("Waiting for informer caches to sync")
if ok := cache.WaitForCacheSync(stopCh, c.deploymentsSynced, c.foosSynced); !ok {
return fmt.Errorf("failed to wait for caches to sync")
}
glog.Info("Starting workers")
klog.Info("Starting workers")
// Launch two workers to process Foo resources
for i := 0; i < threadiness; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
glog.Info("Started workers")
klog.Info("Started workers")
<-stopCh
glog.Info("Shutting down workers")
klog.Info("Shutting down workers")
return nil
}
@@ -213,23 +212,25 @@ func (c *Controller) processNextWorkItem() bool {
// Forget here else we'd go into a loop of attempting to
// process a work item that is invalid.
c.workqueue.Forget(obj)
runtime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
utilruntime.HandleError(fmt.Errorf("expected string in workqueue but got %#v", obj))
return nil
}
// Run the syncHandler, passing it the namespace/name string of the
// Foo resource to be synced.
if err := c.syncHandler(key); err != nil {
return fmt.Errorf("error syncing '%s': %s", key, err.Error())
// Put the item back on the workqueue to handle any transient errors.
c.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s': %s, requeuing", key, err.Error())
}
// Finally, if no error occurs we Forget this item so it does not
// get queued again until another change happens.
c.workqueue.Forget(obj)
glog.Infof("Successfully synced '%s'", key)
klog.Infof("Successfully synced '%s'", key)
return nil
}(obj)
if err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return true
}
@@ -243,7 +244,7 @@ func (c *Controller) syncHandler(key string) error {
// Convert the namespace/name string into a distinct namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil
}
@@ -253,7 +254,7 @@ func (c *Controller) syncHandler(key string) error {
// The Foo resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key))
return nil
}
@@ -265,7 +266,7 @@ func (c *Controller) syncHandler(key string) error {
// We choose to absorb the error here as the worker would requeue the
// resource otherwise. Instead, the next time the resource is updated
// the resource will be queued again.
runtime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
utilruntime.HandleError(fmt.Errorf("%s: deployment name must be specified", key))
return nil
}
@@ -295,7 +296,7 @@ func (c *Controller) syncHandler(key string) error {
// number does not equal the current desired replicas on the Deployment, we
// should update the Deployment resource.
if foo.Spec.Replicas != nil && *foo.Spec.Replicas != *deployment.Spec.Replicas {
glog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
klog.V(4).Infof("Foo %s replicas: %d, deployment replicas: %d", name, *foo.Spec.Replicas, *deployment.Spec.Replicas)
deployment, err = c.kubeclientset.AppsV1().Deployments(foo.Namespace).Update(newDeployment(foo))
}
@@ -338,7 +339,7 @@ func (c *Controller) enqueueFoo(obj interface{}) {
var key string
var err error
if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
runtime.HandleError(err)
utilruntime.HandleError(err)
return
}
c.workqueue.AddRateLimited(key)
@@ -355,17 +356,17 @@ func (c *Controller) handleObject(obj interface{}) {
if object, ok = obj.(metav1.Object); !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
runtime.HandleError(fmt.Errorf("error decoding object, invalid type"))
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
object, ok = tombstone.Obj.(metav1.Object)
if !ok {
runtime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
utilruntime.HandleError(fmt.Errorf("error decoding object tombstone, invalid type"))
return
}
glog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
klog.V(4).Infof("Recovered deleted object '%s' from tombstone", object.GetName())
}
glog.V(4).Infof("Processing object: %s", object.GetName())
klog.V(4).Infof("Processing object: %s", object.GetName())
if ownerRef := metav1.GetControllerOf(object); ownerRef != nil {
// If this object is not owned by a Foo, we should not do anything more
// with it.
@@ -375,7 +376,7 @@ func (c *Controller) handleObject(obj interface{}) {
foo, err := c.foosLister.Foos(object.GetNamespace()).Get(ownerRef.Name)
if err != nil {
glog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
klog.V(4).Infof("ignoring orphaned object '%s' of foo '%s'", object.GetSelfLink(), ownerRef.Name)
return
}


@@ -34,8 +34,8 @@ import (
"k8s.io/client-go/tools/record"
samplecontroller "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
"k8s.io/sample-controller/pkg/client/clientset/versioned/fake"
informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
"k8s.io/sample-controller/pkg/generated/clientset/versioned/fake"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
)
var (
@@ -198,7 +198,7 @@ func checkAction(expected, actual core.Action, t *testing.T) {
expPatch := e.GetPatch()
patch := a.GetPatch()
if !reflect.DeepEqual(expPatch, expPatch) {
if !reflect.DeepEqual(expPatch, patch) {
t.Errorf("Action %s %s has wrong patch\nDiff:\n %s",
a.GetVerb(), a.GetResource().Resource, diff.ObjectGoPrintDiff(expPatch, patch))
}


@@ -1,11 +1,11 @@
# client-go under the hood
The [client-go](https://github.com/kubernetes/client-go/) library contains various mechanisms that you can use when
developing your custom controllers. These mechanisms are defined in the
The [client-go](https://github.com/kubernetes/client-go/) library contains various mechanisms that you can use when
developing your custom controllers. These mechanisms are defined in the
[tools/cache folder](https://github.com/kubernetes/client-go/tree/master/tools/cache) of the library.
Here is a pictorial representation showing how the various components in
the client-go library work and their interaction points with the custom
Here is a pictorial representation showing how the various components in
the client-go library work and their interaction points with the custom
controller code that you will write.
<p align="center">
@@ -19,7 +19,7 @@ watches the Kubernetes API for the specified resource type (kind).
The function in which this is done is *ListAndWatch*.
The watch could be for an in-built resource or it could be for a custom resource.
When the reflector receives notification about existence of new
resource instance through the watch API, it gets the newly created object
resource instance through the watch API, it gets the newly created object
using the corresponding listing API and puts it in the Delta Fifo queue
inside the *watchHandler* function.
@@ -38,27 +38,27 @@ that generates an objects key as `<namespace>/<name>` combination for that ob
## Custom Controller components
* Informer reference: This is the reference to the Informer instance that knows
how to work with your custom resource objects. Your custom controller code needs
* Informer reference: This is the reference to the Informer instance that knows
how to work with your custom resource objects. Your custom controller code needs
to create the appropriate Informer.
* Indexer reference: This is the reference to the Indexer instance that knows
how to work with your custom resource objects. Your custom controller code needs
to create this. You will be using this reference for retrieving objects for
* Indexer reference: This is the reference to the Indexer instance that knows
how to work with your custom resource objects. Your custom controller code needs
to create this. You will be using this reference for retrieving objects for
later processing.
The base controller in client-go provides the *NewIndexerInformer* function to create Informer and Indexer.
In your code you can either [directly invoke this function](https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go#L174) or [use factory methods for creating an informer.](https://github.com/kubernetes/sample-controller/blob/master/main.go#L61)
* Resource Event Handlers: These are the callback functions which will be called by
the Informer when it wants to deliver an object to your controller. The typical
* Resource Event Handlers: These are the callback functions which will be called by
the Informer when it wants to deliver an object to your controller. The typical
pattern to write these functions is to obtain the dispatched objects key
and enqueue that key in a work queue for further processing.
* Work queue: This is the queue that you create in your controller code to decouple
delivery of an object from its processing. Resource event handler functions are written
* Work queue: This is the queue that you create in your controller code to decouple
delivery of an object from its processing. Resource event handler functions are written
to extract the delivered objects key and add that to the work queue.
* Process Item: This is the function that you create in your code which processes items
from the work queue. There can be one or more other functions that do the actual processing.
* Process Item: This is the function that you create in your code which processes items
from the work queue. There can be one or more other functions that do the actual processing.
These functions will typically use the [Indexer reference](https://github.com/kubernetes/client-go/blob/master/examples/workqueue/main.go#L73), or a Listing wrapper to retrieve the object corresponding to the key.
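A compressed sketch of the loop this document describes, assuming the standard client-go pieces it links to (NewIndexerInformer, a rate-limited work queue, key-based lookups in the Indexer); the resource kind, namespace, and function names here are illustrative:

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// newPodController wires an Informer/Indexer pair to a work queue: the event
// handlers only extract each object's key and enqueue it.
func newPodController(client kubernetes.Interface) (cache.Indexer, cache.Controller, workqueue.RateLimitingInterface) {
	queue := workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter())

	// Reflector/ListWatch source for the resource kind we care about.
	lw := cache.NewListWatchFromClient(client.CoreV1().RESTClient(), "pods", "default", fields.Everything())

	indexer, controller := cache.NewIndexerInformer(lw, &corev1.Pod{}, 0, cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			if key, err := cache.MetaNamespaceKeyFunc(obj); err == nil {
				queue.Add(key)
			}
		},
		UpdateFunc: func(_, newObj interface{}) {
			if key, err := cache.MetaNamespaceKeyFunc(newObj); err == nil {
				queue.Add(key)
			}
		},
		DeleteFunc: func(obj interface{}) {
			if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
				queue.Add(key)
			}
		},
	}, cache.Indexers{})

	return indexer, controller, queue
}

// processNextItem reads one key from the queue and looks the object up in the
// Indexer, decoupling event delivery from processing as described above.
func processNextItem(indexer cache.Indexer, queue workqueue.RateLimitingInterface) bool {
	key, quit := queue.Get()
	if quit {
		return false
	}
	defer queue.Done(key)

	obj, exists, err := indexer.GetByKey(key.(string))
	if err != nil {
		queue.AddRateLimited(key) // transient error: retry later
		return true
	}
	if !exists {
		fmt.Printf("%s was deleted\n", key)
	} else {
		fmt.Printf("processing %s\n", obj.(*corev1.Pod).GetName())
	}
	queue.Forget(key)
	return true
}

func main() {} // wiring into a real controller loop is omitted from this sketch
```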


@@ -18,18 +18,18 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_ROOT=$(dirname ${BASH_SOURCE})/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd ${SCRIPT_ROOT}; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-generator 2>/dev/null || echo ../code-generator)}
# generate the code with:
# --output-base because this script should also be able to run inside the vendor dir of
# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
# instead of the $GOPATH directly. For normal projects this can be dropped.
${CODEGEN_PKG}/generate-groups.sh "deepcopy,client,informer,lister" \
k8s.io/sample-controller/pkg/client k8s.io/sample-controller/pkg/apis \
"${CODEGEN_PKG}"/generate-groups.sh "deepcopy,client,informer,lister" \
k8s.io/sample-controller/pkg/generated k8s.io/sample-controller/pkg/apis \
samplecontroller:v1alpha1 \
--output-base "$(dirname ${BASH_SOURCE})/../../.." \
--go-header-file ${SCRIPT_ROOT}/hack/boilerplate.go.txt
--output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." \
--go-header-file "${SCRIPT_ROOT}"/hack/boilerplate.go.txt
# To use your own boilerplate text use:
# --go-header-file ${SCRIPT_ROOT}/hack/custom-boilerplate.go.txt
# To use your own boilerplate text append:
# --go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt


@@ -18,7 +18,7 @@ set -o errexit
set -o nounset
set -o pipefail
SCRIPT_ROOT=$(dirname "${BASH_SOURCE}")/..
SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/..
DIFFROOT="${SCRIPT_ROOT}/pkg"
TMP_DIFFROOT="${SCRIPT_ROOT}/_tmp/pkg"

main.go: 20 lines changed

@@ -20,15 +20,15 @@ import (
"flag"
"time"
"github.com/golang/glog"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
// Uncomment the following line to load the gcp plugin (only required to authenticate against GKE clusters).
// _ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
informers "k8s.io/sample-controller/pkg/client/informers/externalversions"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
"k8s.io/sample-controller/pkg/signals"
)
@@ -45,17 +45,17 @@ func main() {
cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %s", err.Error())
klog.Fatalf("Error building kubeconfig: %s", err.Error())
}
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
glog.Fatalf("Error building kubernetes clientset: %s", err.Error())
klog.Fatalf("Error building kubernetes clientset: %s", err.Error())
}
exampleClient, err := clientset.NewForConfig(cfg)
if err != nil {
glog.Fatalf("Error building example clientset: %s", err.Error())
klog.Fatalf("Error building example clientset: %s", err.Error())
}
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, time.Second*30)
@@ -65,11 +65,13 @@ func main() {
kubeInformerFactory.Apps().V1().Deployments(),
exampleInformerFactory.Samplecontroller().V1alpha1().Foos())
go kubeInformerFactory.Start(stopCh)
go exampleInformerFactory.Start(stopCh)
// notice that there is no need to run Start methods in a separate goroutine. (i.e. go kubeInformerFactory.Start(stopCh)
// Start method is non-blocking and runs all registered informers in a dedicated goroutine.
kubeInformerFactory.Start(stopCh)
exampleInformerFactory.Start(stopCh)
if err = controller.Run(2, stopCh); err != nil {
glog.Fatalf("Error running controller: %s", err.Error())
klog.Fatalf("Error running controller: %s", err.Error())
}
}


@@ -15,7 +15,7 @@ limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=samplecontroller.k8s.io
// Package v1alpha1 is the v1alpha1 version of the API.
// +groupName=samplecontroller.k8s.io
package v1alpha1


@@ -22,7 +22,7 @@ import (
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
)
type Interface interface {


@@ -24,9 +24,9 @@ import (
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
clientset "k8s.io/sample-controller/pkg/client/clientset/versioned"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
clientset "k8s.io/sample-controller/pkg/generated/clientset/versioned"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
fakesamplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1/fake"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.


@@ -131,7 +131,7 @@ func (c *FakeFoos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.Li
// Patch applies the patch and returns the patched foo.
func (c *FakeFoos) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Foo, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, data, subresources...), &v1alpha1.Foo{})
Invokes(testing.NewPatchSubresourceAction(foosResource, c.ns, name, pt, data, subresources...), &v1alpha1.Foo{})
if obj == nil {
return nil, err


@@ -21,7 +21,7 @@ package fake
import (
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
v1alpha1 "k8s.io/sample-controller/pkg/client/clientset/versioned/typed/samplecontroller/v1alpha1"
v1alpha1 "k8s.io/sample-controller/pkg/generated/clientset/versioned/typed/samplecontroller/v1alpha1"
)
type FakeSamplecontrollerV1alpha1 struct {


@@ -19,12 +19,14 @@ limitations under the License.
package v1alpha1
import (
"time"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
scheme "k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
scheme "k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
)
// FoosGetter has a method to return a FooInterface.
@@ -76,11 +78,16 @@ func (c *foos) Get(name string, options v1.GetOptions) (result *v1alpha1.Foo, er
// List takes label and field selectors, and returns the list of Foos that match those selectors.
func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1alpha1.FooList{}
err = c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
@@ -88,11 +95,16 @@ func (c *foos) List(opts v1.ListOptions) (result *v1alpha1.FooList, err error) {
// Watch returns a watch.Interface that watches the requested foos.
func (c *foos) Watch(opts v1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("foos").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
@@ -150,10 +162,15 @@ func (c *foos) Delete(name string, options *v1.DeleteOptions) error {
// DeleteCollection deletes a collection of objects.
func (c *foos) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("foos").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()


@@ -22,7 +22,7 @@ import (
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
v1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
"k8s.io/sample-controller/pkg/client/clientset/versioned/scheme"
"k8s.io/sample-controller/pkg/generated/clientset/versioned/scheme"
)
type SamplecontrollerV1alpha1Interface interface {


@@ -27,9 +27,9 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
samplecontroller "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
samplecontroller "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.


@@ -24,9 +24,10 @@ import (
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory a small interface to allow for adding an informer without an import cycle
@@ -35,4 +36,5 @@ type SharedInformerFactory interface {
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)


@@ -19,8 +19,8 @@ limitations under the License.
package samplecontroller
import (
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/client/informers/externalversions/samplecontroller/v1alpha1"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/generated/informers/externalversions/samplecontroller/v1alpha1"
)
// Interface provides access to each of this group's versions.


@@ -26,9 +26,9 @@ import (
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
samplecontrollerv1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
versioned "k8s.io/sample-controller/pkg/client/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/client/listers/samplecontroller/v1alpha1"
versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
v1alpha1 "k8s.io/sample-controller/pkg/generated/listers/samplecontroller/v1alpha1"
)
// FooInformer provides access to a shared informer and lister for


@@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
internalinterfaces "k8s.io/sample-controller/pkg/client/informers/externalversions/internalinterfaces"
internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.

vendor/github.com/evanphx/json-patch/.travis.yml (generated, vendored, new file): 16 lines

@@ -0,0 +1,16 @@
language: go
go:
- 1.8
- 1.7
install:
- if ! go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi
- go get github.com/jessevdk/go-flags
script:
- go get
- go test -cover ./...
notifications:
email: false

vendor/github.com/evanphx/json-patch/LICENSE (generated, vendored, new file): 25 lines

@@ -0,0 +1,25 @@
Copyright (c) 2014, Evan Phoenix
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of the Evan Phoenix nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

297
vendor/github.com/evanphx/json-patch/README.md generated vendored Normal file
View File

@@ -0,0 +1,297 @@
# JSON-Patch
`jsonpatch` is a library which provides functionality for both applying
[RFC6902 JSON patches](http://tools.ietf.org/html/rfc6902) against documents, as
well as for calculating & applying [RFC7396 JSON merge patches](https://tools.ietf.org/html/rfc7396).
[![GoDoc](https://godoc.org/github.com/evanphx/json-patch?status.svg)](http://godoc.org/github.com/evanphx/json-patch)
[![Build Status](https://travis-ci.org/evanphx/json-patch.svg?branch=master)](https://travis-ci.org/evanphx/json-patch)
[![Report Card](https://goreportcard.com/badge/github.com/evanphx/json-patch)](https://goreportcard.com/report/github.com/evanphx/json-patch)
# Get It!
**Latest and greatest**:
```bash
go get -u github.com/evanphx/json-patch
```
**Stable Versions**:
* Version 4: `go get -u gopkg.in/evanphx/json-patch.v4`
(previous versions below `v3` are unavailable)
# Use It!
* [Create and apply a merge patch](#create-and-apply-a-merge-patch)
* [Create and apply a JSON Patch](#create-and-apply-a-json-patch)
* [Comparing JSON documents](#comparing-json-documents)
* [Combine merge patches](#combine-merge-patches)
# Configuration
* There is a global configuration variable `jsonpatch.SupportNegativeIndices`.
This defaults to `true` and enables the non-standard practice of allowing
negative indices to mean indices starting at the end of an array. This
functionality can be disabled by setting `jsonpatch.SupportNegativeIndices =
false`.
* There is a global configuration variable `jsonpatch.AccumulatedCopySizeLimit`,
which limits the total size increase in bytes caused by "copy" operations in a
patch. It defaults to 0, which means there is no limit. A short sketch of setting
both options follows below.
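As a minimal sketch (assuming only the two package-level variables documented above), both settings can be adjusted before a patch is applied; the document and patch used here are purely illustrative:
```go
package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// SupportNegativeIndices defaults to true; leaving it enabled allows
	// indices such as "-1" to count from the end of an array.
	jsonpatch.SupportNegativeIndices = true

	// Cap the total size growth caused by "copy" operations at 1 MiB.
	// The default of 0 means "no limit".
	jsonpatch.AccumulatedCopySizeLimit = 1024 * 1024

	original := []byte(`{"items": [1, 2, 3]}`)
	patchJSON := []byte(`[{"op": "remove", "path": "/items/-1"}]`)

	patch, err := jsonpatch.DecodePatch(patchJSON)
	if err != nil {
		panic(err)
	}

	// With negative indices enabled, "/items/-1" addresses the last element.
	modified, err := patch.Apply(original)
	if err != nil {
		panic(err)
	}
	fmt.Printf("modified document: %s\n", modified)
}
```
With these illustrative inputs, the program should print `modified document: {"items":[1,2]}`.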
## Create and apply a merge patch
Given both an original JSON document and a modified JSON document, you can create
a [Merge Patch](https://tools.ietf.org/html/rfc7396) document that describes the
changes needed to convert the original document into the modified one.
Once you have a merge patch, you can apply it to other JSON documents using the
`jsonpatch.MergePatch(document, patch)` function.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
// Let's create a merge patch from these two documents...
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
target := []byte(`{"name": "Jane", "age": 24}`)
patch, err := jsonpatch.CreateMergePatch(original, target)
if err != nil {
panic(err)
}
// Now let's apply the patch against a different JSON document...
alternative := []byte(`{"name": "Tina", "age": 28, "height": 3.75}`)
modifiedAlternative, err := jsonpatch.MergePatch(alternative, patch)
fmt.Printf("patch document: %s\n", patch)
fmt.Printf("updated alternative doc: %s\n", modifiedAlternative)
}
```
When run, you get the following output:
```bash
$ go run main.go
patch document: {"height":null,"name":"Jane"}
updated alternative doc: {"age":28,"name":"Jane"}
```
## Create and apply a JSON Patch
You can create patch objects using `DecodePatch([]byte)`, which can then
be applied against JSON documents.
The following is an example of creating a patch from two operations, and
applying it against a JSON document.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
patchJSON := []byte(`[
{"op": "replace", "path": "/name", "value": "Jane"},
{"op": "remove", "path": "/height"}
]`)
patch, err := jsonpatch.DecodePatch(patchJSON)
if err != nil {
panic(err)
}
modified, err := patch.Apply(original)
if err != nil {
panic(err)
}
fmt.Printf("Original document: %s\n", original)
fmt.Printf("Modified document: %s\n", modified)
}
```
When run, you get the following output:
```bash
$ go run main.go
Original document: {"name": "John", "age": 24, "height": 3.21}
Modified document: {"age":24,"name":"Jane"}
```
## Comparing JSON documents
Due to potential whitespace and ordering differences, one cannot simply compare
JSON strings or byte-arrays directly.
As such, you can instead use `jsonpatch.Equal(document1, document2)` to
determine if two JSON documents are _structurally_ equal. This ignores
whitespace differences, and key-value ordering.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
similar := []byte(`
{
"age": 24,
"height": 3.21,
"name": "John"
}
`)
different := []byte(`{"name": "Jane", "age": 20, "height": 3.37}`)
if jsonpatch.Equal(original, similar) {
fmt.Println(`"original" is structurally equal to "similar"`)
}
if !jsonpatch.Equal(original, different) {
fmt.Println(`"original" is _not_ structurally equal to "similar"`)
}
}
```
When run, you get the following output:
```bash
$ go run main.go
"original" is structurally equal to "similar"
"original" is _not_ structurally equal to "similar"
```
## Combine merge patches
Given two JSON merge patch documents, it is possible to combine them into a
single merge patch that describes both sets of changes.
Applying the resulting merge patch to a document yields the same result as
applying each merge patch to the document in succession.
```go
package main
import (
"fmt"
jsonpatch "github.com/evanphx/json-patch"
)
func main() {
original := []byte(`{"name": "John", "age": 24, "height": 3.21}`)
nameAndHeight := []byte(`{"height":null,"name":"Jane"}`)
ageAndEyes := []byte(`{"age":4.23,"eyes":"blue"}`)
// Let's combine these merge patch documents...
combinedPatch, err := jsonpatch.MergeMergePatches(nameAndHeight, ageAndEyes)
if err != nil {
panic(err)
}
// Apply each patch individually against the original document
withoutCombinedPatch, err := jsonpatch.MergePatch(original, nameAndHeight)
if err != nil {
panic(err)
}
withoutCombinedPatch, err = jsonpatch.MergePatch(withoutCombinedPatch, ageAndEyes)
if err != nil {
panic(err)
}
// Apply the combined patch against the original document
withCombinedPatch, err := jsonpatch.MergePatch(original, combinedPatch)
if err != nil {
panic(err)
}
// Do both result in the same thing? They should!
if jsonpatch.Equal(withCombinedPatch, withoutCombinedPatch) {
fmt.Println("Both JSON documents are structurally the same!")
}
fmt.Printf("combined merge patch: %s", combinedPatch)
}
```
When run, you get the following output:
```bash
$ go run main.go
Both JSON documents are structurally the same!
combined merge patch: {"age":4.23,"eyes":"blue","height":null,"name":"Jane"}
```
# CLI for comparing JSON documents
You can install the commandline program `json-patch`.
It takes one or more JSON patch documents as arguments and reads a JSON
document from `stdin`. It applies the patch(es) to the document and outputs
the modified document.
**patch.1.json**
```json
[
{"op": "replace", "path": "/name", "value": "Jane"},
{"op": "remove", "path": "/height"}
]
```
**patch.2.json**
```json
[
{"op": "add", "path": "/address", "value": "123 Main St"},
{"op": "replace", "path": "/age", "value": "21"}
]
```
**document.json**
```json
{
"name": "John",
"age": 24,
"height": 3.21
}
```
You can then run:
```bash
$ go install github.com/evanphx/json-patch/cmd/json-patch
$ cat document.json | json-patch -p patch.1.json -p patch.2.json
{"address":"123 Main St","age":"21","name":"Jane"}
```
# Help It!
Contributions are welcome! Leave [an issue](https://github.com/evanphx/json-patch/issues)
or [create a PR](https://github.com/evanphx/json-patch/compare).
Before creating a pull request, we'd ask that you make sure tests are passing
and that you have added new tests when applicable.
Contributors can run tests using:
```bash
go test -cover ./...
```
Builds for pull requests are tested automatically
using [TravisCI](https://travis-ci.org/evanphx/json-patch).

38
vendor/github.com/evanphx/json-patch/errors.go generated vendored Normal file
View File

@@ -0,0 +1,38 @@
package jsonpatch
import "fmt"
// AccumulatedCopySizeError is an error type returned when the accumulated size
// increase caused by copy operations in a patch operation has exceeded the
// limit.
type AccumulatedCopySizeError struct {
limit int64
accumulated int64
}
// NewAccumulatedCopySizeError returns an AccumulatedCopySizeError.
func NewAccumulatedCopySizeError(l, a int64) *AccumulatedCopySizeError {
return &AccumulatedCopySizeError{limit: l, accumulated: a}
}
// Error implements the error interface.
func (a *AccumulatedCopySizeError) Error() string {
return fmt.Sprintf("Unable to complete the copy, the accumulated size increase of copy is %d, exceeding the limit %d", a.accumulated, a.limit)
}
// ArraySizeError is an error type returned when the array size has exceeded
// the limit.
type ArraySizeError struct {
limit int
size int
}
// NewArraySizeError returns an ArraySizeError.
func NewArraySizeError(l, s int) *ArraySizeError {
return &ArraySizeError{limit: l, size: s}
}
// Error implements the error interface.
func (a *ArraySizeError) Error() string {
return fmt.Sprintf("Unable to create array of size %d, limit is %d", a.size, a.limit)
}

383
vendor/github.com/evanphx/json-patch/merge.go generated vendored Normal file
View File

@@ -0,0 +1,383 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"reflect"
)
func merge(cur, patch *lazyNode, mergeMerge bool) *lazyNode {
curDoc, err := cur.intoDoc()
if err != nil {
pruneNulls(patch)
return patch
}
patchDoc, err := patch.intoDoc()
if err != nil {
return patch
}
mergeDocs(curDoc, patchDoc, mergeMerge)
return cur
}
func mergeDocs(doc, patch *partialDoc, mergeMerge bool) {
for k, v := range *patch {
if v == nil {
if mergeMerge {
(*doc)[k] = nil
} else {
delete(*doc, k)
}
} else {
cur, ok := (*doc)[k]
if !ok || cur == nil {
pruneNulls(v)
(*doc)[k] = v
} else {
(*doc)[k] = merge(cur, v, mergeMerge)
}
}
}
}
func pruneNulls(n *lazyNode) {
sub, err := n.intoDoc()
if err == nil {
pruneDocNulls(sub)
} else {
ary, err := n.intoAry()
if err == nil {
pruneAryNulls(ary)
}
}
}
func pruneDocNulls(doc *partialDoc) *partialDoc {
for k, v := range *doc {
if v == nil {
delete(*doc, k)
} else {
pruneNulls(v)
}
}
return doc
}
func pruneAryNulls(ary *partialArray) *partialArray {
newAry := []*lazyNode{}
for _, v := range *ary {
if v != nil {
pruneNulls(v)
newAry = append(newAry, v)
}
}
*ary = newAry
return ary
}
var errBadJSONDoc = fmt.Errorf("Invalid JSON Document")
var errBadJSONPatch = fmt.Errorf("Invalid JSON Patch")
var errBadMergeTypes = fmt.Errorf("Mismatched JSON Documents")
// MergeMergePatches merges two merge patches together, such that
// applying this resulting merged merge patch to a document yields the same
// as merging each merge patch to the document in succession.
func MergeMergePatches(patch1Data, patch2Data []byte) ([]byte, error) {
return doMergePatch(patch1Data, patch2Data, true)
}
// MergePatch merges the patchData into the docData.
func MergePatch(docData, patchData []byte) ([]byte, error) {
return doMergePatch(docData, patchData, false)
}
func doMergePatch(docData, patchData []byte, mergeMerge bool) ([]byte, error) {
doc := &partialDoc{}
docErr := json.Unmarshal(docData, doc)
patch := &partialDoc{}
patchErr := json.Unmarshal(patchData, patch)
if _, ok := docErr.(*json.SyntaxError); ok {
return nil, errBadJSONDoc
}
if _, ok := patchErr.(*json.SyntaxError); ok {
return nil, errBadJSONPatch
}
if docErr == nil && *doc == nil {
return nil, errBadJSONDoc
}
if patchErr == nil && *patch == nil {
return nil, errBadJSONPatch
}
if docErr != nil || patchErr != nil {
// Not an error, just not a doc, so we turn straight into the patch
if patchErr == nil {
if mergeMerge {
doc = patch
} else {
doc = pruneDocNulls(patch)
}
} else {
patchAry := &partialArray{}
patchErr = json.Unmarshal(patchData, patchAry)
if patchErr != nil {
return nil, errBadJSONPatch
}
pruneAryNulls(patchAry)
out, patchErr := json.Marshal(patchAry)
if patchErr != nil {
return nil, errBadJSONPatch
}
return out, nil
}
} else {
mergeDocs(doc, patch, mergeMerge)
}
return json.Marshal(doc)
}
// resemblesJSONArray indicates whether the byte-slice "appears" to be
// a JSON array or not.
// False-positives are possible, as this function does not check the internal
// structure of the array. It only checks that the outer syntax is present and
// correct.
func resemblesJSONArray(input []byte) bool {
input = bytes.TrimSpace(input)
hasPrefix := bytes.HasPrefix(input, []byte("["))
hasSuffix := bytes.HasSuffix(input, []byte("]"))
return hasPrefix && hasSuffix
}
// CreateMergePatch will return a merge patch document capable of converting
// the original document(s) to the modified document(s).
// The parameters can be bytes of either two JSON Documents, or two arrays of
// JSON documents.
// The merge patch returned follows the specification defined at http://tools.ietf.org/html/draft-ietf-appsawg-json-merge-patch-07
func CreateMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalResemblesArray := resemblesJSONArray(originalJSON)
modifiedResemblesArray := resemblesJSONArray(modifiedJSON)
// Do both byte-slices seem like JSON arrays?
if originalResemblesArray && modifiedResemblesArray {
return createArrayMergePatch(originalJSON, modifiedJSON)
}
// Are both byte-slices not arrays? Then they are likely JSON objects...
if !originalResemblesArray && !modifiedResemblesArray {
return createObjectMergePatch(originalJSON, modifiedJSON)
}
// None of the above? Then return an error because of mismatched types.
return nil, errBadMergeTypes
}
// createObjectMergePatch will return a merge-patch document capable of
// converting the original document to the modified document.
func createObjectMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDoc := map[string]interface{}{}
modifiedDoc := map[string]interface{}{}
err := json.Unmarshal(originalJSON, &originalDoc)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDoc)
if err != nil {
return nil, errBadJSONDoc
}
dest, err := getDiff(originalDoc, modifiedDoc)
if err != nil {
return nil, err
}
return json.Marshal(dest)
}
// createArrayMergePatch will return an array of merge-patch documents capable
// of converting the original document to the modified document for each
// pair of JSON documents provided in the arrays.
// Arrays of mismatched sizes will result in an error.
func createArrayMergePatch(originalJSON, modifiedJSON []byte) ([]byte, error) {
originalDocs := []json.RawMessage{}
modifiedDocs := []json.RawMessage{}
err := json.Unmarshal(originalJSON, &originalDocs)
if err != nil {
return nil, errBadJSONDoc
}
err = json.Unmarshal(modifiedJSON, &modifiedDocs)
if err != nil {
return nil, errBadJSONDoc
}
total := len(originalDocs)
if len(modifiedDocs) != total {
return nil, errBadJSONDoc
}
result := []json.RawMessage{}
for i := 0; i < len(originalDocs); i++ {
original := originalDocs[i]
modified := modifiedDocs[i]
patch, err := createObjectMergePatch(original, modified)
if err != nil {
return nil, err
}
result = append(result, json.RawMessage(patch))
}
return json.Marshal(result)
}
// Returns true if the array matches (must be json types).
// As is idiomatic for go, an empty array is not the same as a nil array.
func matchesArray(a, b []interface{}) bool {
if len(a) != len(b) {
return false
}
if (a == nil && b != nil) || (a != nil && b == nil) {
return false
}
for i := range a {
if !matchesValue(a[i], b[i]) {
return false
}
}
return true
}
// Returns true if the values match (must be json types)
// The types of the values must match, otherwise it will always return false
// If two map[string]interface{} are given, all elements must match.
func matchesValue(av, bv interface{}) bool {
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
return false
}
switch at := av.(type) {
case string:
bt := bv.(string)
if bt == at {
return true
}
case float64:
bt := bv.(float64)
if bt == at {
return true
}
case bool:
bt := bv.(bool)
if bt == at {
return true
}
case nil:
// Both nil, fine.
return true
case map[string]interface{}:
bt := bv.(map[string]interface{})
for key := range at {
if !matchesValue(at[key], bt[key]) {
return false
}
}
for key := range bt {
if !matchesValue(at[key], bt[key]) {
return false
}
}
return true
case []interface{}:
bt := bv.([]interface{})
return matchesArray(at, bt)
}
return false
}
// getDiff returns the (recursive) difference between a and b as a map[string]interface{}.
func getDiff(a, b map[string]interface{}) (map[string]interface{}, error) {
into := map[string]interface{}{}
for key, bv := range b {
av, ok := a[key]
// value was added
if !ok {
into[key] = bv
continue
}
// If types have changed, replace completely
if reflect.TypeOf(av) != reflect.TypeOf(bv) {
into[key] = bv
continue
}
// Types are the same, compare values
switch at := av.(type) {
case map[string]interface{}:
bt := bv.(map[string]interface{})
dst := make(map[string]interface{}, len(bt))
dst, err := getDiff(at, bt)
if err != nil {
return nil, err
}
if len(dst) > 0 {
into[key] = dst
}
case string, float64, bool:
if !matchesValue(av, bv) {
into[key] = bv
}
case []interface{}:
bt := bv.([]interface{})
if !matchesArray(at, bt) {
into[key] = bv
}
case nil:
switch bv.(type) {
case nil:
// Both nil, fine.
default:
into[key] = bv
}
default:
panic(fmt.Sprintf("Unknown type:%T in key %s", av, key))
}
}
// Now add all deleted values as nil
for key := range a {
_, found := b[key]
if !found {
into[key] = nil
}
}
return into, nil
}

696
vendor/github.com/evanphx/json-patch/patch.go generated vendored Normal file
View File

@@ -0,0 +1,696 @@
package jsonpatch
import (
"bytes"
"encoding/json"
"fmt"
"strconv"
"strings"
)
const (
eRaw = iota
eDoc
eAry
)
var (
// SupportNegativeIndices decides whether to support non-standard practice of
// allowing negative indices to mean indices starting at the end of an array.
// Default to true.
SupportNegativeIndices bool = true
// AccumulatedCopySizeLimit limits the total size increase in bytes caused by
// "copy" operations in a patch.
AccumulatedCopySizeLimit int64 = 0
)
type lazyNode struct {
raw *json.RawMessage
doc partialDoc
ary partialArray
which int
}
type operation map[string]*json.RawMessage
// Patch is an ordered collection of operations.
type Patch []operation
type partialDoc map[string]*lazyNode
type partialArray []*lazyNode
type container interface {
get(key string) (*lazyNode, error)
set(key string, val *lazyNode) error
add(key string, val *lazyNode) error
remove(key string) error
}
func newLazyNode(raw *json.RawMessage) *lazyNode {
return &lazyNode{raw: raw, doc: nil, ary: nil, which: eRaw}
}
func (n *lazyNode) MarshalJSON() ([]byte, error) {
switch n.which {
case eRaw:
return json.Marshal(n.raw)
case eDoc:
return json.Marshal(n.doc)
case eAry:
return json.Marshal(n.ary)
default:
return nil, fmt.Errorf("Unknown type")
}
}
func (n *lazyNode) UnmarshalJSON(data []byte) error {
dest := make(json.RawMessage, len(data))
copy(dest, data)
n.raw = &dest
n.which = eRaw
return nil
}
func deepCopy(src *lazyNode) (*lazyNode, int, error) {
if src == nil {
return nil, 0, nil
}
a, err := src.MarshalJSON()
if err != nil {
return nil, 0, err
}
sz := len(a)
ra := make(json.RawMessage, sz)
copy(ra, a)
return newLazyNode(&ra), sz, nil
}
func (n *lazyNode) intoDoc() (*partialDoc, error) {
if n.which == eDoc {
return &n.doc, nil
}
if n.raw == nil {
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial document")
}
err := json.Unmarshal(*n.raw, &n.doc)
if err != nil {
return nil, err
}
n.which = eDoc
return &n.doc, nil
}
func (n *lazyNode) intoAry() (*partialArray, error) {
if n.which == eAry {
return &n.ary, nil
}
if n.raw == nil {
return nil, fmt.Errorf("Unable to unmarshal nil pointer as partial array")
}
err := json.Unmarshal(*n.raw, &n.ary)
if err != nil {
return nil, err
}
n.which = eAry
return &n.ary, nil
}
func (n *lazyNode) compact() []byte {
buf := &bytes.Buffer{}
if n.raw == nil {
return nil
}
err := json.Compact(buf, *n.raw)
if err != nil {
return *n.raw
}
return buf.Bytes()
}
func (n *lazyNode) tryDoc() bool {
if n.raw == nil {
return false
}
err := json.Unmarshal(*n.raw, &n.doc)
if err != nil {
return false
}
n.which = eDoc
return true
}
func (n *lazyNode) tryAry() bool {
if n.raw == nil {
return false
}
err := json.Unmarshal(*n.raw, &n.ary)
if err != nil {
return false
}
n.which = eAry
return true
}
func (n *lazyNode) equal(o *lazyNode) bool {
if n.which == eRaw {
if !n.tryDoc() && !n.tryAry() {
if o.which != eRaw {
return false
}
return bytes.Equal(n.compact(), o.compact())
}
}
if n.which == eDoc {
if o.which == eRaw {
if !o.tryDoc() {
return false
}
}
if o.which != eDoc {
return false
}
for k, v := range n.doc {
ov, ok := o.doc[k]
if !ok {
return false
}
if v == nil && ov == nil {
continue
}
if !v.equal(ov) {
return false
}
}
return true
}
if o.which != eAry && !o.tryAry() {
return false
}
if len(n.ary) != len(o.ary) {
return false
}
for idx, val := range n.ary {
if !val.equal(o.ary[idx]) {
return false
}
}
return true
}
func (o operation) kind() string {
if obj, ok := o["op"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
if err != nil {
return "unknown"
}
return op
}
return "unknown"
}
func (o operation) path() string {
if obj, ok := o["path"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
if err != nil {
return "unknown"
}
return op
}
return "unknown"
}
func (o operation) from() string {
if obj, ok := o["from"]; ok && obj != nil {
var op string
err := json.Unmarshal(*obj, &op)
if err != nil {
return "unknown"
}
return op
}
return "unknown"
}
func (o operation) value() *lazyNode {
if obj, ok := o["value"]; ok {
return newLazyNode(obj)
}
return nil
}
func isArray(buf []byte) bool {
Loop:
for _, c := range buf {
switch c {
case ' ':
case '\n':
case '\t':
continue
case '[':
return true
default:
break Loop
}
}
return false
}
func findObject(pd *container, path string) (container, string) {
doc := *pd
split := strings.Split(path, "/")
if len(split) < 2 {
return nil, ""
}
parts := split[1 : len(split)-1]
key := split[len(split)-1]
var err error
for _, part := range parts {
next, ok := doc.get(decodePatchKey(part))
if next == nil || ok != nil {
return nil, ""
}
if isArray(*next.raw) {
doc, err = next.intoAry()
if err != nil {
return nil, ""
}
} else {
doc, err = next.intoDoc()
if err != nil {
return nil, ""
}
}
}
return doc, decodePatchKey(key)
}
func (d *partialDoc) set(key string, val *lazyNode) error {
(*d)[key] = val
return nil
}
func (d *partialDoc) add(key string, val *lazyNode) error {
(*d)[key] = val
return nil
}
func (d *partialDoc) get(key string) (*lazyNode, error) {
return (*d)[key], nil
}
func (d *partialDoc) remove(key string) error {
_, ok := (*d)[key]
if !ok {
return fmt.Errorf("Unable to remove nonexistent key: %s", key)
}
delete(*d, key)
return nil
}
// set should only be used to implement the "replace" operation, so "key" must
// be an already existing index in "d".
func (d *partialArray) set(key string, val *lazyNode) error {
idx, err := strconv.Atoi(key)
if err != nil {
return err
}
(*d)[idx] = val
return nil
}
func (d *partialArray) add(key string, val *lazyNode) error {
if key == "-" {
*d = append(*d, val)
return nil
}
idx, err := strconv.Atoi(key)
if err != nil {
return err
}
sz := len(*d) + 1
ary := make([]*lazyNode, sz)
cur := *d
if idx >= len(ary) {
return fmt.Errorf("Unable to access invalid index: %d", idx)
}
if SupportNegativeIndices {
if idx < -len(ary) {
return fmt.Errorf("Unable to access invalid index: %d", idx)
}
if idx < 0 {
idx += len(ary)
}
}
copy(ary[0:idx], cur[0:idx])
ary[idx] = val
copy(ary[idx+1:], cur[idx:])
*d = ary
return nil
}
func (d *partialArray) get(key string) (*lazyNode, error) {
idx, err := strconv.Atoi(key)
if err != nil {
return nil, err
}
if idx >= len(*d) {
return nil, fmt.Errorf("Unable to access invalid index: %d", idx)
}
return (*d)[idx], nil
}
func (d *partialArray) remove(key string) error {
idx, err := strconv.Atoi(key)
if err != nil {
return err
}
cur := *d
if idx >= len(cur) {
return fmt.Errorf("Unable to access invalid index: %d", idx)
}
if SupportNegativeIndices {
if idx < -len(cur) {
return fmt.Errorf("Unable to access invalid index: %d", idx)
}
if idx < 0 {
idx += len(cur)
}
}
ary := make([]*lazyNode, len(cur)-1)
copy(ary[0:idx], cur[0:idx])
copy(ary[idx:], cur[idx+1:])
*d = ary
return nil
}
func (p Patch) add(doc *container, op operation) error {
path := op.path()
con, key := findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch add operation does not apply: doc is missing path: \"%s\"", path)
}
return con.add(key, op.value())
}
func (p Patch) remove(doc *container, op operation) error {
path := op.path()
con, key := findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch remove operation does not apply: doc is missing path: \"%s\"", path)
}
return con.remove(key)
}
func (p Patch) replace(doc *container, op operation) error {
path := op.path()
con, key := findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing path: %s", path)
}
_, ok := con.get(key)
if ok != nil {
return fmt.Errorf("jsonpatch replace operation does not apply: doc is missing key: %s", path)
}
return con.set(key, op.value())
}
func (p Patch) move(doc *container, op operation) error {
from := op.from()
con, key := findObject(doc, from)
if con == nil {
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing from path: %s", from)
}
val, err := con.get(key)
if err != nil {
return err
}
err = con.remove(key)
if err != nil {
return err
}
path := op.path()
con, key = findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch move operation does not apply: doc is missing destination path: %s", path)
}
return con.add(key, val)
}
func (p Patch) test(doc *container, op operation) error {
path := op.path()
con, key := findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch test operation does not apply: is missing path: %s", path)
}
val, err := con.get(key)
if err != nil {
return err
}
if val == nil {
if op.value().raw == nil {
return nil
}
return fmt.Errorf("Testing value %s failed", path)
} else if op.value() == nil {
return fmt.Errorf("Testing value %s failed", path)
}
if val.equal(op.value()) {
return nil
}
return fmt.Errorf("Testing value %s failed", path)
}
func (p Patch) copy(doc *container, op operation, accumulatedCopySize *int64) error {
from := op.from()
con, key := findObject(doc, from)
if con == nil {
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing from path: %s", from)
}
val, err := con.get(key)
if err != nil {
return err
}
path := op.path()
con, key = findObject(doc, path)
if con == nil {
return fmt.Errorf("jsonpatch copy operation does not apply: doc is missing destination path: %s", path)
}
valCopy, sz, err := deepCopy(val)
if err != nil {
return err
}
(*accumulatedCopySize) += int64(sz)
if AccumulatedCopySizeLimit > 0 && *accumulatedCopySize > AccumulatedCopySizeLimit {
return NewAccumulatedCopySizeError(AccumulatedCopySizeLimit, *accumulatedCopySize)
}
return con.add(key, valCopy)
}
// Equal indicates if 2 JSON documents have the same structural equality.
func Equal(a, b []byte) bool {
ra := make(json.RawMessage, len(a))
copy(ra, a)
la := newLazyNode(&ra)
rb := make(json.RawMessage, len(b))
copy(rb, b)
lb := newLazyNode(&rb)
return la.equal(lb)
}
// DecodePatch decodes the passed JSON document as an RFC 6902 patch.
func DecodePatch(buf []byte) (Patch, error) {
var p Patch
err := json.Unmarshal(buf, &p)
if err != nil {
return nil, err
}
return p, nil
}
// Apply mutates a JSON document according to the patch, and returns the new
// document.
func (p Patch) Apply(doc []byte) ([]byte, error) {
return p.ApplyIndent(doc, "")
}
// ApplyIndent mutates a JSON document according to the patch, and returns the new
// document indented.
func (p Patch) ApplyIndent(doc []byte, indent string) ([]byte, error) {
var pd container
if doc[0] == '[' {
pd = &partialArray{}
} else {
pd = &partialDoc{}
}
err := json.Unmarshal(doc, pd)
if err != nil {
return nil, err
}
err = nil
var accumulatedCopySize int64
for _, op := range p {
switch op.kind() {
case "add":
err = p.add(&pd, op)
case "remove":
err = p.remove(&pd, op)
case "replace":
err = p.replace(&pd, op)
case "move":
err = p.move(&pd, op)
case "test":
err = p.test(&pd, op)
case "copy":
err = p.copy(&pd, op, &accumulatedCopySize)
default:
err = fmt.Errorf("Unexpected kind: %s", op.kind())
}
if err != nil {
return nil, err
}
}
if indent != "" {
return json.MarshalIndent(pd, "", indent)
}
return json.Marshal(pd)
}
// From http://tools.ietf.org/html/rfc6901#section-4 :
//
// Evaluation of each reference token begins by decoding any escaped
// character sequence. This is performed by first transforming any
// occurrence of the sequence '~1' to '/', and then transforming any
// occurrence of the sequence '~0' to '~'.
var (
rfc6901Decoder = strings.NewReplacer("~1", "/", "~0", "~")
)
func decodePatchKey(k string) string {
return rfc6901Decoder.Replace(k)
}

View File

@@ -1,7 +0,0 @@
language: go
go:
- 1.3
- 1.4
script:
- go test
- go build

View File

@@ -10,5 +10,6 @@
# Please keep the list sorted.
Sendgrid, Inc
Vastech SA (PTY) LTD
Walter Schulze <awalterschulze@gmail.com>

View File

@@ -1,4 +1,5 @@
Anton Povarov <anton.povarov@gmail.com>
Brian Goff <cpuguy83@gmail.com>
Clayton Coleman <ccoleman@redhat.com>
Denis Smirnov <denis.smirnov.91@gmail.com>
DongYun Kang <ceram1000@gmail.com>
@@ -10,9 +11,12 @@ John Shahid <jvshahid@gmail.com>
John Tuley <john@tuley.org>
Laurent <laurent@adyoulike.com>
Patrick Lee <patrick@dropbox.com>
Roger Johansson <rogeralsing@gmail.com>
Sam Nguyen <sam.nguyen@sendgrid.com>
Sergio Arbeo <serabe@gmail.com>
Stephen J Day <stephen.day@docker.com>
Tamir Duberstein <tamird@gmail.com>
Todd Eisenberger <teisenberger@dropbox.com>
Tormod Erevik Lea <tormodlea@gmail.com>
Vyacheslav Kim <kane@sendgrid.com>
Walter Schulze <awalterschulze@gmail.com>

View File

@@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int {
// This is the format used for the sint64 protocol buffer type.
func (p *Buffer) EncodeZigzag64(x uint64) error {
// use signed number to get arithmetic right shift.
return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
func sizeZigzag64(x uint64) int {
return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
return sizeVarint((x << 1) ^ uint64((int64(x) >> 63)))
}
// EncodeZigzag32 writes a zigzag-encoded 32-bit integer

View File

@@ -73,7 +73,6 @@ for a protocol buffer variable v:
When the .proto file specifies `syntax="proto3"`, there are some differences:
- Non-repeated fields of non-message type are values instead of pointers.
- Getters are only generated for message and oneof fields.
- Enum types do not get an Enum method.
The simplest way to describe this is to see an example.

View File

@@ -193,6 +193,7 @@ type Properties struct {
Default string // default value
HasDefault bool // whether an explicit default was provided
CustomType string
CastType string
StdTime bool
StdDuration bool
@@ -341,6 +342,8 @@ func (p *Properties) Parse(s string) {
p.OrigName = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "customtype="):
p.CustomType = strings.Split(f, "=")[1]
case strings.HasPrefix(f, "casttype="):
p.CastType = strings.Split(f, "=")[1]
case f == "stdtime":
p.StdTime = true
case f == "stdduration":

View File

@@ -522,6 +522,17 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
}
return nil
}
} else if len(props.CastType) > 0 {
if _, ok := v.Interface().(interface {
String() string
}); ok {
switch v.Kind() {
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
_, err := fmt.Fprintf(w, "%d", v.Interface())
return err
}
}
} else if props.StdTime {
t, ok := v.Interface().(time.Time)
if !ok {
@@ -531,9 +542,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
if err != nil {
return err
}
props.StdTime = false
err = tm.writeAny(w, reflect.ValueOf(tproto), props)
props.StdTime = true
propsCopy := *props // Make a copy so that this is goroutine-safe
propsCopy.StdTime = false
err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
return err
} else if props.StdDuration {
d, ok := v.Interface().(time.Duration)
@@ -541,9 +552,9 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert
return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
}
dproto := durationProto(d)
props.StdDuration = false
err := tm.writeAny(w, reflect.ValueOf(dproto), props)
props.StdDuration = true
propsCopy := *props // Make a copy so that this is goroutine-safe
propsCopy.StdDuration = false
err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
return err
}
}

View File

@@ -983,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error {
return p.readStruct(fv, terminator)
case reflect.Uint32:
if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
fv.SetUint(uint64(x))
fv.SetUint(x)
return nil
}
case reflect.Uint64:

View File

@@ -1 +0,0 @@
language: go

View File

@@ -1,12 +0,0 @@
# BTree implementation for Go
![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master)
This package provides an in-memory B-Tree implementation for Go, useful as
an ordered, mutable data structure.
The API is based off of the wonderful
http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to
act as a drop-in replacement for gollrb trees.
See http://godoc.org/github.com/google/btree for documentation.

View File

@@ -1,649 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
// * Due to the overhead of storing values as interfaces (each
// value needs to be stored as the value itself, then 2 words for the
// interface pointing to that value and its type), resulting in higher
// memory use.
// * Since interfaces can point to values anywhere in memory, values are
// most likely not stored in contiguous blocks, resulting in a higher
// number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values or backwards iteration.
package btree
import (
"fmt"
"io"
"sort"
"strings"
)
// Item represents a single object in the tree.
type Item interface {
// Less tests whether the current item is less than the given argument.
//
// This must provide a strict weak ordering.
// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
// hold one of either a or b in the tree).
Less(than Item) bool
}
const (
DefaultFreeListSize = 32
)
// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are not safe for concurrent write access.
type FreeList struct {
freelist []*node
}
// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
return &FreeList{freelist: make([]*node, 0, size)}
}
func (f *FreeList) newNode() (n *node) {
index := len(f.freelist) - 1
if index < 0 {
return new(node)
}
f.freelist, n = f.freelist[:index], f.freelist[index]
return
}
func (f *FreeList) freeNode(n *node) {
if len(f.freelist) < cap(f.freelist) {
f.freelist = append(f.freelist, n)
}
}
// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool
// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}
// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
if degree <= 1 {
panic("bad degree")
}
return &BTree{
degree: degree,
freelist: f,
}
}
// items stores items in a node.
type items []Item
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = item
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
item := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return item
}
// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
i := sort.Search(len(s), func(i int) bool {
return item.Less(s[i])
})
if i > 0 && !s[i-1].Less(item) {
return i - 1, true
}
return i, false
}
// children stores child nodes in a node.
type children []*node
// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *children) insertAt(index int, n *node) {
*s = append(*s, nil)
if index < len(*s) {
copy((*s)[index+1:], (*s)[index:])
}
(*s)[index] = n
}
// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *children) removeAt(index int) *node {
n := (*s)[index]
(*s)[index] = nil
copy((*s)[index:], (*s)[index+1:])
*s = (*s)[:len(*s)-1]
return n
}
// pop removes and returns the last element in the list.
func (s *children) pop() (out *node) {
index := len(*s) - 1
out = (*s)[index]
(*s)[index] = nil
*s = (*s)[:index]
return
}
// node is an internal node in a tree.
//
// It must at all times maintain the invariant that either
// * len(children) == 0, len(items) unconstrained
// * len(children) == len(items) + 1
type node struct {
items items
children children
t *BTree
}
// split splits the given node at the given index. The current node shrinks,
// and this function returns the item that existed at that index and a new node
// containing all items/children after it.
func (n *node) split(i int) (Item, *node) {
item := n.items[i]
next := n.t.newNode()
next.items = append(next.items, n.items[i+1:]...)
n.items = n.items[:i]
if len(n.children) > 0 {
next.children = append(next.children, n.children[i+1:]...)
n.children = n.children[:i+1]
}
return item, next
}
// maybeSplitChild checks if a child should be split, and if so splits it.
// Returns whether or not a split occurred.
func (n *node) maybeSplitChild(i, maxItems int) bool {
if len(n.children[i].items) < maxItems {
return false
}
first := n.children[i]
item, second := first.split(maxItems / 2)
n.items.insertAt(i, item)
n.children.insertAt(i+1, second)
return true
}
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item be
// found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
i, found := n.items.find(item)
if found {
out := n.items[i]
n.items[i] = item
return out
}
if len(n.children) == 0 {
n.items.insertAt(i, item)
return nil
}
if n.maybeSplitChild(i, maxItems) {
inTree := n.items[i]
switch {
case item.Less(inTree):
// no change, we want first split node
case inTree.Less(item):
i++ // we want second split node
default:
out := n.items[i]
n.items[i] = item
return out
}
}
return n.children[i].insert(item, maxItems)
}
// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
i, found := n.items.find(key)
if found {
return n.items[i]
} else if len(n.children) > 0 {
return n.children[i].get(key)
}
return nil
}
// min returns the first item in the subtree.
func min(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[0]
}
if len(n.items) == 0 {
return nil
}
return n.items[0]
}
// max returns the last item in the subtree.
func max(n *node) Item {
if n == nil {
return nil
}
for len(n.children) > 0 {
n = n.children[len(n.children)-1]
}
if len(n.items) == 0 {
return nil
}
return n.items[len(n.items)-1]
}
// toRemove details what item to remove in a node.remove call.
type toRemove int
const (
removeItem toRemove = iota // removes the given item
removeMin // removes smallest item in the subtree
removeMax // removes largest item in the subtree
)
// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
var i int
var found bool
switch typ {
case removeMax:
if len(n.children) == 0 {
return n.items.pop()
}
i = len(n.items)
case removeMin:
if len(n.children) == 0 {
return n.items.removeAt(0)
}
i = 0
case removeItem:
i, found = n.items.find(item)
if len(n.children) == 0 {
if found {
return n.items.removeAt(i)
}
return nil
}
default:
panic("invalid type")
}
// If we get to here, we have children.
child := n.children[i]
if len(child.items) <= minItems {
return n.growChildAndRemove(i, item, minItems, typ)
}
// Either we had enough items to begin with, or we've done some
// merging/stealing, because we've got enough now and we're ready to return
// stuff.
if found {
// The item exists at index 'i', and the child we've selected can give us a
// predecessor, since if we've gotten here it's got > minItems items in it.
out := n.items[i]
// We use our special-case 'remove' call with typ=maxItem to pull the
// predecessor of item i (the rightmost leaf of our immediate left child)
// and set it into where we pulled the item from.
n.items[i] = child.remove(nil, minItems, removeMax)
return out
}
// Final recursive call. Once we're here, we know that the item isn't in this
// node and that the child is big enough to remove from.
return child.remove(item, minItems, typ)
}
// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
// 1) item is in this node
// 2) item is in child
// In both cases, we need to handle the two subcases:
// A) node has enough values that it can spare one
// B) node doesn't have enough values
// For the latter, we have to check:
// a) left sibling has node to spare
// b) right sibling has node to spare
// c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
child := n.children[i]
if i > 0 && len(n.children[i-1].items) > minItems {
// Steal from left child
stealFrom := n.children[i-1]
stolenItem := stealFrom.items.pop()
child.items.insertAt(0, n.items[i-1])
n.items[i-1] = stolenItem
if len(stealFrom.children) > 0 {
child.children.insertAt(0, stealFrom.children.pop())
}
} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
// steal from right child
stealFrom := n.children[i+1]
stolenItem := stealFrom.items.removeAt(0)
child.items = append(child.items, n.items[i])
n.items[i] = stolenItem
if len(stealFrom.children) > 0 {
child.children = append(child.children, stealFrom.children.removeAt(0))
}
} else {
if i >= len(n.items) {
i--
child = n.children[i]
}
// merge with right child
mergeItem := n.items.removeAt(i)
mergeChild := n.children.removeAt(i + 1)
child.items = append(child.items, mergeItem)
child.items = append(child.items, mergeChild.items...)
child.children = append(child.children, mergeChild.children...)
n.t.freeNode(mergeChild)
}
return n.remove(item, minItems, typ)
}
// iterate provides a simple method for iterating over elements in the tree.
// It could probably use some work to be extra-efficient (it calls from() a
// little more than it should), but it works pretty well for now.
//
// It requires that 'from' and 'to' both return true for values we should hit
// with the iterator. It should also be the case that 'from' returns true for
// values less than or equal to values 'to' returns true for, and 'to'
// returns true for values greater than or equal to those that 'from'
// does.
func (n *node) iterate(from, to func(Item) bool, iter ItemIterator) bool {
for i, item := range n.items {
if !from(item) {
continue
}
if len(n.children) > 0 && !n.children[i].iterate(from, to, iter) {
return false
}
if !to(item) {
return false
}
if !iter(item) {
return false
}
}
if len(n.children) > 0 {
return n.children[len(n.children)-1].iterate(from, to, iter)
}
return true
}
// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
for _, c := range n.children {
c.print(w, level+1)
}
}
// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
degree int
length int
root *node
freelist *FreeList
}
// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
return t.degree*2 - 1
}
// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
return t.degree - 1
}
func (t *BTree) newNode() (n *node) {
n = t.freelist.newNode()
n.t = t
return
}
func (t *BTree) freeNode(n *node) {
for i := range n.items {
n.items[i] = nil // clear to allow GC
}
n.items = n.items[:0]
for i := range n.children {
n.children[i] = nil // clear to allow GC
}
n.children = n.children[:0]
n.t = nil // clear to allow GC
t.freelist.freeNode(n)
}
// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
if item == nil {
panic("nil item being added to BTree")
}
if t.root == nil {
t.root = t.newNode()
t.root.items = append(t.root.items, item)
t.length++
return nil
} else if len(t.root.items) >= t.maxItems() {
item2, second := t.root.split(t.maxItems() / 2)
oldroot := t.root
t.root = t.newNode()
t.root.items = append(t.root.items, item2)
t.root.children = append(t.root.children, oldroot, second)
}
out := t.root.insert(item, t.maxItems())
if out == nil {
t.length++
}
return out
}
// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
return t.deleteItem(item, removeItem)
}
// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
return t.deleteItem(nil, removeMin)
}
// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
return t.deleteItem(nil, removeMax)
}
func (t *BTree) deleteItem(item Item, typ toRemove) Item {
if t.root == nil || len(t.root.items) == 0 {
return nil
}
out := t.root.remove(item, t.minItems(), typ)
if len(t.root.items) == 0 && len(t.root.children) > 0 {
oldroot := t.root
t.root = t.root.children[0]
t.freeNode(oldroot)
}
if out != nil {
t.length--
}
return out
}
// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(greaterOrEqual) },
func(a Item) bool { return a.Less(lessThan) },
iterator)
}
// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return a.Less(pivot) },
iterator)
}
// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return !a.Less(pivot) },
func(a Item) bool { return true },
iterator)
}
// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
if t.root == nil {
return
}
t.root.iterate(
func(a Item) bool { return true },
func(a Item) bool { return true },
iterator)
}
// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
if t.root == nil {
return nil
}
return t.root.get(key)
}
// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
return min(t.root)
}
// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
return max(t.root)
}
// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
return t.Get(key) != nil
}
// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
return t.length
}
// Int implements the Item interface for integers.
type Int int
// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
return a < b.(Int)
}

View File

@@ -1,76 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build ignore
// This binary compares memory usage between btree and gollrb.
package main
import (
"flag"
"fmt"
"math/rand"
"runtime"
"time"
"github.com/google/btree"
"github.com/petar/GoLLRB/llrb"
)
var (
size = flag.Int("size", 1000000, "size of the tree to build")
degree = flag.Int("degree", 8, "degree of btree")
gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)
func main() {
flag.Parse()
vals := rand.Perm(*size)
var t, v interface{}
v = vals
var stats runtime.MemStats
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- BEFORE ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
start := time.Now()
if *gollrb {
tr := llrb.New()
for _, v := range vals {
tr.ReplaceOrInsert(llrb.Int(v))
}
t = tr // keep it around
} else {
tr := btree.New(*degree)
for _, v := range vals {
tr.ReplaceOrInsert(btree.Int(v))
}
t = tr // keep it around
}
fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
fmt.Println("-------- AFTER ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
for i := 0; i < 10; i++ {
runtime.GC()
}
fmt.Println("-------- AFTER GC ----------")
runtime.ReadMemStats(&stats)
fmt.Printf("%+v\n", stats)
if t == v {
fmt.Println("to make sure vals and tree aren't GC'd")
}
}

View File

@@ -34,21 +34,27 @@ type Fuzzer struct {
nilChance float64
minElements int
maxElements int
maxDepth int
}
// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs,
// RandSource, NilChance, or NumElements in any order.
func New() *Fuzzer {
return NewWithSeed(time.Now().UnixNano())
}
func NewWithSeed(seed int64) *Fuzzer {
f := &Fuzzer{
defaultFuzzFuncs: fuzzFuncMap{
reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime),
},
fuzzFuncs: fuzzFuncMap{},
r: rand.New(rand.NewSource(time.Now().UnixNano())),
r: rand.New(rand.NewSource(seed)),
nilChance: .2,
minElements: 1,
maxElements: 10,
maxDepth: 100,
}
return f
}
@@ -136,6 +142,14 @@ func (f *Fuzzer) genShouldFill() bool {
return f.r.Float64() > f.nilChance
}
// MaxDepth sets the maximum number of recursive fuzz calls that will be made
// before stopping. This includes struct members, pointers, and map and slice
// elements.
func (f *Fuzzer) MaxDepth(d int) *Fuzzer {
f.maxDepth = d
return f
}
// Fuzz recursively fills all of obj's fields with something random. First
// this tries to find a custom fuzz function (see Funcs). If there is no
// custom function this tests whether the object implements fuzz.Interface and,
@@ -144,17 +158,19 @@ func (f *Fuzzer) genShouldFill() bool {
// fails, this will generate random values for all primitive fields and then
// recurse for all non-primitives.
//
// Not safe for cyclic or tree-like structs!
// This is safe for cyclic or tree-like structs, up to a limit. Use the
// MaxDepth method to adjust how deep you need it to recurse.
//
// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ )
// Intended for tests, so will panic on bad input or unimplemented fields.
// obj must be a pointer. Only exported (public) fields can be set (thanks,
// golang :/ ) Intended for tests, so will panic on bad input or unimplemented
// fields.
func (f *Fuzzer) Fuzz(obj interface{}) {
v := reflect.ValueOf(obj)
if v.Kind() != reflect.Ptr {
panic("needed ptr!")
}
v = v.Elem()
f.doFuzz(v, 0)
f.fuzzWithContext(v, 0)
}
// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for
@@ -170,7 +186,7 @@ func (f *Fuzzer) FuzzNoCustom(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
f.doFuzz(v, flagNoCustomFuzz)
f.fuzzWithContext(v, flagNoCustomFuzz)
}
const (
@@ -178,69 +194,87 @@ const (
flagNoCustomFuzz uint64 = 1 << iota
)
func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) {
fc := &fuzzerContext{fuzzer: f}
fc.doFuzz(v, flags)
}
// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer
// be thread-safe.
type fuzzerContext struct {
fuzzer *Fuzzer
curDepth int
}
func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) {
if fc.curDepth >= fc.fuzzer.maxDepth {
return
}
fc.curDepth++
defer func() { fc.curDepth-- }()
if !v.CanSet() {
return
}
if flags&flagNoCustomFuzz == 0 {
// Check for both pointer and non-pointer custom functions.
if v.CanAddr() && f.tryCustom(v.Addr()) {
if v.CanAddr() && fc.tryCustom(v.Addr()) {
return
}
if f.tryCustom(v) {
if fc.tryCustom(v) {
return
}
}
if fn, ok := fillFuncMap[v.Kind()]; ok {
fn(v, f.r)
fn(v, fc.fuzzer.r)
return
}
switch v.Kind() {
case reflect.Map:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
v.Set(reflect.MakeMap(v.Type()))
n := f.genElementCount()
n := fc.fuzzer.genElementCount()
for i := 0; i < n; i++ {
key := reflect.New(v.Type().Key()).Elem()
f.doFuzz(key, 0)
fc.doFuzz(key, 0)
val := reflect.New(v.Type().Elem()).Elem()
f.doFuzz(val, 0)
fc.doFuzz(val, 0)
v.SetMapIndex(key, val)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Ptr:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
v.Set(reflect.New(v.Type().Elem()))
f.doFuzz(v.Elem(), 0)
fc.doFuzz(v.Elem(), 0)
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Slice:
if f.genShouldFill() {
n := f.genElementCount()
if fc.fuzzer.genShouldFill() {
n := fc.fuzzer.genElementCount()
v.Set(reflect.MakeSlice(v.Type(), n, n))
for i := 0; i < n; i++ {
f.doFuzz(v.Index(i), 0)
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Array:
if f.genShouldFill() {
if fc.fuzzer.genShouldFill() {
n := v.Len()
for i := 0; i < n; i++ {
f.doFuzz(v.Index(i), 0)
fc.doFuzz(v.Index(i), 0)
}
return
}
v.Set(reflect.Zero(v.Type()))
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
f.doFuzz(v.Field(i), 0)
fc.doFuzz(v.Field(i), 0)
}
case reflect.Chan:
fallthrough
@@ -255,20 +289,20 @@ func (f *Fuzzer) doFuzz(v reflect.Value, flags uint64) {
// tryCustom searches for custom handlers, and returns true iff it finds a match
// and successfully randomizes v.
func (f *Fuzzer) tryCustom(v reflect.Value) bool {
func (fc *fuzzerContext) tryCustom(v reflect.Value) bool {
// First: see if we have a fuzz function for it.
doCustom, ok := f.fuzzFuncs[v.Type()]
doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()]
if !ok {
// Second: see if it can fuzz itself.
if v.CanInterface() {
intf := v.Interface()
if fuzzable, ok := intf.(Interface); ok {
fuzzable.Fuzz(Continue{f: f, Rand: f.r})
fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r})
return true
}
}
// Finally: see if there is a default fuzz function.
doCustom, ok = f.defaultFuzzFuncs[v.Type()]
doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()]
if !ok {
return false
}
@@ -294,8 +328,8 @@ func (f *Fuzzer) tryCustom(v reflect.Value) bool {
}
doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{
f: f,
Rand: f.r,
fc: fc,
Rand: fc.fuzzer.r,
})})
return true
}
@@ -310,7 +344,7 @@ type Interface interface {
// Continue can be passed to custom fuzzing functions to allow them to use
// the correct source of randomness and to continue fuzzing their members.
type Continue struct {
f *Fuzzer
fc *fuzzerContext
// For convenience, Continue implements rand.Rand via embedding.
// Use this for generating any randomness if you want your fuzzing
@@ -325,7 +359,7 @@ func (c Continue) Fuzz(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
c.f.doFuzz(v, 0)
c.fc.doFuzz(v, 0)
}
// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for
@@ -338,7 +372,7 @@ func (c Continue) FuzzNoCustom(obj interface{}) {
panic("needed ptr!")
}
v = v.Elem()
c.f.doFuzz(v, flagNoCustomFuzz)
c.fc.doFuzz(v, flagNoCustomFuzz)
}
// RandString makes a random string up to 20 characters long. The returned string
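
To make the new MaxDepth knob and thread-safe Fuzz entry points above concrete, here is a minimal usage sketch. It assumes the package is imported as fuzz from github.com/google/gofuzz (the import path is not spelled out in this hunk); NewWithSeed, NilChance and MaxDepth are the constructors and options referenced in the doc comments above.

```go
package main

import fuzz "github.com/google/gofuzz"

// Node is a self-referential type; before this change Fuzz could recurse on it
// without bound, now recursion stops once curDepth reaches maxDepth.
type Node struct {
	Value    int
	Children []*Node
}

func main() {
	f := fuzz.NewWithSeed(42).NilChance(0.3).MaxDepth(5)
	var n Node
	f.Fuzz(&n) // safe for tree-like structs up to the configured depth
}
```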

View File

@@ -1,18 +0,0 @@
sudo: false
language: go
go:
- 1.6.x
- 1.7.x
- 1.8.x
- master
matrix:
allow_failures:
- go: master
fast_finish: true
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d .)
- go tool vet .
- go test -v -race ./...

View File

@@ -1,7 +0,0 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

View File

@@ -1,24 +0,0 @@
httpcache
=========
[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache)
Package httpcache provides a http.RoundTripper implementation that works as a mostly RFC-compliant cache for http responses.
It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy).
Cache Backends
--------------
- The built-in 'memory' cache stores responses in an in-memory map.
- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library.
- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers.
- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage.
- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb).
- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries.
- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache.
License
-------
- [MIT License](LICENSE.txt)
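
For orientation, a minimal sketch of the in-memory backend described above. It is hedged: the URL is illustrative, it assumes the server's response headers allow the cached copy to be served as fresh, and it uses only identifiers (NewMemoryCacheTransport, Client, XFromCache) that appear in the package source shown later in this diff.

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/gregjones/httpcache"
)

func main() {
	client := httpcache.NewMemoryCacheTransport().Client()
	url := "https://example.com/" // illustrative endpoint

	resp1, _ := client.Get(url)
	io.Copy(ioutil.Discard, resp1.Body) // the body must be drained so the response is cached at EOF
	resp1.Body.Close()

	resp2, _ := client.Get(url)
	defer resp2.Body.Close()
	fmt.Println("served from cache:", resp2.Header.Get(httpcache.XFromCache) == "1")
}
```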

View File

@@ -1,61 +0,0 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"github.com/peterbourgon/diskv"
	"io"
)

// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
	d *diskv.Diskv
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	key = keyToFilename(key)
	resp, err := c.d.Read(key)
	if err != nil {
		return []byte{}, false
	}
	return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	key = keyToFilename(key)
	c.d.Erase(key)
}

func keyToFilename(key string) string {
	h := md5.New()
	io.WriteString(h, key)
	return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}

// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
	return &Cache{d}
}
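
A brief sketch of wiring this backend into the httpcache Transport shown later in this diff (the path and URL are illustrative; New and NewTransport are the constructors defined in this change set):

```go
package main

import (
	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// 100MB diskv-backed cache rooted at the given directory.
	cache := diskcache.New("/tmp/httpcache")
	client := httpcache.NewTransport(cache).Client()
	client.Get("https://example.com/") // responses are persisted to disk and reused across runs
}
```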

View File

@@ -1,553 +0,0 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httputil"
"strings"
"sync"
"time"
)
const (
stale = iota
fresh
transparent
// XFromCache is the header added to responses that are returned from the cache
XFromCache = "X-From-Cache"
)
// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
// Get returns the []byte representation of a cached response and a bool
// set to true if the value isn't empty
Get(key string) (responseBytes []byte, ok bool)
// Set stores the []byte representation of a response against a key
Set(key string, responseBytes []byte)
// Delete removes the value associated with the key
Delete(key string)
}
// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
return req.URL.String()
}
// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
cachedVal, ok := c.Get(cacheKey(req))
if !ok {
return
}
b := bytes.NewBuffer(cachedVal)
return http.ReadResponse(bufio.NewReader(b), req)
}
// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
mu sync.RWMutex
items map[string][]byte
}
// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
c.mu.RLock()
resp, ok = c.items[key]
c.mu.RUnlock()
return resp, ok
}
// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
c.mu.Lock()
c.items[key] = resp
c.mu.Unlock()
}
// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
c.mu.Lock()
delete(c.items, key)
c.mu.Unlock()
}
// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
c := &MemoryCache{items: map[string][]byte{}}
return c
}
// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
// The RoundTripper interface actually used to make requests
// If nil, http.DefaultTransport is used
Transport http.RoundTripper
Cache Cache
// If true, responses returned from the cache will be given an extra header, X-From-Cache
MarkCachedResponses bool
}
// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
return &Transport{Cache: c, MarkCachedResponses: true}
}
// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
return &http.Client{Transport: t}
}
// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
header = http.CanonicalHeaderKey(header)
if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
return false
}
}
return true
}
// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
cacheKey := cacheKey(req)
cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
var cachedResp *http.Response
if cacheable {
cachedResp, err = CachedResponse(t.Cache, req)
} else {
// Need to invalidate an existing value
t.Cache.Delete(cacheKey)
}
transport := t.Transport
if transport == nil {
transport = http.DefaultTransport
}
if cacheable && cachedResp != nil && err == nil {
if t.MarkCachedResponses {
cachedResp.Header.Set(XFromCache, "1")
}
if varyMatches(cachedResp, req) {
// Can only use cached value if the new request doesn't Vary significantly
freshness := getFreshness(cachedResp.Header, req.Header)
if freshness == fresh {
return cachedResp, nil
}
if freshness == stale {
var req2 *http.Request
// Add validators if caller hasn't already done so
etag := cachedResp.Header.Get("etag")
if etag != "" && req.Header.Get("etag") == "" {
req2 = cloneRequest(req)
req2.Header.Set("if-none-match", etag)
}
lastModified := cachedResp.Header.Get("last-modified")
if lastModified != "" && req.Header.Get("last-modified") == "" {
if req2 == nil {
req2 = cloneRequest(req)
}
req2.Header.Set("if-modified-since", lastModified)
}
if req2 != nil {
req = req2
}
}
}
resp, err = transport.RoundTrip(req)
if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
// Replace the 304 response with the one from cache, but update with some new headers
endToEndHeaders := getEndToEndHeaders(resp.Header)
for _, header := range endToEndHeaders {
cachedResp.Header[header] = resp.Header[header]
}
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
resp = cachedResp
} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
// In case of transport failure and stale-if-error activated, returns cached content
// when available
cachedResp.Status = fmt.Sprintf("%d %s", http.StatusOK, http.StatusText(http.StatusOK))
cachedResp.StatusCode = http.StatusOK
return cachedResp, nil
} else {
if err != nil || resp.StatusCode != http.StatusOK {
t.Cache.Delete(cacheKey)
}
if err != nil {
return nil, err
}
}
} else {
reqCacheControl := parseCacheControl(req.Header)
if _, ok := reqCacheControl["only-if-cached"]; ok {
resp = newGatewayTimeoutResponse(req)
} else {
resp, err = transport.RoundTrip(req)
if err != nil {
return nil, err
}
}
}
if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
varyKey = http.CanonicalHeaderKey(varyKey)
fakeHeader := "X-Varied-" + varyKey
reqValue := req.Header.Get(varyKey)
if reqValue != "" {
resp.Header.Set(fakeHeader, reqValue)
}
}
switch req.Method {
case "GET":
// Delay caching until EOF is reached.
resp.Body = &cachingReadCloser{
R: resp.Body,
OnEOF: func(r io.Reader) {
resp := *resp
resp.Body = ioutil.NopCloser(r)
respBytes, err := httputil.DumpResponse(&resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
},
}
default:
respBytes, err := httputil.DumpResponse(resp, true)
if err == nil {
t.Cache.Set(cacheKey, respBytes)
}
}
} else {
t.Cache.Delete(cacheKey)
}
return resp, nil
}
// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")
// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
dateHeader := respHeaders.Get("date")
if dateHeader == "" {
err = ErrNoDateHeader
return
}
return time.Parse(time.RFC1123, dateHeader)
}
type realClock struct{}
func (c *realClock) since(d time.Time) time.Duration {
return time.Since(d)
}
type timer interface {
since(d time.Time) time.Duration
}
var clock timer = &realClock{}
// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
if _, ok := reqCacheControl["no-cache"]; ok {
return transparent
}
if _, ok := respCacheControl["no-cache"]; ok {
return stale
}
if _, ok := reqCacheControl["only-if-cached"]; ok {
return fresh
}
date, err := Date(respHeaders)
if err != nil {
return stale
}
currentAge := clock.since(date)
var lifetime time.Duration
var zeroDuration time.Duration
// If a response includes both an Expires header and a max-age directive,
// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
if maxAge, ok := respCacheControl["max-age"]; ok {
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
} else {
expiresHeader := respHeaders.Get("Expires")
if expiresHeader != "" {
expires, err := time.Parse(time.RFC1123, expiresHeader)
if err != nil {
lifetime = zeroDuration
} else {
lifetime = expires.Sub(date)
}
}
}
if maxAge, ok := reqCacheControl["max-age"]; ok {
// the client is willing to accept a response whose age is no greater than the specified time in seconds
lifetime, err = time.ParseDuration(maxAge + "s")
if err != nil {
lifetime = zeroDuration
}
}
if minfresh, ok := reqCacheControl["min-fresh"]; ok {
// the client wants a response that will still be fresh for at least the specified number of seconds.
minfreshDuration, err := time.ParseDuration(minfresh + "s")
if err == nil {
currentAge = time.Duration(currentAge + minfreshDuration)
}
}
if maxstale, ok := reqCacheControl["max-stale"]; ok {
// Indicates that the client is willing to accept a response that has exceeded its expiration time.
// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
// its expiration time by no more than the specified number of seconds.
// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
//
// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
// return-value available here.
if maxstale == "" {
return fresh
}
maxstaleDuration, err := time.ParseDuration(maxstale + "s")
if err == nil {
currentAge = time.Duration(currentAge - maxstaleDuration)
}
}
if lifetime > currentAge {
return fresh
}
return stale
}
// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
respCacheControl := parseCacheControl(respHeaders)
reqCacheControl := parseCacheControl(reqHeaders)
var err error
lifetime := time.Duration(-1)
if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
if staleMaxAge != "" {
lifetime, err = time.ParseDuration(staleMaxAge + "s")
if err != nil {
return false
}
} else {
return true
}
}
if lifetime >= 0 {
date, err := Date(respHeaders)
if err != nil {
return false
}
currentAge := clock.since(date)
if lifetime > currentAge {
return true
}
}
return false
}
func getEndToEndHeaders(respHeaders http.Header) []string {
// These headers are always hop-by-hop
hopByHopHeaders := map[string]struct{}{
"Connection": struct{}{},
"Keep-Alive": struct{}{},
"Proxy-Authenticate": struct{}{},
"Proxy-Authorization": struct{}{},
"Te": struct{}{},
"Trailers": struct{}{},
"Transfer-Encoding": struct{}{},
"Upgrade": struct{}{},
}
for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
// any header listed in connection, if present, is also considered hop-by-hop
if strings.Trim(extra, " ") != "" {
hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
}
}
endToEndHeaders := []string{}
for respHeader, _ := range respHeaders {
if _, ok := hopByHopHeaders[respHeader]; !ok {
endToEndHeaders = append(endToEndHeaders, respHeader)
}
}
return endToEndHeaders
}
func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
if _, ok := respCacheControl["no-store"]; ok {
return false
}
if _, ok := reqCacheControl["no-store"]; ok {
return false
}
return true
}
func newGatewayTimeoutResponse(req *http.Request) *http.Response {
var braw bytes.Buffer
braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
if err != nil {
panic(err)
}
return resp
}
// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
// shallow copy of the struct
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
for k, s := range r.Header {
r2.Header[k] = s
}
return r2
}
type cacheControl map[string]string
func parseCacheControl(headers http.Header) cacheControl {
cc := cacheControl{}
ccHeader := headers.Get("Cache-Control")
for _, part := range strings.Split(ccHeader, ",") {
part = strings.Trim(part, " ")
if part == "" {
continue
}
if strings.ContainsRune(part, '=') {
keyval := strings.Split(part, "=")
cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
} else {
cc[part] = ""
}
}
return cc
}
// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
var vals []string
for _, val := range headers[http.CanonicalHeaderKey(name)] {
fields := strings.Split(val, ",")
for i, f := range fields {
fields[i] = strings.TrimSpace(f)
}
vals = append(vals, fields...)
}
return vals
}
// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
// Underlying ReadCloser.
R io.ReadCloser
// OnEOF is called with a copy of the content of R when EOF is reached.
OnEOF func(io.Reader)
buf bytes.Buffer // buf stores a copy of the content of R.
}
// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
n, err = r.R.Read(p)
r.buf.Write(p[:n])
if err == io.EOF {
r.OnEOF(bytes.NewReader(r.buf.Bytes()))
}
return n, err
}
func (r *cachingReadCloser) Close() error {
return r.R.Close()
}
// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
c := NewMemoryCache()
t := NewTransport(c)
return t
}
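
As a worked illustration of the only-if-cached handling in RoundTrip above, the following sketch (not part of the diff; it uses only the exported names defined in this file) shows how a request against an empty cache is answered with a synthetic 504 from newGatewayTimeoutResponse rather than hitting the network:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	client := httpcache.NewMemoryCacheTransport().Client()

	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req.Header.Set("Cache-Control", "only-if-cached")

	// Nothing is cached yet, so RoundTrip short-circuits with a 504.
	resp, _ := client.Do(req)
	fmt.Println(resp.StatusCode) // 504
}
```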

View File

@@ -30,9 +30,9 @@ type TwoQueueCache struct {
size int
recentSize int
recent *simplelru.LRU
frequent *simplelru.LRU
recentEvict *simplelru.LRU
recent simplelru.LRUCache
frequent simplelru.LRUCache
recentEvict simplelru.LRUCache
lock sync.RWMutex
}
@@ -84,7 +84,8 @@ func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCa
return c, nil
}
func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
// Get looks up a key's value from the cache.
func (c *TwoQueueCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
@@ -105,6 +106,7 @@ func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
return nil, false
}
// Add adds a value to the cache.
func (c *TwoQueueCache) Add(key, value interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
@@ -160,12 +162,15 @@ func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
c.frequent.RemoveOldest()
}
// Len returns the number of items in the cache.
func (c *TwoQueueCache) Len() int {
c.lock.RLock()
defer c.lock.RUnlock()
return c.recent.Len() + c.frequent.Len()
}
// Keys returns a slice of the keys in the cache.
// The frequently used keys are first in the returned slice.
func (c *TwoQueueCache) Keys() []interface{} {
c.lock.RLock()
defer c.lock.RUnlock()
@@ -174,6 +179,7 @@ func (c *TwoQueueCache) Keys() []interface{} {
return append(k1, k2...)
}
// Remove removes the provided key from the cache.
func (c *TwoQueueCache) Remove(key interface{}) {
c.lock.Lock()
defer c.lock.Unlock()
@@ -188,6 +194,7 @@ func (c *TwoQueueCache) Remove(key interface{}) {
}
}
// Purge is used to completely clear the cache.
func (c *TwoQueueCache) Purge() {
c.lock.Lock()
defer c.lock.Unlock()
@@ -196,13 +203,17 @@ func (c *TwoQueueCache) Purge() {
c.recentEvict.Purge()
}
// Contains is used to check if the cache contains a key
// without updating recency or frequency.
func (c *TwoQueueCache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.frequent.Contains(key) || c.recent.Contains(key)
}
func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) {
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *TwoQueueCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.frequent.Peek(key); ok {

View File

@@ -18,11 +18,11 @@ type ARCCache struct {
size int // Size is the total capacity of the cache
p int // P is the dynamic preference towards T1 or T2
t1 *simplelru.LRU // T1 is the LRU for recently accessed items
b1 *simplelru.LRU // B1 is the LRU for evictions from t1
t1 simplelru.LRUCache // T1 is the LRU for recently accessed items
b1 simplelru.LRUCache // B1 is the LRU for evictions from t1
t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
b2 *simplelru.LRU // B2 is the LRU for evictions from t2
t2 simplelru.LRUCache // T2 is the LRU for frequently accessed items
b2 simplelru.LRUCache // B2 is the LRU for evictions from t2
lock sync.RWMutex
}
@@ -60,11 +60,11 @@ func NewARC(size int) (*ARCCache, error) {
}
// Get looks up a key's value from the cache.
func (c *ARCCache) Get(key interface{}) (interface{}, bool) {
func (c *ARCCache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
// Ff the value is contained in T1 (recent), then
// If the value is contained in T1 (recent), then
// promote it to T2 (frequent)
if val, ok := c.t1.Peek(key); ok {
c.t1.Remove(key)
@@ -153,7 +153,7 @@ func (c *ARCCache) Add(key, value interface{}) {
// Remove from B2
c.b2.Remove(key)
// Add the key to the frequntly used list
// Add the key to the frequently used list
c.t2.Add(key, value)
return
}
@@ -247,7 +247,7 @@ func (c *ARCCache) Contains(key interface{}) bool {
// Peek is used to inspect the cache value of a key
// without updating recency or frequency.
func (c *ARCCache) Peek(key interface{}) (interface{}, bool) {
func (c *ARCCache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
if val, ok := c.t1.Peek(key); ok {

21
vendor/github.com/hashicorp/golang-lru/doc.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Package lru provides three different LRU caches of varying sophistication.
//
// Cache is a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
//
// TwoQueueCache tracks frequently used and recently used entries separately.
// This avoids a burst of accesses from taking out frequently used entries,
// at the cost of about 2x computational overhead and some extra bookkeeping.
//
// ARCCache is an adaptive replacement cache. It tracks recent evictions as
// well as recent usage in both the frequent and recent caches. Its
// computational overhead is comparable to TwoQueueCache, but the memory
// overhead is linear with the size of the cache.
//
// ARC has been patented by IBM, so do not use it if that is problematic for
// your program.
//
// All caches in this package take locks while operating, and are therefore
// thread-safe for consumers.
package lru
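
A short usage sketch covering the three cache types documented above (hedged: the sizes and the 2Q recent/ghost ratios are illustrative values, not defaults taken from this diff):

```go
package main

import lru "github.com/hashicorp/golang-lru"

func main() {
	// Simple LRU cache.
	simple, _ := lru.New(128)
	simple.Add("a", 1)
	if v, ok := simple.Get("a"); ok {
		_ = v
	}

	// TwoQueueCache: 25% of capacity for the recent queue, 50% ghost entries.
	twoQueue, _ := lru.New2QParams(128, 0.25, 0.50)
	twoQueue.Add("b", 2)

	// ARCCache: adaptive replacement (see the patent note above).
	arc, _ := lru.NewARC(128)
	arc.Add("c", 3)
}
```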

1
vendor/github.com/hashicorp/golang-lru/go.mod generated vendored Normal file
View File

@@ -0,0 +1 @@
module github.com/hashicorp/golang-lru

View File

@@ -1,6 +1,3 @@
// This package provides a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
package lru
import (
@@ -11,11 +8,11 @@ import (
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
lru *simplelru.LRU
lru simplelru.LRUCache
lock sync.RWMutex
}
// New creates an LRU of the given size
// New creates an LRU of the given size.
func New(size int) (*Cache, error) {
return NewWithEvict(size, nil)
}
@@ -33,7 +30,7 @@ func NewWithEvict(size int, onEvicted func(key interface{}, value interface{}))
return c, nil
}
// Purge is used to completely clear the cache
// Purge is used to completely clear the cache.
func (c *Cache) Purge() {
c.lock.Lock()
c.lru.Purge()
@@ -41,30 +38,30 @@ func (c *Cache) Purge() {
}
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) bool {
func (c *Cache) Add(key, value interface{}) (evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Add(key, value)
}
// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (interface{}, bool) {
func (c *Cache) Get(key interface{}) (value interface{}, ok bool) {
c.lock.Lock()
defer c.lock.Unlock()
return c.lru.Get(key)
}
// Check if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
// Contains checks if a key is in the cache, without updating the
// recent-ness or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Contains(key)
}
// Returns the key value (or undefined if not found) without updating
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (interface{}, bool) {
func (c *Cache) Peek(key interface{}) (value interface{}, ok bool) {
c.lock.RLock()
defer c.lock.RUnlock()
return c.lru.Peek(key)
@@ -73,16 +70,15 @@ func (c *Cache) Peek(key interface{}) (interface{}, bool) {
// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) {
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
c.lock.Lock()
defer c.lock.Unlock()
if c.lru.Contains(key) {
return true, false
} else {
evict := c.lru.Add(key, value)
return false, evict
}
evicted = c.lru.Add(key, value)
return false, evicted
}
// Remove removes the provided key from the cache.

View File

@@ -36,7 +36,7 @@ func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
return c, nil
}
// Purge is used to completely clear the cache
// Purge is used to completely clear the cache.
func (c *LRU) Purge() {
for k, v := range c.items {
if c.onEvict != nil {
@@ -47,8 +47,8 @@ func (c *LRU) Purge() {
c.evictList.Init()
}
// Add adds a value to the cache. Returns true if an eviction occured.
func (c *LRU) Add(key, value interface{}) bool {
// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) (evicted bool) {
// Check for existing item
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
@@ -78,17 +78,18 @@ func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
return
}
// Check if a key is in the cache, without updating the recent-ness
// Contains checks if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
_, ok = c.items[key]
return ok
}
// Returns the key value (or undefined if not found) without updating
// Peek returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
var ent *list.Element
if ent, ok = c.items[key]; ok {
return ent.Value.(*entry).value, true
}
return nil, ok
@@ -96,7 +97,7 @@ func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) bool {
func (c *LRU) Remove(key interface{}) (present bool) {
if ent, ok := c.items[key]; ok {
c.removeElement(ent)
return true
@@ -105,7 +106,7 @@ func (c *LRU) Remove(key interface{}) bool {
}
// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
c.removeElement(ent)
@@ -116,7 +117,7 @@ func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
}
// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) {
ent := c.evictList.Back()
if ent != nil {
kv := ent.Value.(*entry)

View File

@@ -0,0 +1,36 @@
package simplelru

// LRUCache is the interface for simple LRU cache.
type LRUCache interface {
	// Adds a value to the cache, returns true if an eviction occurred and
	// updates the "recently used"-ness of the key.
	Add(key, value interface{}) bool

	// Returns key's value from the cache and
	// updates the "recently used"-ness of the key. #value, isFound
	Get(key interface{}) (value interface{}, ok bool)

	// Checks if a key exists in the cache without updating the recent-ness.
	Contains(key interface{}) (ok bool)

	// Returns key's value without updating the "recently used"-ness of the key.
	Peek(key interface{}) (value interface{}, ok bool)

	// Removes a key from the cache.
	Remove(key interface{}) bool

	// Removes the oldest entry from cache.
	RemoveOldest() (interface{}, interface{}, bool)

	// Returns the oldest entry from the cache. #key, value, isFound
	GetOldest() (interface{}, interface{}, bool)

	// Returns a slice of the keys in the cache, from oldest to newest.
	Keys() []interface{}

	// Returns the number of items in the cache.
	Len() int

	// Clear all cache entries.
	Purge()
}
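
The struct fields in Cache, TwoQueueCache and ARCCache above were switched from *simplelru.LRU to this interface; a minimal sketch of that relationship (nil is passed as the eviction callback for brevity):

```go
package main

import "github.com/hashicorp/golang-lru/simplelru"

func main() {
	l, err := simplelru.NewLRU(128, nil) // *LRU from NewLRU satisfies LRUCache
	if err != nil {
		panic(err)
	}

	var c simplelru.LRUCache = l
	c.Add("k", "v")
	if v, ok := c.Get("k"); ok {
		_ = v
	}
}
```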

View File

@@ -1,12 +1,6 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
name = "github.com/json-iterator/go"
packages = ["."]
revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4"
version = "1.1.3"
[[projects]]
name = "github.com/modern-go/concurrent"
packages = ["."]
@@ -16,12 +10,12 @@
[[projects]]
name = "github.com/modern-go/reflect2"
packages = ["."]
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
version = "1.0.0"
revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd"
version = "1.0.1"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "56a0b9e9e61d2bc8af5e1b68537401b7f4d60805eda3d107058f3171aa5cf793"
inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8"
solver-name = "gps-cdcl"
solver-version = 1

View File

@@ -23,4 +23,4 @@ ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com
[[constraint]]
name = "github.com/modern-go/reflect2"
version = "1.0.0"
version = "1.0.1"

View File

@@ -6,6 +6,7 @@ go:
before_install:
- go get -t -v ./...
- go get -t -v github.com/modern-go/reflect2-tests/...
script:
- ./test.sh

View File

@@ -24,7 +24,7 @@
# go-tests = true
# unused-packages = true
ignored = ["github.com/modern-go/test","github.com/modern-go/test/must","github.com/modern-go/test/should"]
ignored = []
[[constraint]]
name = "github.com/modern-go/concurrent"

View File

@@ -150,6 +150,9 @@ func (cfg *frozenConfig) TypeOf(obj interface{}) Type {
}
func (cfg *frozenConfig) Type2(type1 reflect.Type) Type {
if type1 == nil {
return nil
}
cacheKey := uintptr(unpackEFace(type1).data)
typeObj, found := cfg.cache.Load(cacheKey)
if found {

View File

@@ -3,7 +3,7 @@
set -e
echo "" > coverage.txt
for d in $(go list ./... | grep -v vendor); do
for d in $(go list github.com/modern-go/reflect2-tests/... | grep -v vendor); do
go test -coverprofile=profile.out -coverpkg=github.com/modern-go/reflect2 $d
if [ -f profile.out ]; then
cat profile.out >> coverage.txt

View File

@@ -4,6 +4,7 @@ import (
"reflect"
"runtime"
"strings"
"sync"
"unsafe"
)
@@ -15,10 +16,17 @@ func typelinks1() [][]unsafe.Pointer
//go:linkname typelinks2 reflect.typelinks
func typelinks2() (sections []unsafe.Pointer, offset [][]int32)
var types = map[string]reflect.Type{}
var packages = map[string]map[string]reflect.Type{}
// initOnce guards initialization of types and packages
var initOnce sync.Once
var types map[string]reflect.Type
var packages map[string]map[string]reflect.Type
// discoverTypes initializes types and packages
func discoverTypes() {
types = make(map[string]reflect.Type)
packages = make(map[string]map[string]reflect.Type)
func init() {
ver := runtime.Version()
if ver == "go1.5" || strings.HasPrefix(ver, "go1.5.") {
loadGo15Types()
@@ -90,11 +98,13 @@ type emptyInterface struct {
// TypeByName return the type by its name, just like Class.forName in java
func TypeByName(typeName string) Type {
initOnce.Do(discoverTypes)
return Type2(types[typeName])
}
// TypeByPackageName return the type by its package and name
func TypeByPackageName(pkgPath string, name string) Type {
initOnce.Do(discoverTypes)
pkgTypes := packages[pkgPath]
if pkgTypes == nil {
return nil

View File

@@ -1,19 +0,0 @@
Copyright (c) 2011-2012 Peter Bourgon
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

View File

@@ -1,141 +0,0 @@
# What is diskv?
Diskv (disk-vee) is a simple, persistent key-value store written in the Go
language. It starts with an incredibly simple API for storing arbitrary data on
a filesystem by key, and builds several layers of performance-enhancing
abstraction on top. The end result is a conceptually simple, but highly
performant, disk-backed storage system.
[![Build Status][1]][2]
[1]: https://drone.io/github.com/peterbourgon/diskv/status.png
[2]: https://drone.io/github.com/peterbourgon/diskv/latest
# Installing
Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5].
Then,
```bash
$ go get github.com/peterbourgon/diskv
```
[3]: http://golang.org
[4]: http://golang.org/doc/install/source
[5]: http://golang.org/doc/install
# Usage
```go
package main
import (
"fmt"
"github.com/peterbourgon/diskv"
)
func main() {
// Simplest transform function: put all the data files into the base dir.
flatTransform := func(s string) []string { return []string{} }
// Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache.
d := diskv.New(diskv.Options{
BasePath: "my-data-dir",
Transform: flatTransform,
CacheSizeMax: 1024 * 1024,
})
// Write three bytes to the key "alpha".
key := "alpha"
d.Write(key, []byte{'1', '2', '3'})
// Read the value back out of the store.
value, _ := d.Read(key)
fmt.Printf("%v\n", value)
// Erase the key+value from the store (and the disk).
d.Erase(key)
}
```
More complex examples can be found in the "examples" subdirectory.
# Theory
## Basic idea
At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`).
The data is written to a single file on disk, with the same name as the key.
The key determines where that file will be stored, via a user-provided
`TransformFunc`, which takes a key and returns a slice (`[]string`)
corresponding to a path list where the key file will be stored. The simplest
TransformFunc,
```go
func SimpleTransform (key string) []string {
return []string{}
}
```
will place all keys in the same, base directory. The design is inspired by
[Redis diskstore][6]; a TransformFunc which emulates the default diskstore
behavior is available in the content-addressable-storage example.
[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1
**Note** that your TransformFunc should ensure that one valid key doesn't
transform to a subset of another valid key. That is, it shouldn't be possible
to construct valid keys that resolve to directory names. As a concrete example,
if your TransformFunc splits on every 3 characters, then
```go
d.Write("abcabc", val) // OK: written to <base>/abc/abc/abcabc
d.Write("abc", val) // Error: attempted write to <base>/abc/abc, but it's a directory
```
This will be addressed in an upcoming version of diskv.
Probably the most important design principle behind diskv is that your data is
always flatly available on the disk. diskv will never do anything that would
prevent you from accessing, copying, backing up, or otherwise interacting with
your data via common UNIX commandline tools.
## Adding a cache
An in-memory caching layer is provided by combining the BasicStore
functionality with a simple map structure, and keeping it up-to-date as
appropriate. Since the map structure in Go is not threadsafe, it's combined
with a RWMutex to provide safe concurrent access.
## Adding order
diskv is a key-value store and therefore inherently unordered. An ordering
system can be injected into the store by passing something which satisfies the
diskv.Index interface. (A default implementation, using Google's
[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a
user-provided Less function) index of the keys, which can be queried.
[7]: https://github.com/google/btree
## Adding compression
Something which implements the diskv.Compression interface may be passed
during store creation, so that all Writes and Reads are filtered through
a compression/decompression pipeline. Several default implementations,
using stdlib compression algorithms, are provided. Note that data is cached
compressed; the cost of decompression is borne with each Read.
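
For instance, enabling gzip for every Write and Read is a single option (a sketch using the NewGzipCompression helper defined in this package's compression.go, shown later in this diff; the paths and sizes are illustrative):

```go
d := diskv.New(diskv.Options{
	BasePath:     "my-data-dir",
	Compression:  diskv.NewGzipCompression(),
	CacheSizeMax: 1024 * 1024, // cached values are kept in compressed form
})
d.Write("alpha", []byte("hello")) // stored gzip-compressed, decompressed on Read
```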
## Streaming
diskv also now provides ReadStream and WriteStream methods, to allow very large
data to be handled efficiently.
# Future plans
* Needs plenty of robust testing: huge datasets, etc...
* More thorough benchmarking
* Your suggestions for use-cases I haven't thought of

View File

@@ -1,64 +0,0 @@
package diskv
import (
"compress/flate"
"compress/gzip"
"compress/zlib"
"io"
)
// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
Writer(dst io.Writer) (io.WriteCloser, error)
Reader(src io.Reader) (io.ReadCloser, error)
}
// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
return NewGzipCompressionLevel(flate.DefaultCompression)
}
// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
return &genericCompression{
wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
}
}
// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
return NewZlibCompressionLevel(flate.DefaultCompression)
}
// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
return NewZlibCompressionLevelDict(level, nil)
}
// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
return &genericCompression{
func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
}
}
type genericCompression struct {
wf func(w io.Writer) (io.WriteCloser, error)
rf func(r io.Reader) (io.ReadCloser, error)
}
func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
return g.wf(dst)
}
func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
return g.rf(src)
}

View File

@@ -1,624 +0,0 @@
// Diskv (disk-vee) is a simple, persistent, key-value store.
// It stores all data flatly on the filesystem.
package diskv
import (
"bytes"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"sync"
"syscall"
)
const (
defaultBasePath = "diskv"
defaultFilePerm os.FileMode = 0666
defaultPathPerm os.FileMode = 0777
)
var (
defaultTransform = func(s string) []string { return []string{} }
errCanceled = errors.New("canceled")
errEmptyKey = errors.New("empty key")
errBadKey = errors.New("bad key")
errImportDirectory = errors.New("can't import a directory")
)
// TransformFunction transforms a key into a slice of strings, with each
// element in the slice representing a directory in the file path where the
// key's entry will eventually be stored.
//
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
type TransformFunction func(s string) []string
// Options define a set of properties that dictate Diskv behavior.
// All values are optional.
type Options struct {
BasePath string
Transform TransformFunction
CacheSizeMax uint64 // bytes
PathPerm os.FileMode
FilePerm os.FileMode
// If TempDir is set, it will enable filesystem atomic writes by
// writing temporary files to that location before being moved
// to BasePath.
// Note that TempDir MUST be on the same device/partition as
// BasePath.
TempDir string
Index Index
IndexLess LessFunction
Compression Compression
}
// Diskv implements the Diskv interface. You shouldn't construct Diskv
// structures directly; instead, use the New constructor.
type Diskv struct {
Options
mu sync.RWMutex
cache map[string][]byte
cacheSize uint64
}
// New returns an initialized Diskv structure, ready to use.
// If the path identified by baseDir already contains data,
// it will be accessible, but not yet cached.
func New(o Options) *Diskv {
if o.BasePath == "" {
o.BasePath = defaultBasePath
}
if o.Transform == nil {
o.Transform = defaultTransform
}
if o.PathPerm == 0 {
o.PathPerm = defaultPathPerm
}
if o.FilePerm == 0 {
o.FilePerm = defaultFilePerm
}
d := &Diskv{
Options: o,
cache: map[string][]byte{},
cacheSize: 0,
}
if d.Index != nil && d.IndexLess != nil {
d.Index.Initialize(d.IndexLess, d.Keys(nil))
}
return d
}
// Write synchronously writes the key-value pair to disk, making it immediately
// available for reads. Write relies on the filesystem to perform an eventual
// sync to physical media. If you need stronger guarantees, see WriteStream.
func (d *Diskv) Write(key string, val []byte) error {
return d.WriteStream(key, bytes.NewBuffer(val), false)
}
// WriteStream writes the data represented by the io.Reader to the disk, under
// the provided key. If sync is true, WriteStream performs an explicit sync on
// the file as soon as it's written.
//
// bytes.Buffer provides io.Reader semantics for basic data types.
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
if len(key) <= 0 {
return errEmptyKey
}
d.mu.Lock()
defer d.mu.Unlock()
return d.writeStreamWithLock(key, r, sync)
}
// createKeyFileWithLock either creates the key file directly, or
// creates a temporary file in TempDir if it is set.
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
if d.TempDir != "" {
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
return nil, fmt.Errorf("temp mkdir: %s", err)
}
f, err := ioutil.TempFile(d.TempDir, "")
if err != nil {
return nil, fmt.Errorf("temp file: %s", err)
}
if err := f.Chmod(d.FilePerm); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return nil, fmt.Errorf("chmod: %s", err)
}
return f, nil
}
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
if err != nil {
return nil, fmt.Errorf("open file: %s", err)
}
return f, nil
}
// writeStream does no input validation checking.
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
if err := d.ensurePathWithLock(key); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
f, err := d.createKeyFileWithLock(key)
if err != nil {
return fmt.Errorf("create key file: %s", err)
}
wc := io.WriteCloser(&nopWriteCloser{f})
if d.Compression != nil {
wc, err = d.Compression.Writer(f)
if err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression writer: %s", err)
}
}
if _, err := io.Copy(wc, r); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("i/o copy: %s", err)
}
if err := wc.Close(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("compression close: %s", err)
}
if sync {
if err := f.Sync(); err != nil {
f.Close() // error deliberately ignored
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("file sync: %s", err)
}
}
if err := f.Close(); err != nil {
return fmt.Errorf("file close: %s", err)
}
if f.Name() != d.completeFilename(key) {
if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
os.Remove(f.Name()) // error deliberately ignored
return fmt.Errorf("rename: %s", err)
}
}
if d.Index != nil {
d.Index.Insert(key)
}
d.bustCacheWithLock(key) // cache only on read
return nil
}
// Import imports the source file into diskv under the destination key. If the
// destination key already exists, it's overwritten. If move is true, the
// source file is removed after a successful import.
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
if dstKey == "" {
return errEmptyKey
}
if fi, err := os.Stat(srcFilename); err != nil {
return err
} else if fi.IsDir() {
return errImportDirectory
}
d.mu.Lock()
defer d.mu.Unlock()
if err := d.ensurePathWithLock(dstKey); err != nil {
return fmt.Errorf("ensure path: %s", err)
}
if move {
if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
d.bustCacheWithLock(dstKey)
return nil
} else if err != syscall.EXDEV {
// If it failed due to being on a different device, fall back to copying
return err
}
}
f, err := os.Open(srcFilename)
if err != nil {
return err
}
defer f.Close()
err = d.writeStreamWithLock(dstKey, f, false)
if err == nil && move {
err = os.Remove(srcFilename)
}
return err
}
// Read reads the key and returns the value.
// If the key is available in the cache, Read won't touch the disk.
// If the key is not in the cache, Read will have the side-effect of
// lazily caching the value.
func (d *Diskv) Read(key string) ([]byte, error) {
rc, err := d.ReadStream(key, false)
if err != nil {
return []byte{}, err
}
defer rc.Close()
return ioutil.ReadAll(rc)
}
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
// If the value is cached from a previous read, and direct is false,
// ReadStream will use the cached value. Otherwise, it will return a handle to
// the file on disk, and cache the data on read.
//
// If direct is true, ReadStream will lazily delete any cached value for the
// key, and return a direct handle to the file on disk.
//
// If compression is enabled, ReadStream taps into the io.Reader stream prior
// to decompression, and caches the compressed data.
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
d.mu.RLock()
defer d.mu.RUnlock()
if val, ok := d.cache[key]; ok {
if !direct {
buf := bytes.NewBuffer(val)
if d.Compression != nil {
return d.Compression.Reader(buf)
}
return ioutil.NopCloser(buf), nil
}
go func() {
d.mu.Lock()
defer d.mu.Unlock()
d.uncacheWithLock(key, uint64(len(val)))
}()
}
return d.readWithRLock(key)
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
filename := d.completeFilename(key)
fi, err := os.Stat(filename)
if err != nil {
return nil, err
}
if fi.IsDir() {
return nil, os.ErrNotExist
}
f, err := os.Open(filename)
if err != nil {
return nil, err
}
var r io.Reader
if d.CacheSizeMax > 0 {
r = newSiphon(f, d, key)
} else {
r = &closingReader{f}
}
var rc = io.ReadCloser(ioutil.NopCloser(r))
if d.Compression != nil {
rc, err = d.Compression.Reader(r)
if err != nil {
return nil, err
}
}
return rc, nil
}
// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF
type closingReader struct {
rc io.ReadCloser
}
func (cr closingReader) Read(p []byte) (int, error) {
n, err := cr.rc.Read(p)
if err == io.EOF {
if closeErr := cr.rc.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
}
return n, err
}
// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
f *os.File
d *Diskv
key string
buf *bytes.Buffer
}
// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
return &siphon{
f: f,
d: d,
key: key,
buf: &bytes.Buffer{},
}
}
// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
n, err := s.f.Read(p)
if err == nil {
return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
}
if err == io.EOF {
s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
if closeErr := s.f.Close(); closeErr != nil {
return n, closeErr // close must succeed for Read to succeed
}
return n, err
}
return n, err
}
// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
d.mu.Lock()
defer d.mu.Unlock()
d.bustCacheWithLock(key)
// erase from index
if d.Index != nil {
d.Index.Delete(key)
}
// erase from disk
filename := d.completeFilename(key)
if s, err := os.Stat(filename); err == nil {
if s.IsDir() {
return errBadKey
}
if err = os.Remove(filename); err != nil {
return err
}
} else {
// Return err as-is so caller can do os.IsNotExist(err).
return err
}
// clean up and return
d.pruneDirsWithLock(key)
return nil
}
// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
d.mu.Lock()
defer d.mu.Unlock()
d.cache = make(map[string][]byte)
d.cacheSize = 0
if d.TempDir != "" {
os.RemoveAll(d.TempDir) // errors ignored
}
return os.RemoveAll(d.BasePath)
}
// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
d.mu.Lock()
defer d.mu.Unlock()
if _, ok := d.cache[key]; ok {
return true
}
filename := d.completeFilename(key)
s, err := os.Stat(filename)
if err != nil {
return false
}
if s.IsDir() {
return false
}
return true
}
// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
return d.KeysPrefix("", cancel)
}
// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
var prepath string
if prefix == "" {
prepath = d.BasePath
} else {
prepath = d.pathFor(prefix)
}
c := make(chan string)
go func() {
filepath.Walk(prepath, walker(c, prefix, cancel))
close(c)
}()
return c
}
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
return func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
return nil // "pass"
}
select {
case c <- info.Name():
case <-cancel:
return errCanceled
}
return nil
}
}
// pathFor returns the absolute path for location on the filesystem where the
// data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
return os.MkdirAll(d.pathFor(key), d.PathPerm)
}
// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
return filepath.Join(d.pathFor(key), key)
}
// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
valueSize := uint64(len(val))
if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
return fmt.Errorf("%s; not caching", err)
}
// be very strict about memory guarantees
if (d.cacheSize + valueSize) > d.CacheSizeMax {
panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
}
d.cache[key] = val
d.cacheSize += valueSize
return nil
}
// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
d.mu.Lock()
defer d.mu.Unlock()
return d.cacheWithLock(key, val)
}
func (d *Diskv) bustCacheWithLock(key string) {
if val, ok := d.cache[key]; ok {
d.uncacheWithLock(key, uint64(len(val)))
}
}
func (d *Diskv) uncacheWithLock(key string, sz uint64) {
d.cacheSize -= sz
delete(d.cache, key)
}
// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
pathlist := d.Transform(key)
for i := range pathlist {
dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))
// thanks to Steven Blenkinsop for this snippet
switch fi, err := os.Stat(dir); true {
case err != nil:
return err
case !fi.IsDir():
panic(fmt.Sprintf("corrupt dirstate at %s", dir))
}
nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
if err != nil {
return err
} else if len(nlinks) > 0 {
return nil // has subdirs -- do not prune
}
if err = os.Remove(dir); err != nil {
return err
}
}
return nil
}
// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
if valueSize > d.CacheSizeMax {
return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
}
safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }
for key, val := range d.cache {
if safe() {
break
}
d.uncacheWithLock(key, uint64(len(val)))
}
if !safe() {
panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
}
return nil
}
// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
io.Writer
}
func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error { return nil }
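
The read path above (Read → ReadStream → readWithRLock, with a siphon filling the cache at EOF) is easiest to follow from the caller's side. A minimal sketch, editor's illustration only: it assumes the diskv package's New/Options/Write API and the github.com/peterbourgon/diskv import path, none of which appear in the listing above.

```Go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// New, Options and Write are assumed from the package API; the fields
	// BasePath, Transform and CacheSizeMax do appear in the code above.
	d := diskv.New(diskv.Options{
		BasePath:     "/tmp/diskv-example",
		Transform:    func(key string) []string { return []string{} },
		CacheSizeMax: 1024 * 1024,
	})

	if err := d.Write("alpha", []byte("some value")); err != nil {
		panic(err)
	}

	// The first Read streams from disk through a siphon, which copies the
	// bytes into the in-memory cache at EOF; a later Read served from the
	// cache never touches the disk.
	v, _ := d.Read("alpha")
	fmt.Println(string(v), d.Has("alpha"))

	// direct=true bypasses the cache and lazily evicts any cached copy.
	rc, _ := d.ReadStream("alpha", true)
	rc.Close()

	// Keys walks the BasePath; closing cancel stops the walk early.
	cancel := make(chan struct{})
	for key := range d.Keys(cancel) {
		fmt.Println(key)
	}

	_ = d.Erase("alpha") // drops the key from cache, Index (if set), and disk
}
```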


@@ -1,115 +0,0 @@
package diskv
import (
"sync"
"github.com/google/btree"
)
// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
Initialize(less LessFunction, keys <-chan string)
Insert(key string)
Delete(key string)
Keys(from string, n int) []string
}
// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool
// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
s string
l LessFunction
}
// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
return s.l(s.s, i.(btreeString).s)
}
// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
sync.RWMutex
LessFunction
*btree.BTree
}
// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
i.Lock()
defer i.Unlock()
i.LessFunction = less
i.BTree = rebuild(less, keys)
}
// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}
// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
i.Lock()
defer i.Unlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}
// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
i.RLock()
defer i.RUnlock()
if i.BTree == nil || i.LessFunction == nil {
panic("uninitialized index")
}
if i.BTree.Len() <= 0 {
return []string{}
}
btreeFrom := btreeString{s: from, l: i.LessFunction}
skipFirst := true
if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
// no such key, so fabricate an always-smallest item
btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
skipFirst = false
}
keys := []string{}
iterator := func(i btree.Item) bool {
keys = append(keys, i.(btreeString).s)
return len(keys) < n
}
i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)
if skipFirst && len(keys) > 0 {
keys = keys[1:]
}
return keys
}
// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
tree := btree.New(2)
for key := range keys {
tree.ReplaceOrInsert(btreeString{s: key, l: less})
}
return tree
}
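
For orientation, a small usage sketch of the Index shown above (editor's illustration; BTreeIndex is exercised directly here, though in practice it is wired into the store's Index field, as the Erase method in the previous file implies). The import path is assumed to be github.com/peterbourgon/diskv.

```Go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	idx := &diskv.BTreeIndex{}

	// Initialize is destructive and reads keys from a channel until it is
	// closed; here we seed it with nothing and Insert keys afterwards.
	seed := make(chan string)
	close(seed)
	idx.Initialize(func(a, b string) bool { return a < b }, seed)

	for _, k := range []string{"b", "a", "c"} {
		idx.Insert(k)
	}

	fmt.Println(idx.Keys("", 10))  // first n keys in order: [a b c]
	fmt.Println(idx.Keys("a", 10)) // keys strictly after "a": [b c]

	idx.Delete("b")
	fmt.Println(idx.Keys("", 10)) // [a c]
}
```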


@@ -4,6 +4,9 @@ go:
- 1.4
- 1.5
- 1.6
- 1.7
- 1.8
- 1.9
- tip
go_import_path: gopkg.in/yaml.v2

208
vendor/gopkg.in/yaml.v2/LICENSE generated vendored

@@ -1,13 +1,201 @@
Copyright 2011-2016 Canonical Ltd.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
http://www.apache.org/licenses/LICENSE-2.0
1. Definitions.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

4
vendor/gopkg.in/yaml.v2/README.md generated vendored

@@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS
Example
-------
Some more examples can be found in the "examples" folder.
```Go
package main
@@ -67,6 +65,8 @@ b:
d: [3, 4]
`
// Note: struct fields must be public in order for unmarshal to
// correctly populate the data.
type T struct {
A string
B struct {

55
vendor/gopkg.in/yaml.v2/apic.go generated vendored

@@ -2,7 +2,6 @@ package yaml
import (
"io"
"os"
)
func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
@@ -48,9 +47,9 @@ func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err
return n, nil
}
// File read handler.
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_file.Read(buffer)
// Reader read handler.
func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
return parser.input_reader.Read(buffer)
}
// Set a string input.
@@ -64,12 +63,12 @@ func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
}
// Set a file input.
func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) {
if parser.read_handler != nil {
panic("must set the input source only once")
}
parser.read_handler = yaml_file_read_handler
parser.input_file = file
parser.read_handler = yaml_reader_read_handler
parser.input_reader = r
}
// Set the source encoding.
@@ -81,14 +80,13 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
}
// Create a new emitter object.
func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, output_buffer_size),
raw_buffer: make([]byte, 0, output_raw_buffer_size),
states: make([]yaml_emitter_state_t, 0, initial_stack_size),
events: make([]yaml_event_t, 0, initial_queue_size),
}
return true
}
// Destroy an emitter object.
@@ -102,9 +100,10 @@ func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
return nil
}
// File write handler.
func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_file.Write(buffer)
// yaml_writer_write_handler uses emitter.output_writer to write the
// emitted text.
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
@@ -118,12 +117,12 @@ func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]by
}
// Set a file output.
func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("must set the output target only once")
}
emitter.write_handler = yaml_file_write_handler
emitter.output_file = file
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
// Set the output encoding.
@@ -252,41 +251,41 @@ func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
//
// Create STREAM-START.
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
typ: yaml_STREAM_START_EVENT,
encoding: encoding,
}
return true
}
// Create STREAM-END.
func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_STREAM_END_EVENT,
}
return true
}
// Create DOCUMENT-START.
func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t, implicit bool) bool {
func yaml_document_start_event_initialize(
event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool,
) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
return true
}
// Create DOCUMENT-END.
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
typ: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
return true
}
///*
@@ -348,7 +347,7 @@ func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
}
// Create MAPPING-START.
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_START_EVENT,
anchor: anchor,
@@ -356,15 +355,13 @@ func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte
implicit: implicit,
style: yaml_style_t(style),
}
return true
}
// Create MAPPING-END.
func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
typ: yaml_MAPPING_END_EVENT,
}
return true
}
// Destroy an event object.
@@ -471,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) {
// } context
// tag_directive *yaml_tag_directive_t
//
// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
// context.error = YAML_NO_ERROR // Eliminate a compiler warning.
//
// assert(document) // Non-NULL document object is expected.
//

244
vendor/gopkg.in/yaml.v2/decode.go generated vendored

@@ -4,6 +4,7 @@ import (
"encoding"
"encoding/base64"
"fmt"
"io"
"math"
"reflect"
"strconv"
@@ -22,19 +23,22 @@ type node struct {
kind int
line, column int
tag string
value string
implicit bool
children []*node
anchors map[string]*node
// For an alias node, alias holds the resolved alias.
alias *node
value string
implicit bool
children []*node
anchors map[string]*node
}
// ----------------------------------------------------------------------------
// Parser, produces a node tree out of a libyaml event stream.
type parser struct {
parser yaml_parser_t
event yaml_event_t
doc *node
parser yaml_parser_t
event yaml_event_t
doc *node
doneInit bool
}
func newParser(b []byte) *parser {
@@ -42,21 +46,30 @@ func newParser(b []byte) *parser {
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
if len(b) == 0 {
b = []byte{'\n'}
}
yaml_parser_set_input_string(&p.parser, b)
p.skip()
if p.event.typ != yaml_STREAM_START_EVENT {
panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
return &p
}
func newParserFromReader(r io.Reader) *parser {
p := parser{}
if !yaml_parser_initialize(&p.parser) {
panic("failed to initialize YAML emitter")
}
yaml_parser_set_input_reader(&p.parser, r)
return &p
}
func (p *parser) init() {
if p.doneInit {
return
}
p.expect(yaml_STREAM_START_EVENT)
p.doneInit = true
}
func (p *parser) destroy() {
if p.event.typ != yaml_NO_EVENT {
yaml_event_delete(&p.event)
@@ -64,16 +77,35 @@ func (p *parser) destroy() {
yaml_parser_delete(&p.parser)
}
func (p *parser) skip() {
if p.event.typ != yaml_NO_EVENT {
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
// expect consumes an event from the event stream and
// checks that it's of the expected type.
func (p *parser) expect(e yaml_event_type_t) {
if p.event.typ == yaml_NO_EVENT {
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
yaml_event_delete(&p.event)
}
if p.event.typ == yaml_STREAM_END_EVENT {
failf("attempted to go past the end of stream; corrupted value?")
}
if p.event.typ != e {
p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
p.fail()
}
yaml_event_delete(&p.event)
p.event.typ = yaml_NO_EVENT
}
// peek peeks at the next event in the event stream,
// puts the results into p.event and returns the event type.
func (p *parser) peek() yaml_event_type_t {
if p.event.typ != yaml_NO_EVENT {
return p.event.typ
}
if !yaml_parser_parse(&p.parser, &p.event) {
p.fail()
}
return p.event.typ
}
func (p *parser) fail() {
@@ -81,6 +113,10 @@ func (p *parser) fail() {
var line int
if p.parser.problem_mark.line != 0 {
line = p.parser.problem_mark.line
// Scanner errors don't iterate line before returning error
if p.parser.error == yaml_SCANNER_ERROR {
line++
}
} else if p.parser.context_mark.line != 0 {
line = p.parser.context_mark.line
}
@@ -103,7 +139,8 @@ func (p *parser) anchor(n *node, anchor []byte) {
}
func (p *parser) parse() *node {
switch p.event.typ {
p.init()
switch p.peek() {
case yaml_SCALAR_EVENT:
return p.scalar()
case yaml_ALIAS_EVENT:
@@ -118,7 +155,7 @@ func (p *parser) parse() *node {
// Happens when attempting to decode an empty buffer.
return nil
default:
panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
panic("attempted to parse unknown event: " + p.event.typ.String())
}
}
@@ -134,19 +171,20 @@ func (p *parser) document() *node {
n := p.node(documentNode)
n.anchors = make(map[string]*node)
p.doc = n
p.skip()
p.expect(yaml_DOCUMENT_START_EVENT)
n.children = append(n.children, p.parse())
if p.event.typ != yaml_DOCUMENT_END_EVENT {
panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
}
p.skip()
p.expect(yaml_DOCUMENT_END_EVENT)
return n
}
func (p *parser) alias() *node {
n := p.node(aliasNode)
n.value = string(p.event.anchor)
p.skip()
n.alias = p.doc.anchors[n.value]
if n.alias == nil {
failf("unknown anchor '%s' referenced", n.value)
}
p.expect(yaml_ALIAS_EVENT)
return n
}
@@ -156,29 +194,29 @@ func (p *parser) scalar() *node {
n.tag = string(p.event.tag)
n.implicit = p.event.implicit
p.anchor(n, p.event.anchor)
p.skip()
p.expect(yaml_SCALAR_EVENT)
return n
}
func (p *parser) sequence() *node {
n := p.node(sequenceNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_SEQUENCE_END_EVENT {
p.expect(yaml_SEQUENCE_START_EVENT)
for p.peek() != yaml_SEQUENCE_END_EVENT {
n.children = append(n.children, p.parse())
}
p.skip()
p.expect(yaml_SEQUENCE_END_EVENT)
return n
}
func (p *parser) mapping() *node {
n := p.node(mappingNode)
p.anchor(n, p.event.anchor)
p.skip()
for p.event.typ != yaml_MAPPING_END_EVENT {
p.expect(yaml_MAPPING_START_EVENT)
for p.peek() != yaml_MAPPING_END_EVENT {
n.children = append(n.children, p.parse(), p.parse())
}
p.skip()
p.expect(yaml_MAPPING_END_EVENT)
return n
}
@@ -187,7 +225,7 @@ func (p *parser) mapping() *node {
type decoder struct {
doc *node
aliases map[string]bool
aliases map[*node]bool
mapType reflect.Type
terrors []string
strict bool
@@ -198,11 +236,13 @@ var (
durationType = reflect.TypeOf(time.Duration(0))
defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
ifaceType = defaultMapType.Elem()
timeType = reflect.TypeOf(time.Time{})
ptrTimeType = reflect.TypeOf(&time.Time{})
)
func newDecoder(strict bool) *decoder {
d := &decoder{mapType: defaultMapType, strict: strict}
d.aliases = make(map[string]bool)
d.aliases = make(map[*node]bool)
return d
}
@@ -251,7 +291,7 @@ func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
//
// If n holds a null value, prepare returns before doing anything.
func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "" && n.implicit) {
if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) {
return out, false, false
}
again := true
@@ -308,16 +348,13 @@ func (d *decoder) document(n *node, out reflect.Value) (good bool) {
}
func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
an, ok := d.doc.anchors[n.value]
if !ok {
failf("unknown anchor '%s' referenced", n.value)
}
if d.aliases[n.value] {
if d.aliases[n] {
// TODO this could actually be allowed in some circumstances.
failf("anchor '%s' value contains itself", n.value)
}
d.aliases[n.value] = true
good = d.unmarshal(an, out)
delete(d.aliases, n.value)
d.aliases[n] = true
good = d.unmarshal(n.alias, out)
delete(d.aliases, n)
return good
}
@@ -329,7 +366,7 @@ func resetMap(out reflect.Value) {
}
}
func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
func (d *decoder) scalar(n *node, out reflect.Value) bool {
var tag string
var resolved interface{}
if n.tag == "" && !n.implicit {
@@ -353,9 +390,26 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
}
return true
}
if s, ok := resolved.(string); ok && out.CanAddr() {
if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
err := u.UnmarshalText([]byte(s))
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
// We've resolved to exactly the type we want, so use that.
out.Set(resolvedv)
return true
}
// Perhaps we can use the value as a TextUnmarshaler to
// set its value.
if out.CanAddr() {
u, ok := out.Addr().Interface().(encoding.TextUnmarshaler)
if ok {
var text []byte
if tag == yaml_BINARY_TAG {
text = []byte(resolved.(string))
} else {
// We let any value be unmarshaled into TextUnmarshaler.
// That might be more lax than we'd like, but the
// TextUnmarshaler itself should bowl out any dubious values.
text = []byte(n.value)
}
err := u.UnmarshalText(text)
if err != nil {
fail(err)
}
@@ -366,46 +420,54 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case reflect.String:
if tag == yaml_BINARY_TAG {
out.SetString(resolved.(string))
good = true
} else if resolved != nil {
return true
}
if resolved != nil {
out.SetString(n.value)
good = true
return true
}
case reflect.Interface:
if resolved == nil {
out.Set(reflect.Zero(out.Type()))
} else if tag == yaml_TIMESTAMP_TAG {
// It looks like a timestamp but for backward compatibility
// reasons we set it as a string, so that code that unmarshals
// timestamp-like values into interface{} will continue to
// see a string and not a time.Time.
// TODO(v3) Drop this.
out.Set(reflect.ValueOf(n.value))
} else {
out.Set(reflect.ValueOf(resolved))
}
good = true
return true
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
switch resolved := resolved.(type) {
case int:
if !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case int64:
if !out.OverflowInt(resolved) {
out.SetInt(resolved)
good = true
return true
}
case uint64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case float64:
if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
out.SetInt(int64(resolved))
good = true
return true
}
case string:
if out.Type() == durationType {
d, err := time.ParseDuration(resolved)
if err == nil {
out.SetInt(int64(d))
good = true
return true
}
}
}
@@ -414,44 +476,49 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
case int:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case int64:
if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case uint64:
if !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
case float64:
if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
out.SetUint(uint64(resolved))
good = true
return true
}
}
case reflect.Bool:
switch resolved := resolved.(type) {
case bool:
out.SetBool(resolved)
good = true
return true
}
case reflect.Float32, reflect.Float64:
switch resolved := resolved.(type) {
case int:
out.SetFloat(float64(resolved))
good = true
return true
case int64:
out.SetFloat(float64(resolved))
good = true
return true
case uint64:
out.SetFloat(float64(resolved))
good = true
return true
case float64:
out.SetFloat(resolved)
good = true
return true
}
case reflect.Struct:
if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
out.Set(resolvedv)
return true
}
case reflect.Ptr:
if out.Type().Elem() == reflect.TypeOf(resolved) {
@@ -459,13 +526,11 @@ func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
elem := reflect.New(out.Type().Elem())
elem.Elem().Set(reflect.ValueOf(resolved))
out.Set(elem)
good = true
return true
}
}
if !good {
d.terror(n, tag, out)
}
return good
d.terror(n, tag, out)
return false
}
func settableValueOf(i interface{}) reflect.Value {
@@ -482,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
switch out.Kind() {
case reflect.Slice:
out.Set(reflect.MakeSlice(out.Type(), l, l))
case reflect.Array:
if l != out.Len() {
failf("invalid array: want %d elements but got %d", out.Len(), l)
}
case reflect.Interface:
// No type hints. Will have to use a generic sequence.
iface = out
@@ -500,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
j++
}
}
out.Set(out.Slice(0, j))
if out.Kind() != reflect.Array {
out.Set(out.Slice(0, j))
}
if iface.IsValid() {
iface.Set(out)
}
@@ -561,7 +632,7 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
}
e := reflect.New(et).Elem()
if d.unmarshal(n.children[i+1], e) {
out.SetMapIndex(k, e)
d.setMapIndex(n.children[i+1], out, k, e)
}
}
}
@@ -569,6 +640,14 @@ func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
return true
}
func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) {
if d.strict && out.MapIndex(k) != zeroValue {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface()))
return
}
out.SetMapIndex(k, v)
}
func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
outt := out.Type()
if outt.Elem() != mapItemType {
@@ -616,6 +695,10 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
elemType = inlineMap.Type().Elem()
}
var doneFields []bool
if d.strict {
doneFields = make([]bool, len(sinfo.FieldsList))
}
for i := 0; i < l; i += 2 {
ni := n.children[i]
if isMerge(ni) {
@@ -626,6 +709,13 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
continue
}
if info, ok := sinfo.FieldsMap[name.String()]; ok {
if d.strict {
if doneFields[info.Id] {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type()))
continue
}
doneFields[info.Id] = true
}
var field reflect.Value
if info.Inline == nil {
field = out.Field(info.Num)
@@ -639,9 +729,9 @@ func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
}
value := reflect.New(elemType).Elem()
d.unmarshal(n.children[i+1], value)
inlineMap.SetMapIndex(name, value)
d.setMapIndex(n.children[i+1], inlineMap, name, value)
} else if d.strict {
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in struct %s", n.line+1, name.String(), out.Type()))
d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type()))
}
}
return true
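
The strict-mode additions above (setMapIndex and the doneFields bookkeeping) are visible to callers through UnmarshalStrict. A minimal sketch, editor's illustration only, assuming gopkg.in/yaml.v2 at this revision:

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	doc := []byte("a: 1\na: 2\n")

	// Non-strict decoding silently keeps the last value for the duplicated key.
	var loose map[string]int
	_ = yaml.Unmarshal(doc, &loose)
	fmt.Println(loose["a"]) // 2

	// Strict decoding goes through setMapIndex above and reports the duplicate,
	// wrapped in a *yaml.TypeError, e.g.:
	//   yaml: unmarshal errors:
	//     line 2: key "a" already set in map
	var strict map[string]int
	fmt.Println(yaml.UnmarshalStrict(doc, &strict))
}
```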

11
vendor/gopkg.in/yaml.v2/emitterc.go generated vendored

@@ -2,6 +2,7 @@ package yaml
import (
"bytes"
"fmt"
)
// Flush the buffer if needed.
@@ -664,7 +665,7 @@ func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
return yaml_emitter_emit_mapping_start(emitter, event)
default:
return yaml_emitter_set_emitter_error(emitter,
"expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ))
}
}
@@ -842,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event
return true
}
// Write an achor.
// Write an anchor.
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
if emitter.anchor_data.anchor == nil {
return true
@@ -995,9 +996,9 @@ func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
space_break = false
preceded_by_whitespace = false
followed_by_whitespace = false
previous_space = false
previous_break = false
followed_by_whitespace = false
previous_space = false
previous_break = false
)
emitter.scalar_data.value = value

136
vendor/gopkg.in/yaml.v2/encode.go generated vendored

@@ -3,12 +3,14 @@ package yaml
import (
"encoding"
"fmt"
"io"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
)
type encoder struct {
@@ -16,25 +18,39 @@ type encoder struct {
event yaml_event_t
out []byte
flow bool
// doneInit holds whether the initial stream_start_event has been
// emitted.
doneInit bool
}
func newEncoder() (e *encoder) {
e = &encoder{}
e.must(yaml_emitter_initialize(&e.emitter))
func newEncoder() *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_string(&e.emitter, &e.out)
yaml_emitter_set_unicode(&e.emitter, true)
e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
e.emit()
e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
e.emit()
return e
}
func (e *encoder) finish() {
e.must(yaml_document_end_event_initialize(&e.event, true))
func newEncoderWithWriter(w io.Writer) *encoder {
e := &encoder{}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, w)
yaml_emitter_set_unicode(&e.emitter, true)
return e
}
func (e *encoder) init() {
if e.doneInit {
return
}
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
e.doneInit = true
}
func (e *encoder) finish() {
e.emitter.open_ended = false
e.must(yaml_stream_end_event_initialize(&e.event))
yaml_stream_end_event_initialize(&e.event)
e.emit()
}
@@ -44,9 +60,7 @@ func (e *encoder) destroy() {
func (e *encoder) emit() {
// This will internally delete the e.event value.
if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
e.must(false)
}
e.must(yaml_emitter_emit(&e.emitter, &e.event))
}
func (e *encoder) must(ok bool) {
@@ -59,13 +73,28 @@ func (e *encoder) must(ok bool) {
}
}
func (e *encoder) marshalDoc(tag string, in reflect.Value) {
e.init()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
e.marshal(tag, in)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
}
func (e *encoder) marshal(tag string, in reflect.Value) {
if !in.IsValid() {
if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() {
e.nilv()
return
}
iface := in.Interface()
if m, ok := iface.(Marshaler); ok {
switch m := iface.(type) {
case time.Time, *time.Time:
// Although time.Time implements TextMarshaler,
// we don't want to treat it as a string for YAML
// purposes because YAML has special support for
// timestamps.
case Marshaler:
v, err := m.MarshalYAML()
if err != nil {
fail(err)
@@ -75,31 +104,34 @@ func (e *encoder) marshal(tag string, in reflect.Value) {
return
}
in = reflect.ValueOf(v)
} else if m, ok := iface.(encoding.TextMarshaler); ok {
case encoding.TextMarshaler:
text, err := m.MarshalText()
if err != nil {
fail(err)
}
in = reflect.ValueOf(string(text))
case nil:
e.nilv()
return
}
switch in.Kind() {
case reflect.Interface:
if in.IsNil() {
e.nilv()
} else {
e.marshal(tag, in.Elem())
}
e.marshal(tag, in.Elem())
case reflect.Map:
e.mapv(tag, in)
case reflect.Ptr:
if in.IsNil() {
e.nilv()
if in.Type() == ptrTimeType {
e.timev(tag, in.Elem())
} else {
e.marshal(tag, in.Elem())
}
case reflect.Struct:
e.structv(tag, in)
case reflect.Slice:
if in.Type() == timeType {
e.timev(tag, in)
} else {
e.structv(tag, in)
}
case reflect.Slice, reflect.Array:
if in.Type().Elem() == mapItemType {
e.itemsv(tag, in)
} else {
@@ -191,10 +223,10 @@ func (e *encoder) mappingv(tag string, f func()) {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
e.must(yaml_mapping_end_event_initialize(&e.event))
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
@@ -240,23 +272,36 @@ var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0
func (e *encoder) stringv(tag string, in reflect.Value) {
var style yaml_scalar_style_t
s := in.String()
rtag, rs := resolve("", s)
if rtag == yaml_BINARY_TAG {
if tag == "" || tag == yaml_STR_TAG {
tag = rtag
s = rs.(string)
} else if tag == yaml_BINARY_TAG {
canUsePlain := true
switch {
case !utf8.ValidString(s):
if tag == yaml_BINARY_TAG {
failf("explicitly tagged !!binary data must be base64-encoded")
} else {
}
if tag != "" {
failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
}
// It can't be encoded directly as YAML so use a binary tag
// and encode it as base64.
tag = yaml_BINARY_TAG
s = encodeBase64(s)
case tag == "":
// Check to see if it would resolve to a specific
// tag when encoded unquoted. If it doesn't,
// there's no need to quote it.
rtag, _ := resolve("", s)
canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s)
}
if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if strings.Contains(s, "\n") {
// Note: it's possible for user code to emit invalid YAML
// if they explicitly specify a tag and a string containing
// text that's incompatible with that tag.
switch {
case strings.Contains(s, "\n"):
style = yaml_LITERAL_SCALAR_STYLE
} else {
case canUsePlain:
style = yaml_PLAIN_SCALAR_STYLE
default:
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
}
e.emitScalar(s, "", tag, style)
}
@@ -281,9 +326,20 @@ func (e *encoder) uintv(tag string, in reflect.Value) {
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) timev(tag string, in reflect.Value) {
t := in.Interface().(time.Time)
s := t.Format(time.RFC3339Nano)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *encoder) floatv(tag string, in reflect.Value) {
// FIXME: Handle 64 bits here.
s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
// Issue #352: When formatting, use the precision of the underlying value
precision := 64
if in.Kind() == reflect.Float32 {
precision = 32
}
s := strconv.FormatFloat(in.Float(), 'g', -1, precision)
switch s {
case "+Inf":
s = ".inf"

5
vendor/gopkg.in/yaml.v2/go.mod generated vendored Normal file

@@ -0,0 +1,5 @@
module "gopkg.in/yaml.v2"
require (
"gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405
)

20
vendor/gopkg.in/yaml.v2/readerc.go generated vendored

@@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
panic("read handler must be set")
}
// [Go] This function was changed to guarantee the requested length size at EOF.
// The fact we need to do this is pretty awful, but the description above implies
// for that to be the case, and there are tests
// If the EOF flag is set and the raw buffer is empty, do nothing.
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
// [Go] ACTUALLY! Read the documentation of this function above.
// This is just broken. To return true, we need to have the
// given length in the buffer. Not doing that means every single
// check that calls this function to make sure the buffer has a
// given length is Go) panicking; or C) accessing invalid memory.
//return true
}
// Return if the buffer contains enough characters.
@@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
break
}
}
// [Go] Read the documentation of this function above. To return true,
// we need to have the given length in the buffer. Not doing that means
// every single check that calls this function to make sure the buffer
// has a given length is Go) panicking; or C) accessing invalid memory.
// This happens here due to the EOF above breaking early.
for buffer_len < length {
parser.buffer[buffer_len] = 0
buffer_len++
}
parser.buffer = parser.buffer[:buffer_len]
return true
}

80
vendor/gopkg.in/yaml.v2/resolve.go generated vendored

@@ -6,7 +6,7 @@ import (
"regexp"
"strconv"
"strings"
"unicode/utf8"
"time"
)
type resolveMapItem struct {
@@ -75,7 +75,7 @@ func longTag(tag string) string {
func resolvableTag(tag string) bool {
switch tag {
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
return true
}
return false
@@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
switch tag {
case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
return
case yaml_FLOAT_TAG:
if rtag == yaml_INT_TAG {
switch v := out.(type) {
case int64:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
case int:
rtag = yaml_FLOAT_TAG
out = float64(v)
return
}
}
}
failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
}()
@@ -125,6 +138,15 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
case 'D', 'S':
// Int, float, or timestamp.
// Only try values as a timestamp if the value is unquoted or there's an explicit
// !!timestamp tag.
if tag == "" || tag == yaml_TIMESTAMP_TAG {
t, ok := parseTimestamp(in)
if ok {
return yaml_TIMESTAMP_TAG, t
}
}
plain := strings.Replace(in, "_", "", -1)
intv, err := strconv.ParseInt(plain, 0, 64)
if err == nil {
@@ -158,28 +180,20 @@ func resolve(tag string, in string) (rtag string, out interface{}) {
return yaml_INT_TAG, uintv
}
} else if strings.HasPrefix(plain, "-0b") {
intv, err := strconv.ParseInt(plain[3:], 2, 64)
intv, err := strconv.ParseInt("-" + plain[3:], 2, 64)
if err == nil {
if intv == int64(int(intv)) {
return yaml_INT_TAG, -int(intv)
if true || intv == int64(int(intv)) {
return yaml_INT_TAG, int(intv)
} else {
return yaml_INT_TAG, -intv
return yaml_INT_TAG, intv
}
}
}
// XXX Handle timestamps here.
default:
panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
}
}
if tag == yaml_BINARY_TAG {
return yaml_BINARY_TAG, in
}
if utf8.ValidString(in) {
return yaml_STR_TAG, in
}
return yaml_BINARY_TAG, encodeBase64(in)
return yaml_STR_TAG, in
}
// encodeBase64 encodes s as base64 that is broken up into multiple lines
@@ -206,3 +220,39 @@ func encodeBase64(s string) string {
}
return string(out[:k])
}
// This is a subset of the formats allowed by the regular expression
// defined at http://yaml.org/type/timestamp.html.
var allowedTimestampFormats = []string{
"2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields.
"2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t".
"2006-1-2 15:4:5.999999999", // space separated with no time zone
"2006-1-2", // date only
// Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5"
// from the set of examples.
}
// parseTimestamp parses s as a timestamp string and
// returns the timestamp and reports whether it succeeded.
// Timestamp formats are defined at http://yaml.org/type/timestamp.html
func parseTimestamp(s string) (time.Time, bool) {
// TODO write code to check all the formats supported by
// http://yaml.org/type/timestamp.html instead of using time.Parse.
// Quick check: all date formats start with YYYY-.
i := 0
for ; i < len(s); i++ {
if c := s[i]; c < '0' || c > '9' {
break
}
}
if i != 4 || i == len(s) || s[i] != '-' {
return time.Time{}, false
}
for _, format := range allowedTimestampFormats {
if t, err := time.Parse(format, s); err == nil {
return t, true
}
}
return time.Time{}, false
}
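
The practical effect of the timestamp support added above, sketched from the caller's side (editor's illustration only, assuming yaml.v2 at this revision):

```Go
package main

import (
	"fmt"
	"time"

	"gopkg.in/yaml.v2"
)

func main() {
	in := []byte("when: 2001-12-14 21:59:43.10\n")

	// Into a time.Time field, the unquoted scalar now resolves through the
	// !!timestamp tag and parseTimestamp above.
	var typed struct{ When time.Time }
	_ = yaml.Unmarshal(in, &typed)
	fmt.Println(typed.When.Format(time.RFC3339Nano)) // 2001-12-14T21:59:43.1Z

	// Into interface{}, the same scalar is still delivered as a string, per
	// the backward-compatibility note added to decode.go.
	var generic map[string]interface{}
	_ = yaml.Unmarshal(in, &generic)
	fmt.Printf("%T %q\n", generic["when"], generic["when"]) // string "2001-12-14 21:59:43.10"
}
```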

40
vendor/gopkg.in/yaml.v2/scannerc.go generated vendored

@@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
required := parser.flow_level == 0 && parser.indent == parser.mark.column
// A simple key is required only when it is the first token in the current
// line. Therefore it is always allowed. But we add a check anyway.
if required && !parser.simple_key_allowed {
panic("should not happen")
}
//
// If the current position may start a simple key, save it.
//
@@ -1944,7 +1938,7 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
} else {
// It's either the '!' tag or not really a tag handle. If it's a %TAG
// directive, it's an error. If it's a tag token, it must be a part of URI.
if directive && !(s[0] == '!' && s[1] == 0) {
if directive && string(s) != "!" {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected '!'")
return false
@@ -1959,12 +1953,12 @@ func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_ma
func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
//size_t length = head ? strlen((char *)head) : 0
var s []byte
length := len(head)
hasTag := len(head) > 0
// Copy the head if needed.
//
// Note that we don't copy the leading '!' character.
if length > 0 {
if len(head) > 1 {
s = append(s, head[1:]...)
}
@@ -1997,15 +1991,14 @@ func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte
}
} else {
s = read(parser, s)
length++
}
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
hasTag = true
}
// Check if the tag is non-empty.
if length == 0 {
if !hasTag {
yaml_parser_set_scanner_tag_error(parser, directive,
start_mark, "did not find expected tag URI")
return false
@@ -2476,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
}
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
// Check if we are at the end of the scalar.
if single {
if parser.buffer[parser.buffer_pos] == '\'' {
@@ -2488,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si
}
// Consume blank characters.
if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
return false
}
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Consume a space or a tab character.
@@ -2593,19 +2586,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
// Consume non-blank characters.
for !is_blankz(parser.buffer, parser.buffer_pos) {
// Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
if parser.flow_level > 0 &&
parser.buffer[parser.buffer_pos] == ':' &&
!is_blankz(parser.buffer, parser.buffer_pos+1) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found unexpected ':'")
return false
}
// Check for indicators that may end a plain scalar.
if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
(parser.flow_level > 0 &&
(parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
(parser.buffer[parser.buffer_pos] == ',' ||
parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
parser.buffer[parser.buffer_pos] == '}')) {
@@ -2657,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b
for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
if is_blank(parser.buffer, parser.buffer_pos) {
// Check for tab character that abuse indentation.
// Check for tab characters that abuse indentation.
if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
start_mark, "found a tab character that violate indentation")
start_mark, "found a tab character that violates indentation")
return false
}

9
vendor/gopkg.in/yaml.v2/sorter.go generated vendored

@@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool {
}
var ai, bi int
var an, bn int64
if ar[i] == '0' || br[i] == '0' {
for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- {
if ar[j] != '0' {
an = 1
bn = 1
break
}
}
}
for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
an = an*10 + int64(ar[ai]-'0')
}

65
vendor/gopkg.in/yaml.v2/writerc.go generated vendored

@@ -18,72 +18,9 @@ func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
return true
}
// If the output encoding is UTF-8, we don't need to recode the buffer.
if emitter.encoding == yaml_UTF8_ENCODING {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
return true
}
// Recode the buffer into the raw buffer.
var low, high int
if emitter.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
pos := 0
for pos < emitter.buffer_pos {
// See the "reader.c" code for more details on UTF-8 encoding. Note
// that we assume that the buffer contains a valid UTF-8 sequence.
// Read the next UTF-8 character.
octet := emitter.buffer[pos]
var w int
var value rune
switch {
case octet&0x80 == 0x00:
w, value = 1, rune(octet&0x7F)
case octet&0xE0 == 0xC0:
w, value = 2, rune(octet&0x1F)
case octet&0xF0 == 0xE0:
w, value = 3, rune(octet&0x0F)
case octet&0xF8 == 0xF0:
w, value = 4, rune(octet&0x07)
}
for k := 1; k < w; k++ {
octet = emitter.buffer[pos+k]
value = (value << 6) + (rune(octet) & 0x3F)
}
pos += w
// Write the character.
if value < 0x10000 {
var b [2]byte
b[high] = byte(value >> 8)
b[low] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
} else {
// Write the character using a surrogate pair (check "reader.c").
var b [4]byte
value -= 0x10000
b[high] = byte(0xD8 + (value >> 18))
b[low] = byte((value >> 10) & 0xFF)
b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
b[low+2] = byte(value & 0xFF)
emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
}
}
// Write the raw buffer.
if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
}
emitter.buffer_pos = 0
emitter.raw_buffer = emitter.raw_buffer[:0]
return true
}
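The rewritten yaml_emitter_flush above keeps only the UTF-8 path: the working buffer is handed to the write handler as-is and the UTF-16 recoding loop is dropped, which matches the fact that the Go API never selects a non-UTF-8 emitter encoding. If a caller still wants UTF-16 output, one option is to transcode at the io.Writer boundary instead. This is only a sketch and assumes golang.org/x/text is available; it is not part of the vendored change.

package main

import (
	"log"
	"os"

	"golang.org/x/text/encoding/unicode"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Wrap stdout so the UTF-8 bytes produced by the YAML encoder are
	// transcoded to UTF-16LE (with a BOM) on the way out.
	utf16 := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewEncoder()
	w := utf16.Writer(os.Stdout)

	enc := yaml.NewEncoder(w)
	if err := enc.Encode(map[string]string{"greeting": "héllo"}); err != nil {
		log.Fatal(err)
	}
	// Close flushes the YAML emitter; the transcoding wrapper itself may
	// still buffer a partial rune if writes split UTF-8 sequences.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}
}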

vendor/gopkg.in/yaml.v2/yaml.go (generated, vendored, 125 changed lines)

@@ -9,6 +9,7 @@ package yaml
import (
"errors"
"fmt"
"io"
"reflect"
"strings"
"sync"
@@ -81,12 +82,58 @@ func Unmarshal(in []byte, out interface{}) (err error) {
}
// UnmarshalStrict is like Unmarshal except that any fields that are found
// in the data that do not have corresponding struct members will result in
// in the data that do not have corresponding struct members, or mapping
// keys that are duplicates, will result in
// an error.
func UnmarshalStrict(in []byte, out interface{}) (err error) {
return unmarshal(in, out, true)
}
// A Decoder reads and decodes YAML values from an input stream.
type Decoder struct {
strict bool
parser *parser
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may read
// data from r beyond the YAML values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{
parser: newParserFromReader(r),
}
}
// SetStrict sets whether strict decoding behaviour is enabled when
// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict.
func (dec *Decoder) SetStrict(strict bool) {
dec.strict = strict
}
// Decode reads the next YAML-encoded value from its input
// and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about the
// conversion of YAML into a Go value.
func (dec *Decoder) Decode(v interface{}) (err error) {
d := newDecoder(dec.strict)
defer handleErr(&err)
node := dec.parser.parse()
if node == nil {
return io.EOF
}
out := reflect.ValueOf(v)
if out.Kind() == reflect.Ptr && !out.IsNil() {
out = out.Elem()
}
d.unmarshal(node, out)
if len(d.terrors) > 0 {
return &TypeError{d.terrors}
}
return nil
}
func unmarshal(in []byte, out interface{}, strict bool) (err error) {
defer handleErr(&err)
d := newDecoder(strict)
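The Decoder added above wraps a parser around an io.Reader, so a stream holding several YAML documents can be decoded one document at a time, with Decode returning io.EOF once the stream is exhausted; SetStrict enables the same unknown-field and duplicate-key checks that UnmarshalStrict now documents. A minimal usage sketch against the vendored package, not part of the diff:

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Two documents separated by "---" in a single stream.
	in := strings.NewReader("name: alpha\n---\nname: beta\n")

	dec := yaml.NewDecoder(in)
	dec.SetStrict(true) // reject unknown fields and duplicate mapping keys

	for {
		var doc struct {
			Name string `yaml:"name"`
		}
		err := dec.Decode(&doc)
		if err == io.EOF {
			break // no more documents in the stream
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(doc.Name) // prints "alpha", then "beta"
	}
}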
@@ -110,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
// of the generated document will reflect the structure of the value itself.
// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
//
// Struct fields are only unmarshalled if they are exported (have an upper case
// first letter), and are unmarshalled using the field name lowercased as the
// Struct fields are only marshalled if they are exported (have an upper case
// first letter), and are marshalled using the field name lowercased as the
// default key. Custom keys may be defined via the "yaml" name in the field
// tag: the content preceding the first comma is used as the key, and the
// following comma-separated options are used to tweak the marshalling process.
@@ -125,7 +172,10 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
//
// omitempty Only include the field if it's not set to the zero
// value for the type or to empty slices or maps.
// Does not apply to zero valued structs.
// Zero valued structs will be omitted if all their public
// fields are zero, unless they implement an IsZero
// method (see the IsZeroer interface type), in which
// case the field will be included if that method returns true.
//
// flow Marshal using a flow style (useful for structs,
// sequences and maps).
@@ -140,7 +190,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) {
// For example:
//
// type T struct {
// F int "a,omitempty"
// F int `yaml:"a,omitempty"`
// B int
// }
// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
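The omitempty note above is the behavioural change worth calling out: a zero-valued struct field is now dropped when all of its public fields are zero, unless the type implements IsZero, in which case that method decides. A small sketch against the vendored package; the Window and Config types are invented for the example:

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

// Window satisfies the IsZeroer interface, so omitempty consults it.
type Window struct {
	From, To int
}

func (w Window) IsZero() bool { return w.From == 0 && w.To == 0 }

type Config struct {
	Name   string `yaml:"name"`
	Window Window `yaml:"window,omitempty"`
}

func main() {
	out, err := yaml.Marshal(Config{Name: "job"})
	if err != nil {
		log.Fatal(err)
	}
	// The zero Window is omitted, so the output is just "name: job".
	fmt.Print(string(out))
}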
@@ -150,12 +200,47 @@ func Marshal(in interface{}) (out []byte, err error) {
defer handleErr(&err)
e := newEncoder()
defer e.destroy()
e.marshal("", reflect.ValueOf(in))
e.marshalDoc("", reflect.ValueOf(in))
e.finish()
out = e.out
return
}
// An Encoder writes YAML values to an output stream.
type Encoder struct {
encoder *encoder
}
// NewEncoder returns a new encoder that writes to w.
// The Encoder should be closed after use to flush all data
// to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{
encoder: newEncoderWithWriter(w),
}
}
// Encode writes the YAML encoding of v to the stream.
// If multiple items are encoded to the stream, the
// second and subsequent document will be preceded
// with a "---" document separator, but the first will not.
//
// See the documentation for Marshal for details about the conversion of Go
// values to YAML.
func (e *Encoder) Encode(v interface{}) (err error) {
defer handleErr(&err)
e.encoder.marshalDoc("", reflect.ValueOf(v))
return nil
}
// Close closes the encoder by writing any remaining data.
// It does not write a stream terminating string "...".
func (e *Encoder) Close() (err error) {
defer handleErr(&err)
e.encoder.finish()
return nil
}
func handleErr(err *error) {
if v := recover(); v != nil {
if e, ok := v.(yamlError); ok {
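The Encoder introduced above is the streaming counterpart of Marshal: every Encode call after the first is preceded by a "---" document separator, and Close flushes whatever the emitter is still buffering. A minimal usage sketch, not part of the diff:

package main

import (
	"bytes"
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	var buf bytes.Buffer

	enc := yaml.NewEncoder(&buf)
	for _, doc := range []map[string]int{{"a": 1}, {"b": 2}} {
		if err := enc.Encode(doc); err != nil {
			log.Fatal(err)
		}
	}
	// Close flushes the remaining buffered output; without it the last
	// document may be truncated.
	if err := enc.Close(); err != nil {
		log.Fatal(err)
	}

	fmt.Print(buf.String())
	// a: 1
	// ---
	// b: 2
}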
@@ -211,6 +296,9 @@ type fieldInfo struct {
Num int
OmitEmpty bool
Flow bool
// Id holds the unique field identifier, so we can cheaply
// check for field duplicates without maintaining an extra map.
Id int
// Inline holds the field index if the field is part of an inlined struct.
Inline []int
@@ -290,6 +378,7 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
} else {
finfo.Inline = append([]int{i}, finfo.Inline...)
}
finfo.Id = len(fieldsList)
fieldsMap[finfo.Key] = finfo
fieldsList = append(fieldsList, finfo)
}
@@ -311,11 +400,16 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return nil, errors.New(msg)
}
info.Id = len(fieldsList)
fieldsList = append(fieldsList, info)
fieldsMap[info.Key] = info
}
sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
sinfo = &structInfo{
FieldsMap: fieldsMap,
FieldsList: fieldsList,
InlineMap: inlineMap,
}
fieldMapMutex.Lock()
structMap[st] = sinfo
@@ -323,8 +417,23 @@ func getStructInfo(st reflect.Type) (*structInfo, error) {
return sinfo, nil
}
// IsZeroer is used to check whether an object is zero to
// determine whether it should be omitted when marshaling
// with the omitempty flag. One notable implementation
// is time.Time.
type IsZeroer interface {
IsZero() bool
}
func isZero(v reflect.Value) bool {
switch v.Kind() {
kind := v.Kind()
if z, ok := v.Interface().(IsZeroer); ok {
if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() {
return true
}
return z.IsZero()
}
switch kind {
case reflect.String:
return len(v.String()) == 0
case reflect.Interface, reflect.Ptr:

vendor/gopkg.in/yaml.v2/yamlh.go (generated, vendored, 30 changed lines)

@@ -1,6 +1,7 @@
package yaml
import (
"fmt"
"io"
)
@@ -239,6 +240,27 @@ const (
yaml_MAPPING_END_EVENT // A MAPPING-END event.
)
var eventStrings = []string{
yaml_NO_EVENT: "none",
yaml_STREAM_START_EVENT: "stream start",
yaml_STREAM_END_EVENT: "stream end",
yaml_DOCUMENT_START_EVENT: "document start",
yaml_DOCUMENT_END_EVENT: "document end",
yaml_ALIAS_EVENT: "alias",
yaml_SCALAR_EVENT: "scalar",
yaml_SEQUENCE_START_EVENT: "sequence start",
yaml_SEQUENCE_END_EVENT: "sequence end",
yaml_MAPPING_START_EVENT: "mapping start",
yaml_MAPPING_END_EVENT: "mapping end",
}
func (e yaml_event_type_t) String() string {
if e < 0 || int(e) >= len(eventStrings) {
return fmt.Sprintf("unknown event %d", e)
}
return eventStrings[e]
}
// The event structure.
type yaml_event_t struct {
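The eventStrings table and String method above give every parser event a readable name, with a bounds check so unknown values format cleanly instead of panicking on an out-of-range index. The same idiom works for any small integer enum; here is a standalone sketch independent of the yaml package, using invented names:

package main

import "fmt"

type phase int

const (
	phaseIdle phase = iota
	phaseRunning
	phaseDone
)

// Index the slice by the enum value, mirroring eventStrings above.
var phaseStrings = []string{
	phaseIdle:    "idle",
	phaseRunning: "running",
	phaseDone:    "done",
}

func (p phase) String() string {
	if p < 0 || int(p) >= len(phaseStrings) {
		return fmt.Sprintf("unknown phase %d", int(p))
	}
	return phaseStrings[p]
}

func main() {
	fmt.Println(phaseRunning) // running
	fmt.Println(phase(42))    // unknown phase 42
}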
@@ -521,9 +543,9 @@ type yaml_parser_t struct {
read_handler yaml_read_handler_t // Read handler.
input_file io.Reader // File input data.
input []byte // String input data.
input_pos int
input_reader io.Reader // File input data.
input []byte // String input data.
input_pos int
eof bool // EOF flag
@@ -632,7 +654,7 @@ type yaml_emitter_t struct {
write_handler yaml_write_handler_t // Write handler.
output_buffer *[]byte // String output data.
output_file io.Writer // File output data.
output_writer io.Writer // File output data.
buffer []byte // The working buffer.
buffer_pos int // The current position of the buffer.


@@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// Package v1alpha1 is the v1alpha1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1alpha1


@@ -1,107 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by go-to-protobuf. Do not edit it manually!
syntax = 'proto2';
package k8s.io.api.admissionregistration.v1alpha1;
import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/generated.proto";
import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
// Package-wide variables from generator "generated".
option go_package = "v1alpha1";
// Initializer describes the name and the failure policy of an initializer, and
// what resources it applies to.
message Initializer {
// Name is the identifier of the initializer. It will be added to the
// object that needs to be initialized.
// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
// of the organization.
// Required
optional string name = 1;
// Rules describes what resources/subresources the initializer cares about.
// The initializer cares about an operation if it matches _any_ Rule.
// Rule.Resources must not include subresources.
repeated Rule rules = 2;
}
// InitializerConfiguration describes the configuration of initializers.
message InitializerConfiguration {
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
// Initializers is a list of resources and their default initializers
// Order-sensitive.
// When merging multiple InitializerConfigurations, we sort the initializers
// from different InitializerConfigurations by the name of the
// InitializerConfigurations; the order of the initializers from the same
// InitializerConfiguration is preserved.
// +patchMergeKey=name
// +patchStrategy=merge
// +optional
repeated Initializer initializers = 2;
}
// InitializerConfigurationList is a list of InitializerConfiguration.
message InitializerConfigurationList {
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
// List of InitializerConfiguration.
repeated InitializerConfiguration items = 2;
}
// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended
// to make sure that all the tuple expansions are valid.
message Rule {
// APIGroups is the API groups the resources belong to. '*' is all groups.
// If '*' is present, the length of the slice must be one.
// Required.
repeated string apiGroups = 1;
// APIVersions is the API versions the resources belong to. '*' is all versions.
// If '*' is present, the length of the slice must be one.
// Required.
repeated string apiVersions = 2;
// Resources is a list of resources this rule applies to.
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
// '*' means all resources, but not subresources.
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
repeated string resources = 3;
}


@@ -1,106 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InitializerConfiguration describes the configuration of initializers.
type InitializerConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Initializers is a list of resources and their default initializers
// Order-sensitive.
// When merging multiple InitializerConfigurations, we sort the initializers
// from different InitializerConfigurations by the name of the
// InitializerConfigurations; the order of the initializers from the same
// InitializerConfiguration is preserved.
// +patchMergeKey=name
// +patchStrategy=merge
// +optional
Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// InitializerConfigurationList is a list of InitializerConfiguration.
type InitializerConfigurationList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of InitializerConfiguration.
Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// Initializer describes the name and the failure policy of an initializer, and
// what resources it applies to.
type Initializer struct {
// Name is the identifier of the initializer. It will be added to the
// object that needs to be initialized.
// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
// of the organization.
// Required
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// Rules describes what resources/subresources the initializer cares about.
// The initializer cares about an operation if it matches _any_ Rule.
// Rule.Resources must not include subresources.
Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
}
// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended
// to make sure that all the tuple expansions are valid.
type Rule struct {
// APIGroups is the API groups the resources belong to. '*' is all groups.
// If '*' is present, the length of the slice must be one.
// Required.
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`
// APIVersions is the API versions the resources belong to. '*' is all versions.
// If '*' is present, the length of the slice must be one.
// Required.
APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`
// Resources is a list of resources this rule applies to.
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
// '*' means all resources, but not subresources.
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
// If wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
}


@@ -1,71 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Initializer = map[string]string{
"": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.",
"name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required",
"rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.",
}
func (Initializer) SwaggerDoc() map[string]string {
return map_Initializer
}
var map_InitializerConfiguration = map[string]string{
"": "InitializerConfiguration describes the configuration of initializers.",
"metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
"initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.",
}
func (InitializerConfiguration) SwaggerDoc() map[string]string {
return map_InitializerConfiguration
}
var map_InitializerConfigurationList = map[string]string{
"": "InitializerConfigurationList is a list of InitializerConfiguration.",
"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
"items": "List of InitializerConfiguration.",
}
func (InitializerConfigurationList) SwaggerDoc() map[string]string {
return map_InitializerConfigurationList
}
var map_Rule = map[string]string{
"": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.",
"apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
"resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
}
func (Rule) SwaggerDoc() map[string]string {
return map_Rule
}
// AUTO-GENERATED FUNCTIONS END HERE


@@ -16,10 +16,10 @@ limitations under the License.
// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true
// +groupName=admissionregistration.k8s.io
// Package v1beta1 is the v1beta1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1beta1


@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
// DO NOT EDIT!
/*
Package v1beta1 is a generated protocol buffer package.
@@ -506,24 +505,6 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)

Some files were not shown because too many files have changed in this diff.