Mirror of https://github.com/kubernetes/sample-controller.git (synced 2026-02-15 00:07:52 +08:00)

Compare commits: master...kubernetes, 1 commit, 95a1066305

go.mod (12 lines changed)
@@ -8,14 +8,14 @@ godebug default=go1.25
require (
    golang.org/x/time v0.14.0
    k8s.io/api v0.0.0-20260212204713-44213e038791
    k8s.io/apimachinery v0.0.0-20260212204335-aed281c35483
    k8s.io/client-go v0.0.0-20260212205228-7aaede787267
    k8s.io/code-generator v0.0.0-20260212210152-c9a3346aa756
    k8s.io/api v0.36.0-alpha.1
    k8s.io/apimachinery v0.36.0-alpha.1
    k8s.io/client-go v0.36.0-alpha.1
    k8s.io/code-generator v0.36.0-alpha.1
    k8s.io/klog/v2 v2.130.1
    k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4
    k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2
    sigs.k8s.io/structured-merge-diff/v6 v6.3.2
    k8s.io/utils v0.0.0-20260108192941-914a6e750570
    sigs.k8s.io/structured-merge-diff/v6 v6.3.1
)

require (
go.sum (24 lines changed)

@@ -115,27 +115,27 @@ gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.0.0-20260212204713-44213e038791 h1:EmU9p9/78PfLzMIiBR7jLGrBDWQadw9sP0N0cwxhPWw=
k8s.io/api v0.0.0-20260212204713-44213e038791/go.mod h1:lqM03fgO3Hxw6VR9c5bLCKvc1v/PIcun8qcqYrhZJeU=
k8s.io/apimachinery v0.0.0-20260212204335-aed281c35483 h1:t6T/l7vW88+5v57Tp3Vnwg9WYfPaydspTn+V3YNGCP0=
k8s.io/apimachinery v0.0.0-20260212204335-aed281c35483/go.mod h1:7mgr/dli8ofwAbcIQXetFVX1fbOYsOYojq3AUbybVmQ=
k8s.io/client-go v0.0.0-20260212205228-7aaede787267 h1:aGYlF0Fs64sW91eYl5fkcjpmufNMWpVjczBJKwsWBe0=
k8s.io/client-go v0.0.0-20260212205228-7aaede787267/go.mod h1:1O9M9BLxIBTUHXSOdPux8rk9Iw0qtGJvWp1Y80EX9c8=
k8s.io/code-generator v0.0.0-20260212210152-c9a3346aa756 h1:O0MAofdXy+S56W1Gp9fFQSn/5//zVAdQTmWiUQa9rLM=
k8s.io/code-generator v0.0.0-20260212210152-c9a3346aa756/go.mod h1:G8ZQHj4mTDmCxTF7/RjqEJxGg0O6KxuCJ8+EygZuzK0=
k8s.io/api v0.36.0-alpha.1 h1:iuVKXufxgq4sTsIJRZ3Sz2M3qAqfcNjestKRxfkk6Qg=
k8s.io/api v0.36.0-alpha.1/go.mod h1:tmQdQJzGz7/vieQ6wWJlG9AqQPEyE/BjODuCPePopkk=
k8s.io/apimachinery v0.36.0-alpha.1 h1:MrQLU+TD3A2/ywQiTHEJ5BEKKk3XHpy0RkT98V0XdKI=
k8s.io/apimachinery v0.36.0-alpha.1/go.mod h1:hQkG060WLAG1TIkYsu5lj3tb6YdNpKe5Zrr2UPGg+/k=
k8s.io/client-go v0.36.0-alpha.1 h1:6LgGjlF/C1U7gVn43ugh9hDyR2ToGZk4zKMwIPocYh8=
k8s.io/client-go v0.36.0-alpha.1/go.mod h1:lQ0Wzo39wPLju4QEVKX3WeLCvElRinyiHZjkeWy2gZ4=
k8s.io/code-generator v0.36.0-alpha.1 h1:hbtoV6I20HR587oRagZXWnZ06VR7bgBzhJHvuNfD0PA=
k8s.io/code-generator v0.36.0-alpha.1/go.mod h1:oKnm+HAZGkoccqNUEuzLK4CRJK3Z4kgwsrm7+ZWW6rA=
k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b h1:gMplByicHV/TJBizHd9aVEsTYoJBnnUAT5MHlTkbjhQ=
k8s.io/gengo/v2 v2.0.0-20250922181213-ec3ebc5fd46b/go.mod h1:CgujABENc3KuTrcsdpGmrrASjtQsWCT7R99mEV4U/fM=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY=
k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2 h1:AZYQSJemyQB5eRxqcPky+/7EdBj0xi3g0ZcxxJ7vbWU=
k8s.io/utils v0.0.0-20260210185600-b8788abfbbc2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY=
k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v6 v6.3.2 h1:kwVWMx5yS1CrnFWA/2QHyRVJ8jM6dBA80uLmm0wJkk8=
sigs.k8s.io/structured-merge-diff/v6 v6.3.2/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=
sigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=
sigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=
@@ -35,6 +35,10 @@ import (
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any field management, validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// Deprecated: NewClientset replaces this with support for field management, which significantly improves
// server side apply testing. NewClientset is only available when apply configurations are generated (e.g.
// via --with-applyconfig).
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
    o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
    for _, obj := range objects {

@@ -95,10 +99,6 @@ func (c *Clientset) IsWatchListSemanticsUnSupported() bool {
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
//
// Compared to NewSimpleClientset, the Clientset returned here supports field tracking and thus
// server-side apply. Beware though that support in that for CRDs is missing
// (https://github.com/kubernetes/kubernetes/issues/126850).
func NewClientset(objects ...runtime.Object) *Clientset {
    o := testing.NewFieldManagedObjectTracker(
        scheme,
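As a usage illustration (not part of this diff), here is a minimal sketch of exercising the generated fake clientset in a unit test. The package paths and the SamplecontrollerV1alpha1().Foos() accessor follow sample-controller's generated code, but treat them as assumptions to verify against your checkout.

```go
package controller_test

import (
    "context"
    "testing"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    samplev1alpha1 "k8s.io/sample-controller/pkg/apis/samplecontroller/v1alpha1"
    "k8s.io/sample-controller/pkg/generated/clientset/versioned/fake"
)

func TestGetFoo(t *testing.T) {
    // Seed the fake object tracker with an existing Foo.
    existing := &samplev1alpha1.Foo{
        ObjectMeta: metav1.ObjectMeta{Name: "example-foo", Namespace: "default"},
    }

    // NewClientset (field-managed tracker) is preferred over the deprecated
    // NewSimpleClientset when apply configurations are generated.
    client := fake.NewClientset(existing)

    foo, err := client.SamplecontrollerV1alpha1().Foos("default").Get(
        context.Background(), "example-foo", metav1.GetOptions{})
    if err != nil {
        t.Fatalf("unexpected error: %v", err)
    }
    if foo.Name != "example-foo" {
        t.Errorf("got name %q, want %q", foo.Name, "example-foo")
    }
}
```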
@@ -19,7 +19,6 @@ limitations under the License.
package externalversions

import (
    context "context"
    reflect "reflect"
    sync "sync"
    time "time"

@@ -27,7 +26,6 @@ import (
    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
    schema "k8s.io/apimachinery/pkg/runtime/schema"
    wait "k8s.io/apimachinery/pkg/util/wait"
    cache "k8s.io/client-go/tools/cache"
    versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
    internalinterfaces "k8s.io/sample-controller/pkg/generated/informers/externalversions/internalinterfaces"
@@ -141,10 +139,6 @@ func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResy
}

func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
    f.StartWithContext(wait.ContextForChannel(stopCh))
}

func (f *sharedInformerFactory) StartWithContext(ctx context.Context) {
    f.lock.Lock()
    defer f.lock.Unlock()

@@ -154,9 +148,15 @@ func (f *sharedInformerFactory) StartWithContext(ctx context.Context) {

    for informerType, informer := range f.informers {
        if !f.startedInformers[informerType] {
            f.wg.Go(func() {
                informer.RunWithContext(ctx)
            })
            f.wg.Add(1)
            // We need a new variable in each loop iteration,
            // otherwise the goroutine would use the loop variable
            // and that keeps changing.
            informer := informer
            go func() {
                defer f.wg.Done()
                informer.Run(stopCh)
            }()
            f.startedInformers[informerType] = true
        }
    }
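The hunk above switches between two ways of launching informer goroutines: one side relies on sync.WaitGroup.Go (new in Go 1.25) together with the context-aware RunWithContext, the other uses the classic wg.Add(1) / go func() / wg.Done() pattern plus a per-iteration copy of the loop variable, which was needed before Go 1.22 changed loop-variable scoping. Below is a standalone sketch of both patterns, using a made-up runner type instead of the real informer interface.

```go
package main

import (
    "fmt"
    "sync"
)

type runner struct{ name string }

func (r runner) run() { fmt.Println("running", r.name) }

func main() {
    runners := []runner{{"a"}, {"b"}, {"c"}}

    // Classic pattern: pair wg.Add(1) with a deferred wg.Done() inside the
    // goroutine and copy the loop variable before capturing it (required
    // before Go 1.22, when the loop variable was shared across iterations).
    var wg sync.WaitGroup
    for _, r := range runners {
        r := r
        wg.Add(1)
        go func() {
            defer wg.Done()
            r.run()
        }()
    }
    wg.Wait()

    // Since Go 1.25, sync.WaitGroup.Go bundles Add, the goroutine launch, and Done.
    var wg2 sync.WaitGroup
    for _, r := range runners {
        wg2.Go(func() { r.run() })
    }
    wg2.Wait()
}
```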
@@ -173,11 +173,6 @@ func (f *sharedInformerFactory) Shutdown() {
}

func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
    result := f.WaitForCacheSyncWithContext(wait.ContextForChannel(stopCh))
    return result.Synced
}

func (f *sharedInformerFactory) WaitForCacheSyncWithContext(ctx context.Context) cache.SyncResult {
    informers := func() map[reflect.Type]cache.SharedIndexInformer {
        f.lock.Lock()
        defer f.lock.Unlock()

@@ -191,31 +186,10 @@ func (f *sharedInformerFactory) WaitForCacheSyncWithContext(ctx context.Context)
        return informers
    }()

    // Wait for informers to sync, without polling.
    cacheSyncs := make([]cache.DoneChecker, 0, len(informers))
    for _, informer := range informers {
        cacheSyncs = append(cacheSyncs, informer.HasSyncedChecker())
    }
    cache.WaitFor(ctx, "" /* no logging */, cacheSyncs...)

    res := cache.SyncResult{
        Synced: make(map[reflect.Type]bool, len(informers)),
    }
    failed := false
    res := map[reflect.Type]bool{}
    for informType, informer := range informers {
        hasSynced := informer.HasSynced()
        if !hasSynced {
            failed = true
        }
        res.Synced[informType] = hasSynced
        res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
    }
    if failed {
        // context.Cause is more informative than ctx.Err().
        // This must be non-nil, otherwise WaitFor wouldn't have stopped
        // prematurely.
        res.Err = context.Cause(ctx)
    }

    return res
}
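Both channel-based wrappers above delegate to their context-based counterparts via wait.ContextForChannel, which adapts a stop channel into a context that is canceled when the channel closes. A small sketch of that bridging, assuming the apimachinery wait package in use here (where ContextForChannel returns just a context.Context, as the wrapper code above implies):

```go
package main

import (
    "fmt"

    "k8s.io/apimachinery/pkg/util/wait"
)

func main() {
    stopCh := make(chan struct{})

    // The derived context is canceled once stopCh is closed.
    ctx := wait.ContextForChannel(stopCh)

    close(stopCh)
    <-ctx.Done()
    fmt.Println("stop channel closed, context reports:", ctx.Err())
}
```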
@@ -254,46 +228,27 @@ func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internal
//    defer factory.WaitForStop() // Returns immediately if nothing was started.
//    genericInformer := factory.ForResource(resource)
//    typedInformer := factory.SomeAPIGroup().V1().SomeType()
//    handle, err := typeInformer.Informer().AddEventHandler(...)
//    if err != nil {
//        return fmt.Errorf("register event handler: %v", err)
//    }
//    defer typeInformer.Informer().RemoveEventHandler(handle) // Avoids leaking goroutines.
//    factory.StartWithContext(ctx) // Start processing these informers.
//    synced := factory.WaitForCacheSyncWithContext(ctx)
//    if err := synced.AsError(); err != nil {
//        return err
//    }
//    for v := range synced {
//        // Only if desired log some information similar to this.
//        fmt.Fprintf(os.Stdout, "cache synced: %s", v)
//    }
//
//    // Also make sure that all of the initial cache events have been delivered.
//    if !WaitFor(ctx, "event handler sync", handle.HasSyncedChecker()) {
//        // Must have failed because of context.
//        return fmt.Errorf("sync event handler: %w", context.Cause(ctx))
//    factory.Start(ctx.Done()) // Start processing these informers.
//    synced := factory.WaitForCacheSync(ctx.Done())
//    for v, ok := range synced {
//        if !ok {
//            fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
//            return
//        }
//    }
//
//    // Creating informers can also be created after Start, but then
//    // Start must be called again:
//    anotherGenericInformer := factory.ForResource(resource)
//    factory.StartWithContext(ctx)
//    factory.Start(ctx.Done())
type SharedInformerFactory interface {
    internalinterfaces.SharedInformerFactory

    // Start initializes all requested informers. They are handled in goroutines
    // which run until the stop channel gets closed.
    // Warning: Start does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
    //
    // Contextual logging: StartWithContext should be used instead of Start in code which supports contextual logging.
    Start(stopCh <-chan struct{})

    // StartWithContext initializes all requested informers. They are handled in goroutines
    // which run until the context gets canceled.
    // Warning: StartWithContext does not block. When run in a go-routine, it will race with a later WaitForCacheSync.
    StartWithContext(ctx context.Context)

    // Shutdown marks a factory as shutting down. At that point no new
    // informers can be started anymore and Start will return without
    // doing anything.
@@ -308,14 +263,8 @@ type SharedInformerFactory interface {

    // WaitForCacheSync blocks until all started informers' caches were synced
    // or the stop channel gets closed.
    //
    // Contextual logging: WaitForCacheSyncWithContext should be used instead of WaitForCacheSync in code which supports contextual logging. It also returns a more useful result.
    WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool

    // WaitForCacheSyncWithContext blocks until all started informers' caches were synced
    // or the context gets canceled.
    WaitForCacheSyncWithContext(ctx context.Context) cache.SyncResult

    // ForResource gives generic access to a shared informer of the matching type.
    ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
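To tie the interface together, here is a hedged sketch of wiring the generated SharedInformerFactory to the Foo informer and waiting for its cache with the channel-based methods shown above. The Samplecontroller().V1alpha1().Foos() accessor chain mirrors sample-controller's generated informers and main.go, but is an assumption here rather than part of this diff.

```go
package example

import (
    "context"
    "fmt"
    "time"

    "k8s.io/client-go/tools/cache"

    versioned "k8s.io/sample-controller/pkg/generated/clientset/versioned"
    informers "k8s.io/sample-controller/pkg/generated/informers/externalversions"
)

// runFooInformer starts the generated informer factory for Foo objects and
// blocks until the cache has synced or ctx is canceled. Illustrative only.
func runFooInformer(ctx context.Context, client versioned.Interface) error {
    factory := informers.NewSharedInformerFactory(client, 30*time.Second)

    fooInformer := factory.Samplecontroller().V1alpha1().Foos().Informer()
    fooInformer.AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) {
            fmt.Println("Foo added:", obj)
        },
    })

    // Start is non-blocking; WaitForCacheSync then reports per-type sync status.
    factory.Start(ctx.Done())
    for informerType, ok := range factory.WaitForCacheSync(ctx.Done()) {
        if !ok {
            return fmt.Errorf("cache failed to sync for %v", informerType)
        }
    }
    return nil
}
```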