-
Notifications
You must be signed in to change notification settings - Fork 0
/
main.go
261 lines (219 loc) · 8.79 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
package main
import (
"context"
"fmt"
"time"
apiv1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
dynamiclister "k8s.io/client-go/dynamic/dynamiclister"
kube_client "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog/v2"
)
func main() {
	// Load the kubeconfig referenced by the KUBECONFIG env variable,
	// falling back to the default path of ~/.kube/config.
	apiConfig, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()
	if err != nil {
		panic(err)
	}
	// Turn the loaded kubeconfig into a rest.Config.
	restConfig, err := clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		panic(err)
	}
	kubeClient := createKubeClient(restConfig)
	dClient := dynamic.New(kubeClient.Discovery().RESTClient())
	// As an alternative to dynamic.New(), uncomment the following line
	// to build the dynamic client straight from the rest config:
	// dClient := dynamic.NewForConfigOrDie(restConfig)

	// Closing this channel stops the background reflectors.
	stopCh := make(chan struct{})

	// The `core` API group is spelled "" and resources are plural (nodes, pods, ...).
	// Ref: https://github.com/kubernetes/client-go/issues/737
	nodeGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes"}
	nodeLister := NewDynamicLister(dClient, stopCh, nodeGVR, apiv1.NamespaceAll)
	// List every node in the cluster.
	nodes, err := nodeLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("") // add some space after the last line for better display
	fmt.Println("All nodes:")
	fmt.Println("----------")
	for _, node := range nodes {
		fmt.Println(node.GetName())
	}
	fmt.Println("") // add some space after the last line for better display

	// Same group/plural rules as above apply to pods.
	// Ref: https://github.com/kubernetes/client-go/issues/737
	podGVR := schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
	// A concrete namespace limits the lister (only meaningful for namespaced resources).
	podLister := NewDynamicLister(dClient, stopCh, podGVR, "kube-system")
	// List the pods living in kube-system.
	kubeSystemPods, err := podLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("") // add some space after the last line for better display
	fmt.Println("Pods in `kube-system` namespace:")
	fmt.Println("--------------------------------")
	for _, pod := range kubeSystemPods {
		fmt.Println(pod.GetName())
	}
	fmt.Println("") // add some space after the last line for better display

	// NamespaceAll widens the same GVR to every namespace.
	allPodsLister := NewDynamicLister(dClient, stopCh, podGVR, apiv1.NamespaceAll)
	// List pods across all namespaces.
	allPods, err := allPodsLister.List(labels.Everything())
	if err != nil {
		panic(err)
	}
	fmt.Println("") // add some space after the last line for better display
	fmt.Println("Pods in all the namespaces:")
	fmt.Println("---------------------------")
	for _, pod := range allPods {
		fmt.Println(pod.GetName())
	}
	fmt.Println("") // add some space after the last line for better display

	// Uncomment to use the CRD Lister
	// // CRD Lister
	// crdLister := NewDynamicCRDLister(dClient, stopCh)
	// // Get CRDs by specifying the key in the format `<group>/Kind` (<- Kind needs to be in CamelCase)
	// // Note that this is quite different from specifying the key as `<namespace>/<name>`
	// no, err := crdLister.Get("traefik.containo.us/ServersTransport")
	// if err != nil {
	// panic(err)
	// }
	// // pretty print
	// output, err := json.MarshalIndent(no, "", " ")
	// if err != nil {
	// panic(err)
	// }
	// fmt.Println("CRD", string(output))
}
// NewDynamicLister builds a dynamiclister.Lister for the given GroupVersionResource,
// backed by a reflector that keeps the underlying store in sync with the api-server.
//
// Pass apiv1.NamespaceAll as namespace for cluster-scoped resources (or to span all
// namespaces of a namespaced resource); pass a concrete namespace to restrict a
// namespaced resource. The reflector runs until stopChannel is closed. The call
// blocks until the initial LIST has been reflected into the cache (or stopChannel
// closes first, in which case a sync failure is logged and the lister is returned
// anyway, matching the previous behavior).
func NewDynamicLister(dClient *dynamic.DynamicClient, stopChannel <-chan struct{}, gvr schema.GroupVersionResource, namespace string) dynamiclister.Lister {
	var lister func(ctx context.Context, opts metav1.ListOptions) (*unstructured.UnstructuredList, error)
	var watcher func(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
	if namespace == apiv1.NamespaceAll {
		lister = dClient.Resource(gvr).List
		watcher = dClient.Resource(gvr).Watch
	} else {
		// For a lister limited to a particular namespace, scope both the
		// list and the watch to that namespace.
		lister = dClient.Resource(gvr).Namespace(namespace).List
		watcher = dClient.Resource(gvr).Namespace(namespace).Watch
	}
	// NewNamespaceKeyedIndexerAndReflector can be used for both namespaced
	// and cluster-scoped resources.
	store, reflector := cache.NewNamespaceKeyedIndexerAndReflector(&cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			return lister(context.Background(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			return watcher(context.Background(), options)
		},
	}, unstructured.Unstructured{}, time.Hour)
	genericLister := dynamiclister.New(store, gvr)
	// Run reflector in the background so that we get new updates from the api-server.
	go reflector.Run(stopChannel)
	// Wait for the reflector to complete its initial list. Checking the reflector's
	// last-synced resource version (it is "" until the first LIST succeeds) instead
	// of requiring a non-empty List() result means this also terminates correctly
	// when the resource set is legitimately empty — the old len(...)>0 check would
	// block here until stopChannel closed if, say, the namespace had no pods.
	synced := cache.WaitForNamedCacheSync(fmt.Sprintf("generic-%s-lister", gvr.Resource), stopChannel, func() bool {
		return reflector.LastSyncResourceVersion() != ""
	})
	if !synced {
		klog.Error("couldn't sync cache")
	}
	return genericLister
}
// NewDynamicCRDLister builds a lister for CustomResourceDefinitions keyed by
// `<group>/<Kind>` (Kind in CamelCase) instead of the usual `<namespace>/<name>`,
// so a CRD can be looked up knowing only its Kind and API group. Entries are also
// indexed by API group under the "group" index. The backing reflector runs until
// stopChannel is closed; the call blocks until the cache has synced once (or
// stopChannel closes first, in which case a sync failure is logged).
func NewDynamicCRDLister(dClient *dynamic.DynamicClient, stopChannel <-chan struct{}) dynamiclister.Lister {
	gvr := schema.GroupVersionResource{Group: "apiextensions.k8s.io", Version: "v1", Resource: "customresourcedefinitions"}
	lister := dClient.Resource(gvr).List
	watcher := dClient.Resource(gvr).Watch
	// crdGroup extracts spec.group from an unstructured CRD object. A hard
	// extraction error is returned; a merely-missing field is logged and an
	// empty group is used, keeping the previous best-effort behavior.
	crdGroup := func(uo *unstructured.Unstructured) (string, error) {
		group, found, err := unstructured.NestedString(uo.Object, "spec", "group")
		if err != nil {
			return "", fmt.Errorf("reading spec.group of %q: %w", uo.GetName(), err)
		}
		if !found {
			klog.Warningf("didn't find spec.group on %v", uo.GetName())
		}
		return group, nil
	}
	store := cache.NewIndexer( /* Key Func */ func(obj interface{}) (string, error) {
		uo := obj.(*unstructured.Unstructured)
		group, err := crdGroup(uo)
		if err != nil {
			// Previously this error was printed and swallowed, producing a
			// garbage cache key; surface it to the cache machinery instead.
			return "", err
		}
		names, found, err := unstructured.NestedStringMap(uo.Object, "spec", "names")
		if err != nil {
			return "", fmt.Errorf("reading spec.names of %q: %w", uo.GetName(), err)
		}
		if !found {
			klog.Warningf("didn't find spec.names on %v", uo.GetName())
		}
		// Key is <group>/<Kind> as opposed to <namespace>/<name>.
		// This is so that you can find a CRD using just Kind and API Group
		// instead of knowing the name.
		return group + "/" + names["kind"], nil
	}, cache.Indexers{"group": /* Index Func */ func(obj interface{}) ([]string, error) {
		uo := obj.(*unstructured.Unstructured)
		group, err := crdGroup(uo)
		if err != nil {
			return []string{""}, err
		}
		/* Index by API Group of the CRD */
		return []string{group}, nil
	}})
	lw := &cache.ListWatch{
		ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
			return lister(context.Background(), options)
		},
		WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
			return watcher(context.Background(), options)
		},
	}
	reflector := cache.NewReflector(lw, unstructured.Unstructured{}, store, time.Hour)
	crdLister := dynamiclister.New(store, gvr)
	// Run reflector in the background so that we get new updates from the api-server.
	go reflector.Run(stopChannel)
	// Wait for the reflector's initial list. The last-synced resource version is ""
	// until the first LIST succeeds, so this predicate also terminates correctly on
	// a cluster with zero CRDs, where the old len(...)>0 check would hang until
	// stopChannel closed.
	synced := cache.WaitForNamedCacheSync(fmt.Sprintf("generic-%s-lister", gvr.Resource), stopChannel, func() bool {
		return reflector.LastSyncResourceVersion() != ""
	})
	if !synced {
		klog.Error("couldn't sync cache")
	}
	return crdLister
}
// createKubeClient mirrors the helper of the same name in cluster-autoscaler:
// it builds a typed clientset from the given rest config, panicking on error.
func createKubeClient(kubeConfig *rest.Config) kube_client.Interface {
	client, err := kube_client.NewForConfig(kubeConfig)
	if err != nil {
		panic(err)
	}
	return client
}