diff --git a/.werf/consts.yaml b/.werf/consts.yaml index dad020414..f23df83c5 100644 --- a/.werf/consts.yaml +++ b/.werf/consts.yaml @@ -24,10 +24,6 @@ {{- $_ := set $versions "LINSTOR_COMMON" "999c3c0793376f88b891eb7ef3dc8f8bc9aa26ec" }} {{- $_ := set $versions "LINSTOR_CSI" "98544cadb6d111d27a86a11ec07de91b99704b82" }} {{- $_ := set $versions "LINSTOR_K8S_AWAIT_ELECTION" "0.3.1" }} -{{- $_ := set $versions "LINSTOR_SCHEDULER_EXTENDER" "0.3.2" }} -# the closest version I found for the stork's module version v1.4.1-0.20220512171133-b99428ee1ddf which used in linstor-scheduler-extender v0.3.2 -# https://github.com/libopenstorage/stork/pull/1097/commits -{{- $_ := set $versions "LINSTOR_SCHEDULER_STORK" "2.11.5" }} {{- $_ := set $versions "LINSTOR_SERVER" "1.24.2" }} # We are using Gradle version 6.9.3 because building Linstor Controller with version 7+ completes unsuccesfully with error: # Could not set unknown property 'release' for object of type org.gradle.api.tasks.compile.CompileOptions. 
@@ -45,4 +41,4 @@ {{- $_ := set $ "BUILD_PACKAGES" "build-essential rpm-build rpm-macros-intro-conflicts sudo git jq" }} {{- $_ := set $ "DECKHOUSE_UID_GID" "64535" }} {{- $_ := set $ "ALT_CLEANUP_CMD" "rm -rf /var/lib/apt/lists/* /var/cache/apt/* && mkdir -p /var/lib/apt/lists/partial /var/cache/apt/archives/partial" }} -{{- $_ := set $ "ALT_BASE_PACKAGES" "openssl libtirpc tzdata" }} \ No newline at end of file +{{- $_ := set $ "ALT_BASE_PACKAGES" "openssl libtirpc tzdata" }} diff --git a/api/go.mod b/api/go.mod index ea41542ac..53c2f95f5 100644 --- a/api/go.mod +++ b/api/go.mod @@ -1,22 +1,26 @@ module github.com/deckhouse/sds-replicated-volume/api -go 1.23.6 +go 1.24.0 -require k8s.io/apimachinery v0.30.2 +toolchain go1.24.3 + +require k8s.io/apimachinery v0.33.1 require ( - github.com/go-logr/logr v1.4.1 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - golang.org/x/net v0.40.0 // indirect - golang.org/x/text v0.25.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/text v0.23.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) 
diff --git a/api/go.sum b/api/go.sum index dde294bf2..0f90ff67a 100644 --- a/api/go.sum +++ b/api/go.sum @@ -1,16 +1,16 @@ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/kisielk/errcheck v1.5.0/go.mod 
h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= @@ -26,8 +26,10 @@ github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -39,8 +41,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY= -golang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -49,8 +51,8 @@ golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= -golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -63,20 +65,20 @@ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+ gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/apimachinery v0.30.2 h1:fEMcnBj6qkzzPGSVsAZtQThU62SmQ4ZymlXRC5yFSCg= -k8s.io/apimachinery v0.30.2/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 
h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/api/linstor/layer_storage_resource_ids.go b/api/linstor/layer_storage_resource_ids.go new file mode 100644 index 000000000..c9eba574d --- /dev/null +++ b/api/linstor/layer_storage_resource_ids.go @@ -0,0 +1,31 @@ +package linstor + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// LayerResourceIdsSpec defines the desired state of LayerResourceIds +type LayerResourceIdsSpec struct { + LayerResourceID int `json:"layer_resource_id"` + LayerResourceKind string `json:"layer_resource_kind"` + LayerResourceSuffix string `json:"layer_resource_suffix"` + LayerResourceSuspended bool `json:"layer_resource_suspended"` + NodeName string `json:"node_name"` + ResourceName string `json:"resource_name"` + SnapshotName string `json:"snapshot_name"` +} + +// LayerResourceIds is the Schema for the layerresourceids API +type LayerResourceIds struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec LayerResourceIdsSpec `json:"spec,omitempty"` +} + +// LayerResourceIdsList contains a list of LayerResourceIds +type LayerResourceIdsList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata,omitempty"` + Items []LayerResourceIds `json:"items"` +} diff --git a/api/linstor/register.go b/api/linstor/register.go index d78ab01cc..fefcb7e0e 100644 --- a/api/linstor/register.go +++ b/api/linstor/register.go @@ -56,6 +56,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &LayerStorageVolumesList{}, &ResourceDefinitions{}, 
&ResourceDefinitionsList{}, + &LayerResourceIds{}, + &LayerResourceIdsList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/api/linstor/zz_generated.deepcopy.go b/api/linstor/zz_generated.deepcopy.go index f13430bd3..32007abb4 100644 --- a/api/linstor/zz_generated.deepcopy.go +++ b/api/linstor/zz_generated.deepcopy.go @@ -502,3 +502,64 @@ func (in *ResourceDefinitionsList) DeepCopyObject() runtime.Object { } // ------------------- ResourceDefinitions ---------------------------- + +// ------------------- ResourceIds ---------------------------- + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LayerResourceIds) DeepCopyInto(out *LayerResourceIds) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +func (in *LayerResourceIds) DeepCopy() *LayerResourceIds { + if in == nil { + return nil + } + out := new(LayerResourceIds) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LayerResourceIds) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LayerResourceIdsList) DeepCopyInto(out *LayerResourceIdsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LayerResourceIds, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. 
+func (in *LayerResourceIdsList) DeepCopy() *LayerResourceIdsList { + if in == nil { + return nil + } + out := new(LayerResourceIdsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LayerResourceIdsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// ------------------- ResourceIds ---------------------------- \ No newline at end of file diff --git a/api/v1alpha1/drbd_cluster.go b/api/v1alpha1/drbd_cluster.go new file mode 100644 index 000000000..7c641b7be --- /dev/null +++ b/api/v1alpha1/drbd_cluster.go @@ -0,0 +1,109 @@ +/* +Copyright 2025 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// DRBDClusterSpec defines the desired state of DRBDCluster +// +k8s:deepcopy-gen=true +type DRBDClusterSpec struct { + Replicas int32 `json:"replicas"` + QuorumPolicy string `json:"quorumPolicy"` + NetworkPoolName string `json:"networkPoolName"` + SharedSecret string `json:"sharedSecret"` + Size int64 `json:"size"` + DrbdCurrentGi string `json:"drbdCurrentGi"` + Port int32 `json:"port"` + Minor int `json:"minor"` + AttachmentRequested []string `json:"attachmentRequested"` + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"` + Affinity Affinity `json:"affinity,omitempty"` + AutoDiskful AutoDiskful `json:"autoDiskful,omitempty"` + AutoRecovery AutoRecovery `json:"autoRecovery,omitempty"` + StoragePoolSelector []metav1.LabelSelector `json:"storagePoolSelector,omitempty"` +} + +// TopologySpreadConstraint specifies topology constraints +// +k8s:deepcopy-gen=true +type TopologySpreadConstraint struct { + MaxSkew int `json:"maxSkew"` + TopologyKey string `json:"topologyKey"` + WhenUnsatisfiable string `json:"whenUnsatisfiable"` +} + +// Affinity defines node affinity scheduling rules +// +k8s:deepcopy-gen=true +type Affinity struct { + NodeAffinity NodeAffinity `json:"nodeAffinity,omitempty"` +} + +// NodeAffinity specifies node selection criteria +// +k8s:deepcopy-gen=true +type NodeAffinity struct { + RequiredDuringSchedulingIgnoredDuringExecution NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` +} + +// NodeSelector represents constraints to match nodes +// +k8s:deepcopy-gen=true +type NodeSelector struct { + NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms"` +} + +// NodeSelectorTerm defines node selection conditions +// +k8s:deepcopy-gen=true +type NodeSelectorTerm struct { + MatchExpressions []metav1.LabelSelectorRequirement `json:"matchExpressions"` +} + +// AutoDiskful represents 
auto-diskful settings +// +k8s:deepcopy-gen=true +type AutoDiskful struct { + DelaySeconds int `json:"delaySeconds"` +} + +// AutoRecovery represents auto-recovery settings +// +k8s:deepcopy-gen=true +type AutoRecovery struct { + DelaySeconds int `json:"delaySeconds"` +} + +// DRBDClusterStatus defines the observed state of DRBDCluster +// +k8s:deepcopy-gen=true +type DRBDClusterStatus struct { + Size int64 `json:"size"` + AttachmentCompleted []string `json:"attachmentCompleted"` + Conditions []metav1.Condition `json:"conditions"` +} + +// DRBDCluster is the Schema for the drbdclusters API +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type DRBDCluster struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec DRBDClusterSpec `json:"spec"` + Status DRBDClusterStatus `json:"status,omitempty"` +} + +// DRBDClusterList is the list of DRBDClusters +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type DRBDClusterList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DRBDCluster `json:"items"` +} \ No newline at end of file diff --git a/api/v1alpha1/register.go b/api/v1alpha1/register.go index a2ceb69e7..c4bfd3b7e 100644 --- a/api/v1alpha1/register.go +++ b/api/v1alpha1/register.go @@ -44,6 +44,8 @@ func addKnownTypes(scheme *runtime.Scheme) error { &ReplicatedStorageClassList{}, &ReplicatedStoragePool{}, &ReplicatedStoragePoolList{}, + &DRBDCluster{}, + &DRBDClusterList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil diff --git a/api/v1alpha1/replicated_storage_class.go b/api/v1alpha1/replicated_storage_class.go index ac196c6e9..f589c963e 100644 --- a/api/v1alpha1/replicated_storage_class.go +++ b/api/v1alpha1/replicated_storage_class.go @@ -1,11 +1,10 @@ /* -Copyright 2025 Flant JSC - +Copyright 2023 Flant JSC Licensed under the Apache License, Version 2.0 (the 
"License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -13,11 +12,12 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - package v1alpha1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ReplicatedStorageClass struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -25,6 +25,8 @@ type ReplicatedStorageClass struct { Status ReplicatedStorageClassStatus `json:"status,omitempty"` } +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicatedStorageClassList contains a list of empty block device type ReplicatedStorageClassList struct { metav1.TypeMeta `json:",inline"` @@ -32,6 +34,7 @@ type ReplicatedStorageClassList struct { Items []ReplicatedStorageClass `json:"items"` } +// +k8s:deepcopy-gen=true type ReplicatedStorageClassSpec struct { StoragePool string `json:"storagePool"` ReclaimPolicy string `json:"reclaimPolicy"` @@ -41,6 +44,7 @@ type ReplicatedStorageClassSpec struct { Zones []string `json:"zones"` } +// +k8s:deepcopy-gen=true type ReplicatedStorageClassStatus struct { Phase string `json:"phase,omitempty"` Reason string `json:"reason,omitempty"` diff --git a/api/v1alpha1/replicated_storage_pool.go b/api/v1alpha1/replicated_storage_pool.go index d43c5db13..76aa70694 100644 --- a/api/v1alpha1/replicated_storage_pool.go +++ b/api/v1alpha1/replicated_storage_pool.go @@ -1,23 +1,21 @@ /* -Copyright 2025 Flant JSC - +Copyright 2023 Flant JSC Licensed under the 
Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ - package v1alpha1 import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ReplicatedStoragePool struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` @@ -25,24 +23,29 @@ type ReplicatedStoragePool struct { Status ReplicatedStoragePoolStatus `json:"status,omitempty"` } +// +k8s:deepcopy-gen=true type ReplicatedStoragePoolSpec struct { Type string `json:"type"` LVMVolumeGroups []ReplicatedStoragePoolLVMVolumeGroups `json:"lvmVolumeGroups"` } +// +k8s:deepcopy-gen=true type ReplicatedStoragePoolLVMVolumeGroups struct { Name string `json:"name"` ThinPoolName string `json:"thinPoolName"` } +// +k8s:deepcopy-gen=true type ReplicatedStoragePoolStatus struct { Phase string `json:"phase"` Reason string `json:"reason"` } // ReplicatedStoragePoolList contains a list of ReplicatedStoragePool +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ReplicatedStoragePoolList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata"` Items []ReplicatedStoragePool `json:"items"` -} +} \ No newline at end of file diff --git a/api/v1alpha1/zz_generated.deepcopy.go b/api/v1alpha1/zz_generated.deepcopy.go index 6aece3f00..5a2c50340 100644 --- a/api/v1alpha1/zz_generated.deepcopy.go +++ b/api/v1alpha1/zz_generated.deepcopy.go @@ -1,3 +1,6 @@ +//go:build !ignore_autogenerated +// +build 
!ignore_autogenerated + /* Copyright 2025 Flant JSC @@ -13,21 +16,263 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +// Code generated by deepcopy-gen. DO NOT EDIT. package v1alpha1 -import "k8s.io/apimachinery/pkg/runtime" +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + in.NodeAffinity.DeepCopyInto(&out.NodeAffinity) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoDiskful) DeepCopyInto(out *AutoDiskful) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoDiskful. +func (in *AutoDiskful) DeepCopy() *AutoDiskful { + if in == nil { + return nil + } + out := new(AutoDiskful) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AutoRecovery) DeepCopyInto(out *AutoRecovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AutoRecovery. +func (in *AutoRecovery) DeepCopy() *AutoRecovery { + if in == nil { + return nil + } + out := new(AutoRecovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DRBDCluster) DeepCopyInto(out *DRBDCluster) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDCluster. +func (in *DRBDCluster) DeepCopy() *DRBDCluster { + if in == nil { + return nil + } + out := new(DRBDCluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDCluster) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDClusterList) DeepCopyInto(out *DRBDClusterList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRBDCluster, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDClusterList. +func (in *DRBDClusterList) DeepCopy() *DRBDClusterList { + if in == nil { + return nil + } + out := new(DRBDClusterList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDClusterList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DRBDClusterSpec) DeepCopyInto(out *DRBDClusterSpec) { + *out = *in + if in.AttachmentRequested != nil { + in, out := &in.AttachmentRequested, &out.AttachmentRequested + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + copy(*out, *in) + } + in.Affinity.DeepCopyInto(&out.Affinity) + out.AutoDiskful = in.AutoDiskful + out.AutoRecovery = in.AutoRecovery + if in.StoragePoolSelector != nil { + in, out := &in.StoragePoolSelector, &out.StoragePoolSelector + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDClusterSpec. +func (in *DRBDClusterSpec) DeepCopy() *DRBDClusterSpec { + if in == nil { + return nil + } + out := new(DRBDClusterSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDClusterStatus) DeepCopyInto(out *DRBDClusterStatus) { + *out = *in + if in.AttachmentCompleted != nil { + in, out := &in.AttachmentCompleted, &out.AttachmentCompleted + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDClusterStatus. +func (in *DRBDClusterStatus) DeepCopy() *DRBDClusterStatus { + if in == nil { + return nil + } + out := new(DRBDClusterStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + in.RequiredDuringSchedulingIgnoredDuringExecution.DeepCopyInto(&out.RequiredDuringSchedulingIgnoredDuringExecution) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} -// --------------- replicated storage class +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]v1.LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. +func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ReplicatedStorageClass) DeepCopyInto(out *ReplicatedStorageClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStorageClass. func (in *ReplicatedStorageClass) DeepCopy() *ReplicatedStorageClass { if in == nil { return nil @@ -57,9 +302,10 @@ func (in *ReplicatedStorageClassList) DeepCopyInto(out *ReplicatedStorageClassLi (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStorageClassList. func (in *ReplicatedStorageClassList) DeepCopy() *ReplicatedStorageClassList { if in == nil { return nil @@ -77,16 +323,54 @@ func (in *ReplicatedStorageClassList) DeepCopyObject() runtime.Object { return nil } -// --------------- replicated storage pool +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicatedStorageClassSpec) DeepCopyInto(out *ReplicatedStorageClassSpec) { + *out = *in + if in.Zones != nil { + in, out := &in.Zones, &out.Zones + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStorageClassSpec. +func (in *ReplicatedStorageClassSpec) DeepCopy() *ReplicatedStorageClassSpec { + if in == nil { + return nil + } + out := new(ReplicatedStorageClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicatedStorageClassStatus) DeepCopyInto(out *ReplicatedStorageClassStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStorageClassStatus. +func (in *ReplicatedStorageClassStatus) DeepCopy() *ReplicatedStorageClassStatus { + if in == nil { + return nil + } + out := new(ReplicatedStorageClassStatus) + in.DeepCopyInto(out) + return out +} // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ReplicatedStoragePool) DeepCopyInto(out *ReplicatedStoragePool) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyBlockDevice. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStoragePool. func (in *ReplicatedStoragePool) DeepCopy() *ReplicatedStoragePool { if in == nil { return nil @@ -104,6 +388,22 @@ func (in *ReplicatedStoragePool) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicatedStoragePoolLVMVolumeGroups) DeepCopyInto(out *ReplicatedStoragePoolLVMVolumeGroups) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStoragePoolLVMVolumeGroups. +func (in *ReplicatedStoragePoolLVMVolumeGroups) DeepCopy() *ReplicatedStoragePoolLVMVolumeGroups { + if in == nil { + return nil + } + out := new(ReplicatedStoragePoolLVMVolumeGroups) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ReplicatedStoragePoolList) DeepCopyInto(out *ReplicatedStoragePoolList) { *out = *in @@ -116,9 +416,10 @@ func (in *ReplicatedStoragePoolList) DeepCopyInto(out *ReplicatedStoragePoolList (*in)[i].DeepCopyInto(&(*out)[i]) } } + return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GuestbookList. +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStoragePoolList. func (in *ReplicatedStoragePoolList) DeepCopy() *ReplicatedStoragePoolList { if in == nil { return nil @@ -135,3 +436,56 @@ func (in *ReplicatedStoragePoolList) DeepCopyObject() runtime.Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicatedStoragePoolSpec) DeepCopyInto(out *ReplicatedStoragePoolSpec) { + *out = *in + if in.LVMVolumeGroups != nil { + in, out := &in.LVMVolumeGroups, &out.LVMVolumeGroups + *out = make([]ReplicatedStoragePoolLVMVolumeGroups, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStoragePoolSpec. +func (in *ReplicatedStoragePoolSpec) DeepCopy() *ReplicatedStoragePoolSpec { + if in == nil { + return nil + } + out := new(ReplicatedStoragePoolSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicatedStoragePoolStatus) DeepCopyInto(out *ReplicatedStoragePoolStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicatedStoragePoolStatus. 
+func (in *ReplicatedStoragePoolStatus) DeepCopy() *ReplicatedStoragePoolStatus { + if in == nil { + return nil + } + out := new(ReplicatedStoragePoolStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} diff --git a/api/v1alpha2/drbd_resource.go b/api/v1alpha2/drbd_resource.go new file mode 100644 index 000000000..8dd3a14a2 --- /dev/null +++ b/api/v1alpha2/drbd_resource.go @@ -0,0 +1,36 @@ + +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type DRBDResource struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec DRBDResourceSpec `json:"spec"` + Status DRBDResourceStatus `json:"status"` +} + +// +k8s:deepcopy-gen=true +type DRBDResourceSpec struct { + Size int64 `json:"size"` +} + +// +k8s:deepcopy-gen=true +type DRBDResourceStatus struct { +} + +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +type DRBDResourceList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DRBDResource `json:"items"` +} \ No newline at end of file diff --git a/api/v1alpha2/drbd_resource_replica.go b/api/v1alpha2/drbd_resource_replica.go new file mode 100644 index 000000000..126d2785d --- /dev/null +++ 
b/api/v1alpha2/drbd_resource_replica.go @@ -0,0 +1,47 @@ +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +kubebuilder:object:root=true +// +kubebuilder:subresource:status +type DRBDResourceReplica struct { + metav1.TypeMeta `json:",inline"` + metav1.ObjectMeta `json:"metadata"` + + Spec DRBDResourceReplicaSpec `json:"spec"` + Status *DRBDResourceReplicaStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen=true +type DRBDResourceReplicaSpec struct { + // NodeName string `json:"nodeName"` + Peers map[string]Peer `json:"peers,omitempty"` +} + +// +k8s:deepcopy-gen=true +type Peer struct { + Address Address `json:"address"` + Diskless bool `json:"diskless"` +} + +// +k8s:deepcopy-gen=true +type Address struct { + IPv4 string `json:"ipv4"` +} + +// +k8s:deepcopy-gen=true +type DRBDResourceReplicaStatus struct { + Conditions []metav1.Condition `json:"conditions"` +} + +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type DRBDResourceReplicaList struct { + metav1.TypeMeta `json:",inline"` + metav1.ListMeta `json:"metadata"` + Items []DRBDResourceReplica `json:"items"` +} diff --git a/api/v1alpha2/register.go b/api/v1alpha2/register.go new file mode 100644 index 000000000..2d982d350 --- /dev/null +++ b/api/v1alpha2/register.go @@ -0,0 +1,49 @@ +/* +Copyright 2025 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +kubebuilder:object:generate=true +// +groupName=storage.deckhouse.io +package v1alpha2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + APIGroup = "storage.deckhouse.io" + APIVersion = "v1alpha2" +) + +// SchemeGroupVersion is group version used to register these objects +var ( + SchemeGroupVersion = schema.GroupVersion{ + Group: APIGroup, + Version: APIVersion, + } + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &DRBDResource{}, + &DRBDResourceList{}, + &DRBDResourceReplica{}, + &DRBDResourceReplicaList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/api/v1alpha2/zz_generated.deepcopy.go b/api/v1alpha2/zz_generated.deepcopy.go new file mode 100644 index 000000000..3863e3487 --- /dev/null +++ b/api/v1alpha2/zz_generated.deepcopy.go @@ -0,0 +1,260 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright 2025 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1alpha2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Address) DeepCopyInto(out *Address) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Address. +func (in *Address) DeepCopy() *Address { + if in == nil { + return nil + } + out := new(Address) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResource) DeepCopyInto(out *DRBDResource) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResource. +func (in *DRBDResource) DeepCopy() *DRBDResource { + if in == nil { + return nil + } + out := new(DRBDResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDResource) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceList) DeepCopyInto(out *DRBDResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRBDResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceList. 
+func (in *DRBDResourceList) DeepCopy() *DRBDResourceList { + if in == nil { + return nil + } + out := new(DRBDResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceReplica) DeepCopyInto(out *DRBDResourceReplica) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(DRBDResourceReplicaStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceReplica. +func (in *DRBDResourceReplica) DeepCopy() *DRBDResourceReplica { + if in == nil { + return nil + } + out := new(DRBDResourceReplica) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDResourceReplica) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceReplicaList) DeepCopyInto(out *DRBDResourceReplicaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DRBDResourceReplica, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceReplicaList. 
+func (in *DRBDResourceReplicaList) DeepCopy() *DRBDResourceReplicaList { + if in == nil { + return nil + } + out := new(DRBDResourceReplicaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DRBDResourceReplicaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceReplicaSpec) DeepCopyInto(out *DRBDResourceReplicaSpec) { + *out = *in + if in.Peers != nil { + in, out := &in.Peers, &out.Peers + *out = make(map[string]Peer, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceReplicaSpec. +func (in *DRBDResourceReplicaSpec) DeepCopy() *DRBDResourceReplicaSpec { + if in == nil { + return nil + } + out := new(DRBDResourceReplicaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceReplicaStatus) DeepCopyInto(out *DRBDResourceReplicaStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceReplicaStatus. +func (in *DRBDResourceReplicaStatus) DeepCopy() *DRBDResourceReplicaStatus { + if in == nil { + return nil + } + out := new(DRBDResourceReplicaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DRBDResourceSpec) DeepCopyInto(out *DRBDResourceSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceSpec. +func (in *DRBDResourceSpec) DeepCopy() *DRBDResourceSpec { + if in == nil { + return nil + } + out := new(DRBDResourceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DRBDResourceStatus) DeepCopyInto(out *DRBDResourceStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DRBDResourceStatus. +func (in *DRBDResourceStatus) DeepCopy() *DRBDResourceStatus { + if in == nil { + return nil + } + out := new(DRBDResourceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Peer) DeepCopyInto(out *Peer) { + *out = *in + out.Address = in.Address + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Peer. 
+func (in *Peer) DeepCopy() *Peer { + if in == nil { + return nil + } + out := new(Peer) + in.DeepCopyInto(out) + return out +} \ No newline at end of file diff --git a/crds/drbdcluster.yaml b/crds/drbdcluster.yaml new file mode 100644 index 000000000..82f256acc --- /dev/null +++ b/crds/drbdcluster.yaml @@ -0,0 +1,161 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drbdclusters.storage.deckhouse.io + labels: + heritage: deckhouse + module: storage + backup.deckhouse.io/cluster-config: "true" +spec: + group: storage.deckhouse.io + scope: Cluster + names: + kind: DRBDCluster + plural: drbdclusters + singular: drbdcluster + shortNames: + - drbdcl + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + description: | + DRBDCluster is a Kubernetes Custom Resource that defines a configuration for a DRBD cluster. + properties: + spec: + type: object + properties: + replicas: + type: integer + minimum: 1 + description: "Number of replicas." + quorumPolicy: + type: string + enum: + - off + - none + - majority + - all + description: "Quorum policy for the cluster." + networkPoolName: + type: string + description: "Name of the network pool to use." + sharedSecret: + type: string + description: "Shared secret for authentication." + size: + type: integer + description: "Requested size of the DRBD device." # TODO: divice же? + drbdCurrentGi: + type: string + description: "Current DRBD generation identifier." # TODO: generation identifier же? + port: + type: integer + description: "Port for DRBD communication." + minor: + type: integer + description: "Minor number for the DRBD device." + attachmentRequested: + type: array + items: + type: string + description: "List of nodes where attachment is requested." 
+ topologySpreadConstraints: + type: array + items: + type: object + properties: + maxSkew: + type: integer + topologyKey: + type: string + whenUnsatisfiable: + type: string + description: "Topology spread constraints for scheduling." + affinity: + type: object + properties: + nodeAffinity: + type: object + properties: + requiredDuringSchedulingIgnoredDuringExecution: + type: object + properties: + nodeSelectorTerms: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + autoDiskful: + type: object + properties: + delaySeconds: + type: integer + description: "Delay in seconds for auto-diskful operation." + autoRecovery: + type: object + properties: + delaySeconds: + type: integer + description: "Delay in seconds for auto-recovery." + storagePoolSelector: + type: array + items: + type: object + properties: + matchExpressions: + type: array + items: + type: object + properties: + key: + type: string + operator: + type: string + values: + type: array + items: + type: string + status: + type: object + properties: + size: + type: integer + description: "Actual size of the DRBD device." + attachmentCompleted: + type: array + items: + type: string + description: "List of nodes where attachment is completed." 
+ conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string diff --git a/crds/drbdnode.yaml b/crds/drbdnode.yaml new file mode 100644 index 000000000..eeab35a1f --- /dev/null +++ b/crds/drbdnode.yaml @@ -0,0 +1,53 @@ +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: drbdnodes.storage.deckhouse.io +spec: + group: storage.deckhouse.io + scope: Namespaced + names: + plural: drbdnodes + singular: drbdnode + kind: DRBDNode + shortNames: + - drbdn + versions: + - name: v1alpha1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + properties: + networkPools: + type: object + additionalProperties: + type: object + properties: + address: + type: object + properties: + ipv4: + type: string + status: + type: object + properties: + conditions: + type: array + items: + type: object + properties: + lastTransitionTime: + type: string + format: date-time + message: + type: string + reason: + type: string + status: + type: string + type: + type: string diff --git a/crds/storage.deckhouse.io_drbdresourcereplicas.yaml b/crds/storage.deckhouse.io_drbdresourcereplicas.yaml new file mode 100644 index 000000000..d2b166160 --- /dev/null +++ b/crds/storage.deckhouse.io_drbdresourcereplicas.yaml @@ -0,0 +1,127 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: drbdresourcereplicas.storage.deckhouse.io +spec: + group: storage.deckhouse.io + names: + kind: DRBDResourceReplica + listKind: DRBDResourceReplicaList + plural: drbdresourcereplicas + singular: drbdresourcereplica + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the 
versioned schema of this representation of an object. + Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + peers: + additionalProperties: + properties: + address: + properties: + ipv4: + type: string + required: + - ipv4 + type: object + diskless: + type: boolean + required: + - address + - diskless + type: object + description: NodeName string `json:"nodeName"` + type: object + type: object + status: + properties: + conditions: + items: + description: Condition contains details for one aspect of the current + state of this API Resource. + properties: + lastTransitionTime: + description: |- + lastTransitionTime is the last time the condition transitioned from one status to another. + This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable. + format: date-time + type: string + message: + description: |- + message is a human readable message indicating details about the transition. + This may be an empty string. + maxLength: 32768 + type: string + observedGeneration: + description: |- + observedGeneration represents the .metadata.generation that the condition was set based upon. + For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date + with respect to the current state of the instance. 
+ format: int64 + minimum: 0 + type: integer + reason: + description: |- + reason contains a programmatic identifier indicating the reason for the condition's last transition. + Producers of specific condition types may define expected values and meanings for this field, + and whether the values are considered a guaranteed API. + The value should be a CamelCase string. + This field may not be empty. + maxLength: 1024 + minLength: 1 + pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$ + type: string + status: + description: status of the condition, one of True, False, Unknown. + enum: + - "True" + - "False" + - Unknown + type: string + type: + description: type of condition in CamelCase or in foo.example.com/CamelCase. + maxLength: 316 + pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$ + type: string + required: + - lastTransitionTime + - message + - reason + - status + - type + type: object + type: array + required: + - conditions + type: object + required: + - metadata + - spec + type: object + served: true + storage: true + subresources: + status: {} diff --git a/crds/storage.deckhouse.io_drbdresources.yaml b/crds/storage.deckhouse.io_drbdresources.yaml new file mode 100644 index 000000000..f25369070 --- /dev/null +++ b/crds/storage.deckhouse.io_drbdresources.yaml @@ -0,0 +1,56 @@ +--- +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + annotations: + controller-gen.kubebuilder.io/version: v0.18.0 + name: drbdresources.storage.deckhouse.io +spec: + group: storage.deckhouse.io + names: + kind: DRBDResource + listKind: DRBDResourceList + plural: drbdresources + singular: drbdresource + scope: Namespaced + versions: + - name: v1alpha2 + schema: + openAPIV3Schema: + properties: + apiVersion: + description: |- + APIVersion defines the versioned schema of this representation of an object. 
+ Servers should convert recognized schemas to the latest internal value, and + may reject unrecognized values. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + type: string + kind: + description: |- + Kind is a string value representing the REST resource this object represents. + Servers may infer this from the endpoint the client submits requests to. + Cannot be updated. + In CamelCase. + More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + type: string + metadata: + type: object + spec: + properties: + size: + format: int64 + type: integer + required: + - size + type: object + status: + type: object + required: + - metadata + - spec + - status + type: object + served: true + storage: true + subresources: + status: {} diff --git a/hooks/go/070-generate-certs/scheduler_extender_cert.go b/hack/boilerplate.txt similarity index 74% rename from hooks/go/070-generate-certs/scheduler_extender_cert.go rename to hack/boilerplate.txt index f461e8350..5749b43c6 100644 --- a/hooks/go/070-generate-certs/scheduler_extender_cert.go +++ b/hack/boilerplate.txt @@ -1,5 +1,5 @@ /* -Copyright 2022 Flant JSC +Copyright YEAR Flant JSC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,12 +12,4 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
-*/ - -package generatecerts - -import "github.com/deckhouse/sds-replicated-volume/hooks/go/certs" - -func init() { - certs.RegisterSchedulerExtenderCertHook() -} +*/ \ No newline at end of file diff --git a/hack/gen_crd.sh b/hack/gen_crd.sh new file mode 100755 index 000000000..81bfc10e2 --- /dev/null +++ b/hack/gen_crd.sh @@ -0,0 +1,7 @@ +#!/bin/bash + +cd ./api/ + +controller-gen crd paths=./v1alpha2 output:crd:dir=../crds + +cd .. \ No newline at end of file diff --git a/hack/generate_code.sh b/hack/generate_code.sh new file mode 100755 index 000000000..7cd745f27 --- /dev/null +++ b/hack/generate_code.sh @@ -0,0 +1,13 @@ +#!/bin/bash + +# run from repository root +cd api + +go get k8s.io/code-generator/cmd/deepcopy-gen + +go run k8s.io/code-generator/cmd/deepcopy-gen -v 2 \ + --output-file zz_generated.deepcopy.go \ + --go-header-file ../hack/boilerplate.txt \ + ./v1alpha1 + +cd .. \ No newline at end of file diff --git a/hack/increase_semver.sh b/hack/increase_semver.sh new file mode 100644 index 000000000..e66bccc95 --- /dev/null +++ b/hack/increase_semver.sh @@ -0,0 +1,106 @@ +#!/bin/bash +# +# BORROWED FROM https://github.com/fmahnke/shell-semver +# +# ============================================================================== +# The MIT License (MIT) +# +# Copyright (c) 2014 Fritz Mahnke +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. 
#!/bin/bash
#
# BORROWED FROM https://github.com/fmahnke/shell-semver (MIT License,
# Copyright (c) 2014 Fritz Mahnke — full license text retained upstream).
#
# Increment a version string using Semantic Versioning (SemVer) terminology.
#
# usage: increase_semver.sh [-Mmpd] major.minor.patch
#   -M  increment major, reset minor and patch to 0
#   -m  increment minor, reset patch to 0
#   -p  increment patch
#   -d  append a "-dev.<unix-timestamp>" pre-release suffix
#
# Fixes over the original: option variables are initialized and all
# expansions are quoted. The original's `[ ! -z $major ]` only worked for
# unset variables by accident of test's argument-count rules.

major=""
minor=""
patch=""
dev=""

# Parse command line options.
while getopts ":Mmpd" Option
do
    case $Option in
    M ) major=true;;
    m ) minor=true;;
    p ) patch=true;;
    d ) dev=true;;
    esac
done

shift $((OPTIND - 1))

# Remove any pre-release suffix: 1.2.3-dev.321 -> 1.2.3
version=$(sed -E 's/-.*//' <<< "$1")

# Build array from version string.
a=( ${version//./ } )

dev_version=""

# If version string is missing or has the wrong number of members, show usage.
if [ ${#a[@]} -ne 3 ]
then
    echo "usage: $(basename "$0") [-Mmp] major.minor.patch"
    exit 1
fi

# Increment version numbers as requested.
if [ -n "$major" ]
then
    ((a[0]++))
    a[1]=0
    a[2]=0
fi

if [ -n "$minor" ]
then
    ((a[1]++))
    a[2]=0
fi

if [ -n "$patch" ]
then
    ((a[2]++))
fi

if [ -n "$dev" ]
then
    dev_version="-dev.$(date +%s)"
fi

echo "${a[0]}.${a[1]}.${a[2]}${dev_version}"
diff --git a/hooks/go/certs/scheduler_extender_cert.go b/hooks/go/certs/scheduler_extender_cert.go deleted file mode 100644 index b46d31c44..000000000 --- a/hooks/go/certs/scheduler_extender_cert.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2022 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package certs - -import ( - "fmt" - - kcertificates "k8s.io/api/certificates/v1" - - chcrt "github.com/deckhouse/module-sdk/common-hooks/tls-certificate" - . "github.com/deckhouse/sds-replicated-volume/hooks/go/consts" - tlscertificate "github.com/deckhouse/sds-replicated-volume/hooks/go/tls-certificate" -) - -func RegisterSchedulerExtenderCertHook() { - tlscertificate.RegisterManualTLSHookEM(SchedulerExtenderCertConfig) -} - -var SchedulerExtenderCertConfig = tlscertificate.MustNewGenSelfSignedTLSGroupHookConf( - tlscertificate.GenSelfSignedTLSHookConf{ - CN: "linstor-scheduler-extender", - Namespace: ModuleNamespace, - TLSSecretName: "linstor-scheduler-extender-https-certs", - SANs: chcrt.DefaultSANs([]string{ - "linstor-scheduler-extender", - fmt.Sprintf("linstor-scheduler-extender.%s", ModuleNamespace), - fmt.Sprintf("linstor-scheduler-extender.%s.svc", ModuleNamespace), - fmt.Sprintf("%%CLUSTER_DOMAIN%%://linstor-scheduler-extender.%s.svc", ModuleNamespace), - }), - FullValuesPathPrefix: fmt.Sprintf("%s.internal.customSchedulerExtenderCert", ModuleName), - Usages: []kcertificates.KeyUsage{ - kcertificates.UsageKeyEncipherment, - kcertificates.UsageCertSign, - // ExtKeyUsage 
- kcertificates.UsageServerAuth, - }, - CAExpiryDuration: DefaultCertExpiredDuration, - CertExpiryDuration: DefaultCertExpiredDuration, - CertOutdatedDuration: DefaultCertOutdatedDuration, - }, -) diff --git a/hooks/go/certs/webhook_certs.go b/hooks/go/certs/webhook_certs.go index 507436f58..d5b72a573 100644 --- a/hooks/go/certs/webhook_certs.go +++ b/hooks/go/certs/webhook_certs.go @@ -38,16 +38,6 @@ func WebhookCertConfigs() tlscertificate.GenSelfSignedTLSGroupHookConf { slices.Collect( webhookCertConfigsFromArgs( []webhookHookArgs{ - { - cn: "linstor-scheduler-admission", - secretName: "linstor-scheduler-admission-certs", - valuesPropName: "webhookCert", - additionalSANs: []string{ - "linstor-scheduler-admission", - fmt.Sprintf("linstor-scheduler-admission.%s", ModuleNamespace), - fmt.Sprintf("linstor-scheduler-admission.%s.svc", ModuleNamespace), - }, - }, { cn: "webhooks", secretName: "webhooks-https-certs", @@ -87,7 +77,7 @@ func webhookCertConfigsFromArgs(hookArgs []webhookHookArgs) iter.Seq[tlscertific ModuleName, args.valuesPropName, ), - CommonCACanonicalName: "linstor-scheduler-admission", + CommonCACanonicalName: "webhooks-ca", Usages: []kcertificates.KeyUsage{ kcertificates.UsageKeyEncipherment, kcertificates.UsageCertSign, diff --git a/hooks/migrate_csi_endpoint.sh b/hooks/migrate_csi_endpoint.sh index fbf7911a6..b4177d3dc 100755 --- a/hooks/migrate_csi_endpoint.sh +++ b/hooks/migrate_csi_endpoint.sh @@ -64,9 +64,7 @@ run_trigger() { delete_resource ${NAMESPACE} daemonset linstor-csi-node scale_down_pods ${NAMESPACE} linstor-csi-controller scale_down_pods ${NAMESPACE} linstor-affinity-controller - scale_down_pods ${NAMESPACE} linstor-scheduler scale_down_pods ${NAMESPACE} sds-replicated-volume-controller - scale_down_pods ${NAMESPACE} linstor-scheduler-admission export temp_dir=$(mktemp -d) diff --git a/images/drbd-cluster-sync/src/Dockerfile b/images/drbd-cluster-sync/src/Dockerfile new file mode 100644 index 000000000..54b633c41 --- /dev/null +++ 
# Builder must satisfy the toolchain requirement declared in go.mod.
# BUG FIX: the original used golang:1.20, which cannot build a module
# declaring "go 1.24.0" — `go mod download` fails with a version error.
FROM golang:1.24 AS builder

WORKDIR /app

# Warm the module cache before copying sources so dependency layers are reused.
COPY go.mod go.sum ./
RUN go mod download

COPY . .

ARG VERSION=unknown
# BUG FIX: the main package lives in ./cmd (src/cmd/main.go), so build that
# package; `go build .` at the module root has no main package to compile.
# NOTE(review): the -X flag stamps linstor-csi's driver.Version variable,
# which looks copied from the linstor-csi Dockerfile — confirm this is the
# intended target for drbd-cluster-sync's version string.
RUN CGO_ENABLED=0 GOOS=linux \
    go build \
    -ldflags "-X github.com/piraeusdatastore/linstor-csi/pkg/driver.Version=$VERSION -extldflags -static" \
    -o drbd-cluster-sync ./cmd

FROM alpine:latest

COPY --from=builder /app/drbd-cluster-sync /usr/local/bin/drbd-cluster-sync

ENTRYPOINT ["/usr/local/bin/drbd-cluster-sync"]
+*/ +package main + +import ( + "context" + "fmt" + "os" + + "drbd-cluster-sync/config" + "drbd-cluster-sync/controller" + kubutils "drbd-cluster-sync/kubeutils" + "drbd-cluster-sync/logger" + + "github.com/deckhouse/sds-common-lib/kubeclient" + lsrv "github.com/deckhouse/sds-replicated-volume/api/linstor" + srv "github.com/deckhouse/sds-replicated-volume/api/v1alpha1" + srv2 "github.com/deckhouse/sds-replicated-volume/api/v1alpha2" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + apiruntime "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/manager" + "sigs.k8s.io/controller-runtime/pkg/manager/signals" +) + +func main() { + ctx := signals.SetupSignalHandler() + opts := config.NewDefaultOptions() + + resourcesSchemeFuncs := []func(*apiruntime.Scheme) error{ + srv.AddToScheme, + v1.AddToScheme, + lsrv.AddToScheme, + srv2.AddToScheme, + } + + log, err := logger.NewLogger(logger.Verbosity("4")) + if err != nil { + os.Exit(1) + } + + scheme := runtime.NewScheme() + for _, f := range resourcesSchemeFuncs { + if err := f(scheme); err != nil { + log.Error(err, "[Main] unable to add scheme to func") + os.Exit(1) + } + } + log.Info("[Main] successfully read scheme CR") + + kConfig, err := kubutils.KubernetesDefaultConfigCreate() + if err != nil { + log.Error(err, "[Main] unable to KubernetesDefaultConfigCreate") + os.Exit(1) + } + log.Info("[Main] kubernetes config has been successfully created.") + + managerOpts := manager.Options{ + Scheme: scheme, + Logger: log.GetLogger(), + BaseContext: func() context.Context { return ctx }, + } + + mgr, err := manager.New(kConfig, managerOpts) + if err != nil { + log.Error(err, "[Main] unable to create manager for creating controllers") + os.Exit(1) + } + + kc, err := kubeclient.New(resourcesSchemeFuncs...) 
+ if err != nil { + log.Error(err, "[Main] failed to initialize kube client") + os.Exit(1) + } + + if err = controller.RunLayerResourceIDsWatcher(mgr, log, kc, opts); err != nil { + log.Error(err, fmt.Sprintf("[Main] unable to run %s controller", controller.LVGLayerResourceIDsWatcherName)) + os.Exit(1) + } + log.Info(fmt.Sprintf("[Main] successfully ran %s controller", controller.LVGLayerResourceIDsWatcherName)) + + err = mgr.Start(ctx) + if err != nil { + log.Error(err, "[Main] unable to mgr.Start()") + os.Exit(1) + } + + // r := rate.Limit(opts.RPS) + // if r <= 0 { + // r = rate.Inf + // } + + // linstorOpts := []lapi.Option{ + // lapi.Limit(r, opts.Burst), + // lapi.UserAgent("linstor-csi/" + driver.Version), + // lapi.Log(log), + // } + + // if opts.LSEndpoint != "" { + // u, err := url.Parse(opts.LSEndpoint) + // if err != nil { + // log.Error(err, "[Main] Failed to parse endpoint") + // os.Exit(1) + // } + + // linstorOpts = append(linstorOpts, lapi.BaseURL(u)) + // } + + // if opts.LSSkipTLSVerification { + // linstorOpts = append(linstorOpts, lapi.HTTPClient(&http.Client{ + // Transport: &http.Transport{ + // TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, + // }, + // })) + // } + + // if opts.BearerTokenFile != "" { + // token, err := os.ReadFile(opts.BearerTokenFile) + // if err != nil { + // log.Error(err, "[Main] failed to read bearer token file") + // os.Exit(1) + // } + + // linstorOpts = append(linstorOpts, lapi.BearerToken(string(token))) + // } + + // lc, err := lc.NewHighLevelClient(linstorOpts...) 
// Options holds the command-line configuration for drbd-cluster-sync.
type Options struct {
	RetryCount            uint    // number of retry attempts for resource creation
	RetryDelaySec         uint    // delay cap between retries, in seconds
	NumWorkers            int     // size of the creation worker pool
	LogLevel              string  // one of: panic, fatal, error, warn, info, debug
	Burst                 int     // LINSTOR API burst allowance
	RPS                   float64 // LINSTOR API requests per second (0 = unlimited)
	LSEndpoint            string  // LINSTOR controller API endpoint URL
	LSSkipTLSVerification bool    // if true, skip TLS verification for the LINSTOR API
	BearerTokenFile       string  // path to a bearer token file for authentication
}

// NewDefaultOptions registers all command-line flags, parses the command
// line, and returns the resulting options with out-of-range values clamped
// back to their defaults.
func NewDefaultOptions() *Options {
	var opts Options

	flag.UintVar(&opts.RetryCount, "retry-count", 10, "Number of retry attempts")
	flag.UintVar(&opts.RetryDelaySec, "retry-delay", 2, "Delay between retries in seconds")
	flag.IntVar(&opts.NumWorkers, "num-workers", 3, "Number of workers")
	flag.StringVar(&opts.LogLevel, "log-level", "info", "Enable debug log output. Choose from: panic, fatal, error, warn, info, debug")
	flag.IntVar(&opts.Burst, "linstor-api-burst", 1, "Maximum number of API requests allowed before being limited by requests-per-second. Default: 1 (no bursting)")
	flag.Float64Var(&opts.RPS, "linstor-api-requests-per-second", 0, "Maximum allowed number of LINSTOR API requests per second. Default: Unlimited")
	flag.StringVar(&opts.LSEndpoint, "linstor-endpoint", "", "Controller API endpoint for LINSTOR")
	flag.BoolVar(&opts.LSSkipTLSVerification, "linstor-skip-tls-verification", false, "If true, do not verify TLS")
	flag.StringVar(&opts.BearerTokenFile, "bearer-token", "", "Read the bearer token from the given file and use it for authentication.")

	flag.Parse()

	// BUG FIX: the original performed these sanity checks immediately after
	// the flag.*Var registrations — i.e. BEFORE flag.Parse() — so they only
	// ever saw the (already valid) defaults and could never correct a bad
	// user-supplied value. Validation must run after parsing.
	if opts.RetryDelaySec < 1 {
		opts.RetryDelaySec = 2 // fall back to the default delay
	}
	if opts.NumWorkers < 1 {
		opts.NumWorkers = 3 // fall back to the default pool size
	}

	return &opts
}
reconcile.Request) (reconcile.Result, error) { + return reconcile.Result{}, nil + }), + }) + if err != nil { + log.Error(err, "[RunLayerResourceIDsWatcher] unable to create a controller") + return err + } + + err = c.Watch(source.Kind(mgr.GetCache(), &lapi.LayerResourceIds{}, handler.TypedFuncs[*lapi.LayerResourceIds, reconcile.Request]{ + CreateFunc: func(ctx context.Context, e event.TypedCreateEvent[*lapi.LayerResourceIds], _ workqueue.TypedRateLimitingInterface[reconcile.Request]) { + pvs := &v1.PersistentVolumeList{} + if err := kc.List(ctx, pvs); err != nil { + log.Error(err, "[RunLayerResourceIDsWatcher] failed to get persistent volumes") + return + } + + pvcMap := make(map[string]*v1.PersistentVolume, len(pvs.Items)) + for _, pvc := range pvs.Items { + pvcMap[pvc.Name] = &pvc + } + + layerStorageVolumeList := &lapi.LayerStorageVolumesList{} + err := kc.List(ctx, layerStorageVolumeList) + if err != nil { + log.Error(err, "[RunLayerResourceIDsWatcher] failed to list layer storage volumes") + return + } + + layerStorageResourceIDs := &lapi.LayerResourceIdsList{} + err = kc.List(ctx, layerStorageResourceIDs) + if err != nil { + log.Error(err, "[RunLayerResourceIDsWatcher] failed to list layer resource id") + return + } + + lriMap := make(map[int]*lapi.LayerResourceIds, len(layerStorageResourceIDs.Items)) + for _, lri := range layerStorageResourceIDs.Items { + lriMap[lri.Spec.LayerResourceID] = &lri + } + + replicaMap := make(map[string]*srv2.DRBDResourceReplica) + for _, lsv := range layerStorageVolumeList.Items { + lri, found := lriMap[lsv.Spec.LayerResourceID] + if !found { + fmt.Printf("[RunLayerResourceIDsWatcher] no layer resource id %s found. 
skipping iteration") + } + + isDiskless := false + if lsv.Spec.ProviderKind == "DISKLESS" { + isDiskless = true + } + nodeName := strings.ToLower(lsv.Spec.NodeName) + + r, found := replicaMap[lri.Spec.ResourceName] + if !found { + pvName := strings.ToLower(lri.Spec.ResourceName) + + replicaMap[lri.Spec.ResourceName] = &srv2.DRBDResourceReplica{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvName, + Namespace: pvcMap[pvName].Spec.ClaimRef.Namespace, + }, + Spec: srv2.DRBDResourceReplicaSpec{ + Peers: map[string]srv2.Peer{ + nodeName: srv2.Peer{ + Diskless: isDiskless, + }, + }, + }, + } + continue + } + + peer := r.Spec.Peers[nodeName] + peer.Diskless = isDiskless + r.Spec.Peers[nodeName] = peer + } + + var wg sync.WaitGroup + semaphore := make(chan struct{}, opts.NumWorkers) + for _, replica := range replicaMap { + semaphore <- struct{}{} + wg.Add(1) + go func() { + defer func() { + <-semaphore + wg.Done() + }() + createDRBDResource(ctx, kc, replica, opts, log) + }() + } + + wg.Wait() + close(semaphore) + return + }, + })) + if err != nil { + log.Error(err, "[RunLayerResourceIDsWatcher] Watch error") + return err + } + + return nil +} + +func createDRBDResource(ctx context.Context, kc kubecl.Client, drbdResourceReplica *srv2.DRBDResourceReplica, opts *config.Options, log *logger.Logger) { + if err := retry.OnError( + // backoff settings + wait.Backoff{ + Duration: 2 * time.Second, // initial delay before first retry + Factor: 1.0, // Cap is multiplied by this value each retry + Steps: int(opts.RetryCount), // amount of retries + Cap: time.Duration(opts.RetryDelaySec), // delay between retries + }, + // this function takes an error returned by kc.Create and decides whether to make a retry or not + func(err error) bool { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + log.Error(err, "[RunLayerResourceIDsWatcher] drbd resource replica retry context err") + return false + } + + if statusError, ok := err.(*k8sErr.StatusError); ok { + 
switch statusError.ErrStatus.Reason { + case + metav1.StatusReasonForbidden, + metav1.StatusReasonAlreadyExists, + metav1.StatusReasonInvalid, + metav1.StatusReasonConflict, + metav1.StatusReasonBadRequest: + log.Error(statusError, fmt.Sprintf("[RunLayerResourceIDsWatcher] drbd resource replica retry creation err: %s", statusError.ErrStatus.Reason)) + return false + } + } + return true + }, + func() error { + err := kc.Create(ctx, drbdResourceReplica) + if err == nil { + log.Info(fmt.Sprintf("[RunLayerResourceIDsWatcher] DRBD resource replica %s successfully created", drbdResourceReplica.Name)) + } + return err + }, + ); err != nil { + log.Error(err, fmt.Sprintf("[RunLayerResourceIDsWatcher] failed to create a DRBD resource replica %s", drbdResourceReplica.Name)) + } +} diff --git a/images/drbd-cluster-sync/src/crd_sync/drbd_cluster.go b/images/drbd-cluster-sync/src/crd_sync/drbd_cluster.go new file mode 100644 index 000000000..df1d32263 --- /dev/null +++ b/images/drbd-cluster-sync/src/crd_sync/drbd_cluster.go @@ -0,0 +1,364 @@ +package crd_sync + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + "time" + + "drbd-cluster-sync/config" + "drbd-cluster-sync/logger" + + lsrv "github.com/deckhouse/sds-replicated-volume/api/linstor" + srv "github.com/deckhouse/sds-replicated-volume/api/v1alpha1" + srv2 "github.com/deckhouse/sds-replicated-volume/api/v1alpha2" + log "github.com/sirupsen/logrus" + v1 "k8s.io/api/core/v1" + k8sErr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/util/retry" + kubecl "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + ParameterResourceDefQuorumPolicy = "DrbdOptions/Resource/quorum" + K8sNameLabel = "kubernetes.io/metadata.name" +) + +type DRBDClusterSyncer struct { + kc kubecl.Client + opts *config.Options + log *logger.Logger +} + +func NewDRBDClusterSyncer(kc kubecl.Client, log *logger.Logger, opts *config.Options) 
*DRBDClusterSyncer { + return &DRBDClusterSyncer{kc: kc, log: log, opts: opts} +} + +func (r *DRBDClusterSyncer) Sync(ctx context.Context) error { + pvs := &v1.PersistentVolumeList{} + if err := r.kc.List(ctx, pvs); err != nil { + return fmt.Errorf("failed to get persistent volumes: %w", err) + } + + pvcMap := make(map[string]*v1.PersistentVolume, len(pvs.Items)) + for _, pvc := range pvs.Items { + pvcMap[pvc.Name] = &pvc + } + + layerStorageVolumeList := &lsrv.LayerStorageVolumesList{} + err := r.kc.List(ctx, layerStorageVolumeList) + if err != nil { + return fmt.Errorf("failed to list layer storage volumes: %w", err) + } + + layerStorageResourceIDs := &lsrv.LayerResourceIdsList{} + err = r.kc.List(ctx, layerStorageResourceIDs) + if err != nil { + return fmt.Errorf("failed to list layer resource id: %w", err) + } + + lriMap := make(map[int]*lsrv.LayerResourceIds, len(layerStorageResourceIDs.Items)) + for _, lri := range layerStorageResourceIDs.Items { + lriMap[lri.Spec.LayerResourceID] = &lri + } + + replicaMap := make(map[string]*srv2.DRBDResourceReplica) + for _, lsv := range layerStorageVolumeList.Items { + lri, found := lriMap[lsv.Spec.LayerResourceID] + if !found { + fmt.Printf("no layer resource id %s found. 
skipping iteration") + } + + isDiskless := false + if lsv.Spec.ProviderKind == "DISKLESS" { + isDiskless = true + } + nodeName := strings.ToLower(lsv.Spec.NodeName) + + r, found := replicaMap[lri.Spec.ResourceName] + if !found { + pvName := strings.ToLower(lri.Spec.ResourceName) + + replicaMap[lri.Spec.ResourceName] = &srv2.DRBDResourceReplica{ + ObjectMeta: metav1.ObjectMeta{ + Name: pvName, + Namespace: pvcMap[pvName].Spec.ClaimRef.Namespace, + }, + Spec: srv2.DRBDResourceReplicaSpec{ + Peers: map[string]srv2.Peer{ + nodeName: srv2.Peer{ + Diskless: isDiskless, + }, + }, + }, + } + continue + } + + peer := r.Spec.Peers[nodeName] + peer.Diskless = isDiskless + r.Spec.Peers[nodeName] = peer + } + + var wg sync.WaitGroup + semaphore := make(chan struct{}, r.opts.NumWorkers) + for _, replica := range replicaMap { + semaphore <- struct{}{} + wg.Add(1) + go func() { + defer func() { + <-semaphore + wg.Done() + }() + createDRBDResource(ctx, r.kc, replica, r.opts) + }() + } + + wg.Wait() + close(semaphore) + return nil + + // Step 1 - get entities from k8s etcd + // drbdClusters := &srv.DRBDClusterList{} + // if err := r.kc.List(ctx, drbdClusters); err != nil { + // return fmt.Errorf("failed to get resource definitions: %w", err) + // } + + // pvs := &v1.PersistentVolumeList{} + // if err := r.kc.List(ctx, pvs); err != nil { + // return fmt.Errorf("failed to get persistent volumes: %w", err) + // } + + // resDefs := &lsrv.ResourceDefinitionsList{} + // if err := r.kc.List(ctx, resDefs); err != nil { + // return fmt.Errorf("failed to get resource definitions: %w", err) + // } + + // volDefs := &lsrv.VolumeDefinitionsList{} + // if err := r.kc.List(ctx, volDefs); err != nil { + // return fmt.Errorf("failed to get volume definitions: %w", err) + // } + + // resGroups := &lsrv.ResourceGroupsList{} + // if err := r.kc.List(ctx, resGroups); err != nil { + // return fmt.Errorf("failed to get resource groups: %w", err) + // } + + // rscList := &srv.ReplicatedStorageClassList{} + 
// if err := r.kc.List(ctx, rscList); err != nil { + // return fmt.Errorf("failed to get replicated storage class: %w", err) + // } + + // layerDRBDResDefList := &lsrv.LayerDrbdResourceDefinitionsList{} + // if err := r.kc.List(ctx, layerDRBDResDefList); err != nil { + // return fmt.Errorf("failed to get layer drbd resource definitions: %w", err) + // } + + // Step 2: Filtering resource definitions + // drbdClusterFilter := make(map[string]struct{}, len(drbdClusters.Items)) + // for _, cluster := range drbdClusters.Items { + // drbdClusterFilter[strings.ToLower(cluster.Name)] = struct{}{} + // } + + // filteredResDefs := resDefs.Items[:0] + // for _, resDef := range resDefs.Items { + // _, clusterExists := drbdClusterFilter[strings.ToLower(resDef.Spec.ResourceName)] + + // if !clusterExists { + // filteredResDefs = append(filteredResDefs, resDef) + // } + // } + + // Step 3: Joins and aggregations + // pvToLayerDRBDResDef := make(map[string]*lsrv.LayerDrbdResourceDefinitions, len(layerDRBDResDefList.Items)) + // for _, resDef := range layerDRBDResDefList.Items { + // pvToLayerDRBDResDef[strings.ToLower(resDef.Spec.ResourceName)] = &resDef + // } + + // pvToRSC := make(map[string]*srv.ReplicatedStorageClass, len(pvs.Items)) + // for _, rsc := range rscList.Items { + // pvToRSC[strings.ToLower(rsc.Name)] = &rsc + // } + + // pvToVolumeDef := make(map[string]*lsrv.VolumeDefinitions, len(volDefs.Items)) + // for _, volDef := range volDefs.Items { + // pvToVolumeDef[strings.ToLower(volDef.Spec.ResourceName)] = &volDef + // } + + // resGrNameToStruct := make(map[string]*lsrv.ResourceGroups, len(resGroups.Items)) + // for _, resGroup := range resGroups.Items { + // resGrNameToStruct[strings.ToLower(resGroup.Spec.ResourceGroupName)] = &resGroup + // } + + // rscNameToStruct := make(map[string]*srv.ReplicatedStorageClass, len(rscList.Items)) + // for _, rsc := range rscList.Items { + // rscNameToStruct[strings.ToLower(rsc.Name)] = &rsc + // } + + // pvToResGroup := 
make(map[string]*lsrv.ResourceGroups, len(filteredResDefs)) + // for _, resDef := range filteredResDefs { + // pvToResGroup[strings.ToLower(resDef.Spec.ResourceName)] = resGrNameToStruct[strings.ToLower(resDef.Spec.ResourceGroupName)] + // } + + // pvtoStruct := make(map[string]*v1.PersistentVolume, len(pvs.Items)) + // for _, pv := range pvs.Items { + // pvtoStruct[strings.ToLower(pv.Name)] = &pv + // } + + // resDefToPV := make(map[string]*v1.PersistentVolume, len(filteredResDefs)) + // for _, resDef := range filteredResDefs { + // resDefToPV[strings.ToLower(resDef.Name)] = pvtoStruct[strings.ToLower(resDef.Spec.ResourceName)] + // } + + // Step 4: Create DRBDCluster + // var wg sync.WaitGroup + // semaphore := make(chan struct{}, r.opts.NumWorkers) + // for _, resDef := range filteredResDefs { + // semaphore <- struct{}{} + // wg.Add(1) + // go func() { + // defer func() { + // <-semaphore + // wg.Done() + // }() + // createDRBDCluster(ctx, resDefToPV[resDef.Name], pvToRSC, pvToResGroup, pvToVolumeDef, pvToLayerDRBDResDef, r.log, r.kc, r.opts) + // }() + // } + + // wg.Wait() + // close(semaphore) + // return nil +} + +func createDRBDResource(ctx context.Context, kc kubecl.Client, drbdResourceReplica *srv2.DRBDResourceReplica, opts *config.Options) { + if err := retry.OnError( + // backoff settings + wait.Backoff{ + Duration: 2 * time.Second, // initial delay before first retry + Factor: 1.0, // Cap is multiplied by this value each retry + Steps: int(opts.RetryCount), // amount of retries + Cap: time.Duration(opts.RetryDelaySec), // delay between retries + }, + // this function takes an error returned by kc.Create and decides whether to make a retry or not + func(err error) bool { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + log.Errorf("drbd resource replica retry context err: %v", err) + return false + } + + if statusError, ok := err.(*k8sErr.StatusError); ok { + switch statusError.ErrStatus.Reason { + case + 
metav1.StatusReasonForbidden, + metav1.StatusReasonAlreadyExists, + metav1.StatusReasonInvalid, + metav1.StatusReasonConflict, + metav1.StatusReasonBadRequest: + log.Errorf("drbd resource replica retry creation err: %s", statusError.ErrStatus.Reason) + return false + } + } + return true + }, + func() error { + err := kc.Create(ctx, drbdResourceReplica) + if err == nil { + log.Infof("DRBD resource replica %s successfully created", drbdResourceReplica.Name) + } + return err + }, + ); err != nil { + log.Errorf("failed to create a DRBD resource replica %s: %s", drbdResourceReplica.Name, err.Error()) + } +} + +func createDRBDCluster( + ctx context.Context, + pv *v1.PersistentVolume, + rscToStruct map[string]*srv.ReplicatedStorageClass, + pvToResGroup map[string]*lsrv.ResourceGroups, + pvToVolumeDef map[string]*lsrv.VolumeDefinitions, + pvToLayerDRBDResDef map[string]*lsrv.LayerDrbdResourceDefinitions, + log *log.Entry, + kc kubecl.Client, + opts *config.Options, +) { + autoDiskfulDelaySec := 0 + if rscToStruct[pv.Spec.StorageClassName].Spec.VolumeAccess == "EventuallyLocal" { + autoDiskfulDelaySec = 30 * 60 + } + + drbdCluster := &srv.DRBDCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: pv.Name, + }, + Spec: srv.DRBDClusterSpec{ + Replicas: int32(pvToResGroup[pv.Name].Spec.ReplicaCount), + QuorumPolicy: "majority", + Size: int64(pvToVolumeDef[pv.Name].Spec.VlmSize), + SharedSecret: pvToLayerDRBDResDef[pv.Name].Spec.Secret, + Port: int32(pvToLayerDRBDResDef[pv.Name].Spec.TCPPort), + AutoDiskful: srv.AutoDiskful{ + DelaySeconds: autoDiskfulDelaySec, + }, + StoragePoolSelector: []metav1.LabelSelector{ + { + MatchExpressions: []metav1.LabelSelectorRequirement{ + { + Key: K8sNameLabel, + Operator: "In", + Values: []string{ + rscToStruct[pv.Spec.StorageClassName].Spec.StoragePool, + }, + }, + }, + }, + }, + }, + } + + if err := retry.OnError( + // backoff settings + wait.Backoff{ + Duration: 2 * time.Second, // initial delay before first retry + Factor: 1.0, // Cap is 
multiplied by this value each retry + Steps: int(opts.RetryCount), // amount of retries + Cap: time.Duration(opts.RetryDelaySec), // delay between retries + }, + // this function takes an error returned by kc.Create and decides whether to make a retry or not + func(err error) bool { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { + log.Errorf("drbd cluster retry context err: %v", err) + return false + } + + if statusError, ok := err.(*k8sErr.StatusError); ok { + switch statusError.ErrStatus.Reason { + case + metav1.StatusReasonForbidden, + metav1.StatusReasonAlreadyExists, + metav1.StatusReasonInvalid, + metav1.StatusReasonConflict, + metav1.StatusReasonBadRequest: + log.Errorf("drbd cluster retry creation err: %s", statusError.ErrStatus.Reason) + return false + } + } + return true + }, + func() error { + err := kc.Create(ctx, drbdCluster) + if err == nil { + log.Infof("DRBD cluster %s successfully created", drbdCluster.Name) + } + return err + }, + ); err != nil { + log.Errorf("failed to create a DRBD cluster %s: %s", pv.Name, err.Error()) + return + } +} diff --git a/images/drbd-cluster-sync/src/go.mod b/images/drbd-cluster-sync/src/go.mod new file mode 100644 index 000000000..0be7b2746 --- /dev/null +++ b/images/drbd-cluster-sync/src/go.mod @@ -0,0 +1,70 @@ +module drbd-cluster-sync + +go 1.24.0 + +require ( + github.com/deckhouse/sds-replicated-volume/api v0.0.0-20250530134639-b540b9d2edbf + k8s.io/apimachinery v0.33.1 +) + +replace github.com/deckhouse/sds-replicated-volume/api => ../../../api + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/fsnotify/fsnotify v1.8.0 // indirect + github.com/google/btree v1.1.3 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect + github.com/prometheus/client_golang v1.22.0 // indirect + github.com/prometheus/client_model v0.6.1 // indirect + github.com/prometheus/common v0.62.0 // 
indirect + github.com/prometheus/procfs v0.15.1 // indirect + github.com/spf13/pflag v1.0.5 // indirect + golang.org/x/sync v0.12.0 // indirect + gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect +) + +require ( + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect + github.com/deckhouse/sds-common-lib v0.0.0-20250611081307-a9d174560ad3 + github.com/emicklei/go-restful/v3 v3.12.1 // indirect + github.com/evanphx/json-patch/v5 v5.9.11 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 + github.com/go-openapi/jsonpointer v0.21.0 // indirect + github.com/go-openapi/jsonreference v0.21.0 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/mailru/easyjson v0.9.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sirupsen/logrus v1.9.3 + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/net v0.38.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.31.0 // indirect + golang.org/x/term v0.30.0 // indirect + golang.org/x/text v0.23.0 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + k8s.io/api v0.33.0 + k8s.io/client-go v0.33.0 + k8s.io/klog/v2 v2.130.1 + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + 
k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect + sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect +) diff --git a/images/drbd-cluster-sync/src/go.sum b/images/drbd-cluster-sync/src/go.sum new file mode 100644 index 000000000..69fb6b5e6 --- /dev/null +++ b/images/drbd-cluster-sync/src/go.sum @@ -0,0 +1,194 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deckhouse/sds-common-lib v0.0.0-20250611081307-a9d174560ad3 h1:G6OcJSP98KLrhvwyqzRlLQwiFiyj+zcRWb79nhopx+Q= +github.com/deckhouse/sds-common-lib v0.0.0-20250611081307-a9d174560ad3/go.mod h1:tAZI7ZaVeJi5/Fe5Mebw3d6NC4nTHUOOTwZFnHHzxFU= +github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= +github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k= +github.com/evanphx/json-patch v0.5.2/go.mod 
h1:ZWS5hhDbVDyob71nXKNL0+PWn6ToqBHMikGIFbs31qQ= +github.com/evanphx/json-patch/v5 v5.9.11 h1:/8HVnzMq13/3x9TPvjG08wUGqBTmZBsCWzjTM0wiaDU= +github.com/evanphx/json-patch/v5 v5.9.11/go.mod h1:3j+LviiESTElxA4p3EMKAB9HXj3/XEtnUf6OZxqIQTM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ= +github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= +github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ= +github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod 
h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod 
h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= +github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 
h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v1.22.0 h1:rb93p9lokFEsctTys46VnV1kLCDpVZ0a/Y92Vm0Zc6Q= +github.com/prometheus/client_golang v1.22.0/go.mod h1:R7ljNsLXhuQXYZYtw6GAE9AZg8Y7vEW5scdCXrWRXC0= +github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= +github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= +github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= +github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= +github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= +github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify 
v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= +go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= +golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= +golang.org/x/oauth2 v0.27.0 
h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw= +golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= +golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= +golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= +golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= +golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ= +golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= +gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod 
h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= +k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= +k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= +sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= 
+sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/images/drbd-cluster-sync/src/kubeutils/kubernetes.go b/images/drbd-cluster-sync/src/kubeutils/kubernetes.go new file mode 100644 index 000000000..0e0a69d22 --- /dev/null +++ b/images/drbd-cluster-sync/src/kubeutils/kubernetes.go @@ -0,0 +1,35 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package kubutils + +import ( + "fmt" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +func KubernetesDefaultConfigCreate() (*rest.Config, error) { + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + + // Get a config to talk to API server + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("config kubernetes error %w", err) + } + return config, nil +} diff --git a/images/drbd-cluster-sync/src/logger/logger.go b/images/drbd-cluster-sync/src/logger/logger.go new file mode 100644 index 000000000..455c03955 --- /dev/null +++ b/images/drbd-cluster-sync/src/logger/logger.go @@ -0,0 +1,84 @@ +/* +Copyright 2024 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package logger + +import ( + "fmt" + "strconv" + + "github.com/go-logr/logr" + "k8s.io/klog/v2/textlogger" +) + +type Verbosity string + +const ( + ErrorLevel Verbosity = "0" + WarningLevel Verbosity = "1" + InfoLevel Verbosity = "2" + DebugLevel Verbosity = "3" + TraceLevel Verbosity = "4" + CacheLevel Verbosity = "5" +) + +const ( + warnLvl = iota + 1 + infoLvl + debugLvl + traceLvl + cacheLvl +) + +type Logger struct { + log logr.Logger +} + +func NewLogger(level Verbosity) (*Logger, error) { + v, err := strconv.Atoi(string(level)) + if err != nil { + return nil, err + } + + log := textlogger.NewLogger(textlogger.NewConfig(textlogger.Verbosity(v))).WithCallDepth(1) + + return &Logger{log: log}, nil +} + +func (l Logger) GetLogger() logr.Logger { + return l.log +} + +func (l Logger) Error(err error, message string, keysAndValues ...interface{}) { + l.log.Error(err, fmt.Sprintf("ERROR %s", message), keysAndValues...) +} + +func (l Logger) Warning(message string, keysAndValues ...interface{}) { + l.log.V(warnLvl).Info(fmt.Sprintf("WARNING %s", message), keysAndValues...) +} + +func (l Logger) Info(message string, keysAndValues ...interface{}) { + l.log.V(infoLvl).Info(fmt.Sprintf("INFO %s", message), keysAndValues...) +} + +func (l Logger) Debug(message string, keysAndValues ...interface{}) { + l.log.V(debugLvl).Info(fmt.Sprintf("DEBUG %s", message), keysAndValues...) +} + +func (l Logger) Trace(message string, keysAndValues ...interface{}) { + l.log.V(traceLvl).Info(fmt.Sprintf("TRACE %s", message), keysAndValues...) +} + +func (l Logger) Cache(message string, keysAndValues ...interface{}) { + l.log.V(cacheLvl).Info(fmt.Sprintf("CACHE %s", message), keysAndValues...) 
+} diff --git a/images/drbd-cluster-sync/src/pkg/kubernetes.go b/images/drbd-cluster-sync/src/pkg/kubernetes.go new file mode 100644 index 000000000..7f8c32a8b --- /dev/null +++ b/images/drbd-cluster-sync/src/pkg/kubernetes.go @@ -0,0 +1,34 @@ +/* +Copyright 2023 Flant JSC +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package kubeutils + +import ( + "fmt" + + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" +) + +func KubernetesDefaultConfigCreate() (*rest.Config, error) { + clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + clientcmd.NewDefaultClientConfigLoadingRules(), + &clientcmd.ConfigOverrides{}, + ) + // Get a config to talk to API server + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, fmt.Errorf("config kubernetes error %w", err) + } + return config, nil +} diff --git a/images/drbd-cluster-sync/werf.inc.yaml b/images/drbd-cluster-sync/werf.inc.yaml new file mode 100644 index 000000000..709257e47 --- /dev/null +++ b/images/drbd-cluster-sync/werf.inc.yaml @@ -0,0 +1,35 @@ +--- +image: {{ $.ImageName }}-golang-artifact +fromImage: builder/golang-alpine + +git: + - add: / + to: / + includePaths: + - api + - images/drbd-cluster-sync/src + stageDependencies: + setup: + - "**/*" + +mount: + - fromPath: ~/go-pkg-cache + to: /go/pkg +shell: + setup: + - cd /images/drbd-cluster-sync/src/cmd + - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-X 
github.com/piraeusdatastore/linstor-csi/pkg/driver.Version=v98544cadb6d111d27a86a11ec07de91b99704b82" -o /drbd-cluster-sync + - chmod +x /drbd-cluster-sync + +--- +image: {{ $.ImageName }} +fromImage: base/distroless + +import: + - image: {{ $.ImageName }}-golang-artifact + add: /drbd-cluster-sync + to: /drbd-cluster-sync + before: setup + +docker: + ENTRYPOINT: ["/drbd-cluster-sync"] diff --git a/images/linstor-csi/patches/004-csi-add-new-topology-logic.patch b/images/linstor-csi/patches/004-csi-add-new-topology-logic.patch index f6ec05f65..083b4fa18 100644 --- a/images/linstor-csi/patches/004-csi-add-new-topology-logic.patch +++ b/images/linstor-csi/patches/004-csi-add-new-topology-logic.patch @@ -779,10 +779,10 @@ index 856eae7..21feb74 100644 ) -const _paramKeyName = "allowremotevolumeaccessautoplaceclientlistdisklessonremainingdisklessstoragepooldonotplacewithregexencryptionfsoptslayerlistmountoptsnodelistplacementcountplacementpolicyreplicasondifferentreplicasonsamesizekibstoragepoolpostmountxfsoptsresourcegroupusepvcnameoverprovision" -+const _paramKeyName = "allowremotevolumeaccessautoplaceclientlistdisklessonremainingdisklessstoragepooldonotplacewithregexencryptionfsoptslayerlistmountoptsnodelistplacementcountplacementpolicyreplicasondifferentreplicasonsamesizekibstoragepoolpostmountxfsoptsresourcegroupusepvcnameoverprovisionreplicatedstorageclassname" ++const _paramKeyName = "allowremotevolumeaccessautoplaceclientlistdisklessonremainingdisklessstoragepooldonotplacewithregexencryptionfsoptslayerlistmountoptsnodelistplacementcountplacementpolicyreplicasondifferentreplicasonsamesizekibstoragepoolpostmountxfsoptsresourcegroupusepvcnameoverprovisionreplicatedstorageclassnamelvm-volume-groups" -var _paramKeyIndex = [...]uint16{0, 23, 32, 42, 61, 80, 99, 109, 115, 124, 133, 141, 155, 170, 189, 203, 210, 221, 237, 250, 260, 273} -+var _paramKeyIndex = [...]uint16{0, 23, 32, 42, 61, 80, 99, 109, 115, 124, 133, 141, 155, 170, 189, 203, 210, 221, 237, 250, 260, 273, 
299} ++var _paramKeyIndex = [...]uint16{0, 23, 32, 42, 61, 80, 99, 109, 115, 124, 133, 141, 155, 170, 189, 203, 210, 221, 237, 250, 260, 273, 299, 316} func (i paramKey) String() string { if i < 0 || i >= paramKey(len(_paramKeyIndex)-1) { @@ -791,15 +791,16 @@ index 856eae7..21feb74 100644 } -var _paramKeyValues = []paramKey{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20} -+var _paramKeyValues = []paramKey{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21} ++var _paramKeyValues = []paramKey{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22} var _paramKeyNameToValueMap = map[string]paramKey{ _paramKeyName[0:23]: 0, -@@ -41,6 +41,7 @@ var _paramKeyNameToValueMap = map[string]paramKey{ +@@ -41,6 +41,8 @@ var _paramKeyNameToValueMap = map[string]paramKey{ _paramKeyName[237:250]: 18, _paramKeyName[250:260]: 19, _paramKeyName[260:273]: 20, + _paramKeyName[273:299]: 21, ++ _paramKeyName[299:316]: 22, } // paramKeyString retrieves an enum value from the enum constants string name. 
diff --git a/images/linstor-scheduler-admission/werf.inc.yaml b/images/linstor-scheduler-admission/werf.inc.yaml deleted file mode 100644 index a81208c8d..000000000 --- a/images/linstor-scheduler-admission/werf.inc.yaml +++ /dev/null @@ -1,39 +0,0 @@ - ---- -image: {{ $.ImageName }}-golang-artifact -from: {{ $.Root.BASE_GOLANG_1_22 }} -final: false - -git: - - url: https://github.com/piraeusdatastore/linstor-scheduler-extender - add: / - to: /usr/local/go/{{ $.ImageName }} - tag: v{{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }} - stageDependencies: - setup: - - "**/*" -mount: - - fromPath: ~/go-pkg-cache - to: /go/pkg -shell: - setup: - - cd /usr/local/go/{{ $.ImageName }}/cmd/linstor-scheduler-admission - - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s -w -X github.com/piraeusdatastore/linstor-scheduler-extender/pkg/consts.Version=v{{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }}" - - mv {{ $.ImageName }} / - - chmod +x /{{ $.ImageName }} ---- -image: {{ $.ImageName }} -from: {{ $.Root.BASE_SCRATCH }} - -import: - - image: {{ $.ImageName }}-golang-artifact - add: /{{ $.ImageName }} - to: /{{ $.ImageName }} - before: setup - -docker: - ENTRYPOINT: ["/{{ $.ImageName }}"] - LABEL: - distro: all - version: all - linstor-scheduler-admission: {{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }} diff --git a/images/linstor-scheduler-extender/patches/001-linstor-scheduler-extender.patch b/images/linstor-scheduler-extender/patches/001-linstor-scheduler-extender.patch deleted file mode 100644 index 428665697..000000000 --- a/images/linstor-scheduler-extender/patches/001-linstor-scheduler-extender.patch +++ /dev/null @@ -1,46 +0,0 @@ -diff --git a/cmd/linstor-scheduler-extender/linstor-scheduler-extender.go b/cmd/linstor-scheduler-extender/linstor-scheduler-extender.go -index 9f61882..0f53cf1 100644 ---- a/cmd/linstor-scheduler-extender/linstor-scheduler-extender.go -+++ b/cmd/linstor-scheduler-extender/linstor-scheduler-extender.go -@@ -24,6 +24,11 @@ import ( - _ 
"github.com/piraeusdatastore/linstor-scheduler-extender/pkg/driver" - ) - -+const ( -+ defaultCertFile string = "/etc/sds-replicated-volume-scheduler-extender/certs/tls.crt" -+ defaultKeyFile string = "/etc/sds-replicated-volume-scheduler-extender/certs/tls.key" -+) -+ - var ext *extender.Extender - - func main() { -@@ -88,19 +93,24 @@ func run(c *cli.Context) { - log.Fatalf("Error initializing Scheduler Driver %v: %v", "linstor", err) - } - -+ // Create operator-sdk manager that will manage all controllers. -+ mgr, err := manager.New(config, manager.Options{}) -+ if err != nil { -+ log.Fatalf("Setup controller manager: %v", err) -+ } -+ - ext = &extender.Extender{ - Driver: d, - Recorder: recorder, -+ CertFile: defaultCertFile, -+ KeyFile: defaultKeyFile, -+ LogLevel: log.GetLevel(), -+ Manager: &mgr, - } - - if err = ext.Start(); err != nil { - log.Fatalf("Error starting scheduler extender: %v", err) - } -- // Create operator-sdk manager that will manage all controllers. -- mgr, err := manager.New(config, manager.Options{}) -- if err != nil { -- log.Fatalf("Setup controller manager: %v", err) -- } - - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM) diff --git a/images/linstor-scheduler-extender/patches/README.md b/images/linstor-scheduler-extender/patches/README.md deleted file mode 100644 index 1688f30f9..000000000 --- a/images/linstor-scheduler-extender/patches/README.md +++ /dev/null @@ -1,7 +0,0 @@ -## Patches - -### 001-linstor-scheduler-extender.patch - -Scheduler extender - -Extend scheduler with HTTPS support and our own logic. 
diff --git a/images/linstor-scheduler-extender/patches/stork/new-files/pkg/cache/cache.go b/images/linstor-scheduler-extender/patches/stork/new-files/pkg/cache/cache.go deleted file mode 100644 index 08685172e..000000000 --- a/images/linstor-scheduler-extender/patches/stork/new-files/pkg/cache/cache.go +++ /dev/null @@ -1,183 +0,0 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package cache - -import ( - "context" - "fmt" - "sync" - - "github.com/sirupsen/logrus" - corev1 "k8s.io/api/core/v1" - "k8s.io/client-go/rest" - clientCache "k8s.io/client-go/tools/cache" - controllercache "sigs.k8s.io/controller-runtime/pkg/cache" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" -) - -// SharedInformerCache is an eventually consistent cache. The cache interface -// provides APIs to fetch specific k8s objects from the cache. Only a subset -// of k8s objects are currently managed by this cache. -// DO NOT USE it when you need the latest and accurate copy of a CR. -type SharedInformerCache interface { - // block added by flant.com - // GetNode returns the node info if present in the cache. 
- GetNode(nodeName string) (*corev1.Node, error) - // end of flant.com block - - // WatchPods registers the pod event handlers with the informer cache - WatchPods(fn func(object interface{})) error - - // GetPersistentVolumeClaim returns the PersistentVolumeClaim in a namespace from the cache after applying TransformFunc - GetPersistentVolumeClaim(pvcName string, namespace string) (*corev1.PersistentVolumeClaim, error) -} - -type cache struct { - controllerCache controllercache.Cache -} - -var ( - cacheLock sync.Mutex - instance SharedInformerCache - - cacheNotInitializedErr = "shared informer cache has not been initialized yet" -) - -func CreateSharedInformerCache(mgr manager.Manager) error { - cacheLock.Lock() - defer cacheLock.Unlock() - if instance != nil { - return fmt.Errorf("shared informer cache already initialized") - } - config, err := rest.InClusterConfig() - if err != nil { - return err - } - - sharedInformerCache := &cache{} - // Set the global instance - instance = sharedInformerCache - sharedInformerCache.controllerCache, err = controllercache.New(config, controllercache.Options{ - Scheme: mgr.GetScheme(), - // flant.com TODO: make transformMap as in https://github.com/libopenstorage/stork/blob/828e9a057905b93cf1ad43155d9adac5ac8fe8c0/pkg/cache/cache.go#L72 - // when controller-runtime package will be at least v0.12.0 - // TransformByObject: transformMap, - }) - if err != nil { - logrus.Errorf("error creating shared informer cache: %v", err) - return err - } - // indexing pods by nodeName so that we can list all pods running on a node - err = sharedInformerCache.controllerCache.IndexField(context.Background(), &corev1.Pod{}, "spec.nodeName", func(obj client.Object) []string { - podObject, ok := obj.(*corev1.Pod) - if !ok { - return []string{} - } - return []string{podObject.Spec.NodeName} - }) - if err != nil { - logrus.Errorf("error indexing field spec.nodeName for pods: %v", err) - return err - } - - // block added by flant.com - // indexing nodes 
by nodeName so that we can get node info by it's name - err = sharedInformerCache.controllerCache.IndexField(context.Background(), &corev1.Node{}, "metadata.name", func(obj client.Object) []string { - nodeObject, ok := obj.(*corev1.Node) - if !ok { - return []string{} - } - return []string{nodeObject.Name} - }) - if err != nil { - logrus.Errorf("error indexing field name for nodes: %v", err) - return err - } - // end of flant.com block - - go sharedInformerCache.controllerCache.Start(context.Background()) - - synced := sharedInformerCache.controllerCache.WaitForCacheSync(context.Background()) - if !synced { - return fmt.Errorf("error syncing the shared informer cache") - } - - logrus.Tracef("Shared informer cache synced") - - return nil -} - -func Instance() SharedInformerCache { - cacheLock.Lock() - defer cacheLock.Unlock() - return instance -} - -// Only used for UTs -func SetTestInstance(s SharedInformerCache) { - cacheLock.Lock() - defer cacheLock.Unlock() - instance = s -} - -// block added by flant.com -// GetNode returns the node info if present in the cache. -func (c *cache) GetNode(nodeName string) (*corev1.Node, error) { - if c == nil || c.controllerCache == nil { - return nil, fmt.Errorf(cacheNotInitializedErr) - } - node := &corev1.Node{} - if err := c.controllerCache.Get(context.Background(), client.ObjectKey{Name: nodeName}, node); err != nil { - return nil, err - } - return node, nil -} - -// end of flant.com block - -// WatchPods uses handlers for different pod events with shared informers. 
-func (c *cache) WatchPods(fn func(object interface{})) error { - informer, err := c.controllerCache.GetInformer(context.Background(), &corev1.Pod{}) - if err != nil { - logrus.WithError(err).Error("error getting the informer for pods") - return err - } - - informer.AddEventHandler(clientCache.ResourceEventHandlerFuncs{ - AddFunc: fn, - UpdateFunc: func(oldObj, newObj interface{}) { - // Only considering the new pod object - fn(newObj) - }, - DeleteFunc: fn, - }) - - return nil -} - -// GetPersistentVolumeClaim returns the transformed PersistentVolumeClaim if present in the cache. -func (c *cache) GetPersistentVolumeClaim(pvcName string, namespace string) (*corev1.PersistentVolumeClaim, error) { - if c == nil || c.controllerCache == nil { - return nil, fmt.Errorf(cacheNotInitializedErr) - } - pvc := &corev1.PersistentVolumeClaim{} - if err := c.controllerCache.Get(context.Background(), client.ObjectKey{Name: pvcName, Namespace: namespace}, pvc); err != nil { - return nil, err - } - return pvc, nil -} diff --git a/images/linstor-scheduler-extender/patches/stork/stork.patch b/images/linstor-scheduler-extender/patches/stork/stork.patch deleted file mode 100644 index b6e936edd..000000000 --- a/images/linstor-scheduler-extender/patches/stork/stork.patch +++ /dev/null @@ -1,293 +0,0 @@ -diff --git a/pkg/extender/extender.go b/pkg/extender/extender.go -index 73c8c307a..531718a0c 100644 ---- a/pkg/extender/extender.go -+++ b/pkg/extender/extender.go -@@ -3,14 +3,18 @@ package extender - import ( - "context" - "encoding/json" -+ "errors" - "fmt" - "net/http" -+ "net/http/httputil" -+ "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/libopenstorage/stork/drivers/volume" -+ storkcache "github.com/libopenstorage/stork/pkg/cache" - storklog "github.com/libopenstorage/stork/pkg/log" - restore "github.com/libopenstorage/stork/pkg/snapshot/controllers" - "github.com/portworx/sched-ops/k8s/core" -@@ -21,6 +25,7 @@ import ( - "k8s.io/apimachinery/pkg/runtime" - 
"k8s.io/client-go/tools/record" - schedulerapi "k8s.io/kube-scheduler/extender/v1" -+ "sigs.k8s.io/controller-runtime/pkg/manager" - ) - - const ( -@@ -77,9 +82,15 @@ var ( - type Extender struct { - Recorder record.EventRecorder - Driver volume.Driver -- server *http.Server -- lock sync.Mutex -- started bool -+ // block added by flant.com -+ CertFile string `json:"cert-file"` -+ KeyFile string `json:"key-file"` -+ LogLevel log.Level `json:"log-level" default:"DebugLevel"` -+ Manager *manager.Manager -+ // end of flant.com block -+ server *http.Server -+ lock sync.Mutex -+ started bool - } - - // Start Starts the extender -@@ -90,13 +101,53 @@ func (e *Extender) Start() error { - if e.started { - return fmt.Errorf("Extender has already been started") - } -+ // block added by flant.com -+ log.SetLevel(e.LogLevel) -+ -+ if e.Manager != nil { -+ // Setup stork cache. We setup this cache for all the stork pods instead of just the leader pod. -+ // In this way, even the stork extender code can use this cache, since the extender filter/process -+ // requests can land on any stork pod. -+ if err := storkcache.CreateSharedInformerCache(*e.Manager); err != nil { -+ log.Fatalf("failed to setup shared informer cache: %v", err) -+ } -+ log.Infof("shared informer cache has been intialized") -+ } -+ // end of flant.com block -+ - // TODO: Make the listen port configurable - e.server = &http.Server{Addr: ":8099"} - http.HandleFunc("/", e.serveHTTP) - go func() { -- if err := e.server.ListenAndServe(); err != http.ErrServerClosed { -+ // block added by flant.com -+ tlsMode := len(e.CertFile) > 0 && len(e.KeyFile) > 0 -+ -+ var err error -+ -+ if tlsMode { -+ if _, err := os.Stat(e.CertFile); errors.Is(err, os.ErrNotExist) { -+ log.Warnf("CertFile `%s` does not exists. Launch as HTTP", e.CertFile) -+ tlsMode = false -+ } -+ -+ if _, err := os.Stat(e.KeyFile); errors.Is(err, os.ErrNotExist) { -+ log.Warnf("KeyFile `%s` does not exists. 
Launch as HTTP", e.KeyFile) -+ tlsMode = false -+ } -+ } -+ -+ if tlsMode { -+ log.Warn("Starting as HTTPS on :8099") -+ err = e.server.ListenAndServeTLS(e.CertFile, e.KeyFile) -+ } else { -+ log.Warn("Starting as HTTP on :8099") -+ err = e.server.ListenAndServe() -+ } -+ -+ if err != http.ErrServerClosed { - log.Panicf("Error starting extender server: %v", err) - } -+ // end of flant.com block - }() - - prometheus.MustRegister(HyperConvergedPodsCounter) -@@ -149,6 +200,16 @@ func (e *Extender) getHostname(node *v1.Node) string { - } - - func (e *Extender) processFilterRequest(w http.ResponseWriter, req *http.Request) { -+ // block added by flant.com -+ payload, err := httputil.DumpRequest(req, true) -+ if err != nil { -+ log.Errorf("[filter] Error debugging request: %v", err) -+ http.Error(w, err.Error(), http.StatusInternalServerError) -+ return -+ } -+ log.Tracef("[filter] received request: %s", string(payload)) -+ // end of flant.com block -+ - decoder := json.NewDecoder(req.Body) - defer func() { - if err := req.Body.Close(); err != nil { -@@ -164,6 +225,14 @@ func (e *Extender) processFilterRequest(w http.ResponseWriter, req *http.Request - return - } - -+ // block added by flant.com -+ if err = fillNodesByNames(&args); err != nil { -+ log.Errorf("[filter] Cannot fill nodes: %s", err.Error()) -+ http.Error(w, "[filter] Cannot fill nodes", http.StatusBadRequest) -+ return -+ } -+ // end of flant.com block -+ - pod := args.Pod - if pod == nil { - msg := "Empty pod received in filter request" -@@ -176,9 +245,24 @@ func (e *Extender) processFilterRequest(w http.ResponseWriter, req *http.Request - if vol.PersistentVolumeClaim == nil { - continue - } -- pvc, err := core.Instance().GetPersistentVolumeClaim(vol.PersistentVolumeClaim.ClaimName, pod.Namespace) -+ -+ var pvc *v1.PersistentVolumeClaim -+ var err error -+ var msg string -+ -+ if storkcache.Instance() != nil { -+ pvc, err = storkcache.Instance().GetPersistentVolumeClaim(vol.PersistentVolumeClaim.ClaimName, 
pod.Namespace) -+ if err != nil { -+ msg = fmt.Sprintf("Unable to find PVC %s in informer cache, err: %s", vol.Name, err.Error()) -+ } -+ } else { -+ pvc, err = core.Instance().GetPersistentVolumeClaim(vol.PersistentVolumeClaim.ClaimName, pod.Namespace) -+ if err != nil { -+ msg = fmt.Sprintf("Unable to find PVC %s, err: %s", vol.Name, err.Error()) -+ } -+ } -+ - if err != nil { -- msg := fmt.Sprintf("Unable to find PVC %s, err: %v", vol.Name, err) - storklog.PodLog(pod).Warnf(msg) - e.Recorder.Event(pod, v1.EventTypeWarning, schedulingFailureEventReason, msg) - http.Error(w, msg, http.StatusBadRequest) -@@ -192,7 +276,7 @@ func (e *Extender) processFilterRequest(w http.ResponseWriter, req *http.Request - } - } - -- storklog.PodLog(pod).Debugf("Nodes in filter request:") -+ storklog.PodLog(pod).Debugf("[filter] Nodes in filter request") - for _, node := range args.Nodes.Items { - storklog.PodLog(pod).Debugf("%v %+v", node.Name, node.Status.Addresses) - } -@@ -385,9 +469,27 @@ func (e *Extender) collectExtenderMetrics() error { - return nil - } - -- if err := core.Instance().WatchPods("", fn, metav1.ListOptions{}); err != nil { -- log.Errorf("failed to watch pods due to: %v", err) -- return err -+ podHandler := func(object interface{}) { -+ pod, ok := object.(*v1.Pod) -+ if !ok { -+ log.Errorf("invalid object type on pod watch from cache: %v", object) -+ } else { -+ fn(pod) -+ } -+ } -+ -+ if storkcache.Instance() != nil { -+ log.Debugf("Shared informer cache has been initialized, using it for extender metrics.") -+ err := storkcache.Instance().WatchPods(podHandler) -+ if err != nil { -+ log.Errorf("failed to watch pods with informer cache for health monitoring, err: %v", err) -+ } -+ } else { -+ log.Warnf("Shared informer cache has not been initialized, using watch for extender metrics.") -+ if err := core.Instance().WatchPods("", fn, metav1.ListOptions{}); err != nil { -+ log.Errorf("failed to watch pods for metrics due to: %v", err) -+ return err -+ } - } - 
return nil - } -@@ -461,6 +563,16 @@ type localityInfo struct { - } - - func (e *Extender) processPrioritizeRequest(w http.ResponseWriter, req *http.Request) { -+ // block added by flant.com -+ payload, err := httputil.DumpRequest(req, true) -+ if err != nil { -+ log.Errorf("[prioritize] Error debugging request: %v", err) -+ http.Error(w, err.Error(), http.StatusInternalServerError) -+ return -+ } -+ log.Tracef("[prioritize] received request: %s", string(payload)) -+ // end of flant.com block -+ - decoder := json.NewDecoder(req.Body) - defer func() { - if err := req.Body.Close(); err != nil { -@@ -476,6 +588,14 @@ func (e *Extender) processPrioritizeRequest(w http.ResponseWriter, req *http.Req - return - } - -+ // block added by flant.com -+ if err = fillNodesByNames(&args); err != nil { -+ log.Errorf("[prioritize] Cannot fill nodes: %+v", err) -+ http.Error(w, "[prioritize] Cannot fill nodes", http.StatusBadRequest) -+ return -+ } -+ // end of flant.com block -+ - pod := args.Pod - storklog.PodLog(pod).Debugf("Nodes in prioritize request:") - for _, node := range args.Nodes.Items { -@@ -495,7 +615,6 @@ func (e *Extender) processPrioritizeRequest(w http.ResponseWriter, req *http.Req - - // Score all nodes the same if hyperconvergence is disabled - disableHyperconvergence := false -- var err error - if pod.Annotations != nil { - if value, ok := pod.Annotations[disableHyperconvergenceAnnotation]; ok { - if disableHyperconvergence, err = strconv.ParseBool(value); err != nil { -@@ -632,3 +751,48 @@ sendResponse: - storklog.PodLog(pod).Errorf("Failed to encode response: %v", err) - } - } -+ -+// block added by flant.com -+func fillNodesByNames(inputData *schedulerapi.ExtenderArgs) error { -+ if inputData.Nodes != nil && len(inputData.Nodes.Items) > 0 { -+ // nodes already presents in inputData, just return 'as is' -+ return nil -+ } -+ -+ if inputData.NodeNames == nil || len(*inputData.NodeNames) == 0 { -+ return fmt.Errorf("no NodeNames") -+ } -+ -+ var node *v1.Node 
-+ var err error -+ var msg string -+ -+ nodeList := &v1.NodeList{} -+ -+ for _, nodeName := range *inputData.NodeNames { -+ if storkcache.Instance() != nil { -+ node, err = storkcache.Instance().GetNode(nodeName) -+ if err == nil { -+ log.Tracef("Get node %s from cache", nodeName) -+ } -+ msg = fmt.Sprintf("Unable to get node info for node %s from informer cache, err: %v", nodeName, err) -+ } else { -+ node, err = core.Instance().GetNodeByName(nodeName) -+ if err == nil { -+ log.Tracef("Get node %s from API-server", nodeName) -+ } -+ msg = fmt.Sprintf("Unable to get node info for node %s, err: %v", nodeName, err) -+ } -+ -+ if err != nil { -+ return fmt.Errorf(msg) -+ } -+ -+ nodeList.Items = append(nodeList.Items, *node) -+ } -+ -+ inputData.Nodes = nodeList -+ return nil -+} -+ -+// end of flant.com block diff --git a/images/linstor-scheduler-extender/werf.inc.yaml b/images/linstor-scheduler-extender/werf.inc.yaml deleted file mode 100644 index a0f668d8f..000000000 --- a/images/linstor-scheduler-extender/werf.inc.yaml +++ /dev/null @@ -1,66 +0,0 @@ ---- -image: {{ $.ImageName }}-golang-artifact -from: {{ $.Root.BASE_GOLANG_1_22 }} -final: false - -git: - - url: https://github.com/piraeusdatastore/linstor-scheduler-extender - add: / - to: /usr/local/go/{{ $.ImageName }} - tag: v{{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }} - stageDependencies: - setup: - - "**/*" - - - url: https://github.com/libopenstorage/stork - add: / - to: /usr/local/go/stork - tag: v{{ $.Versions.LINSTOR_SCHEDULER_STORK }} - stageDependencies: - beforeSetup: - - "**/*" - setup: - - "**/*" - - - add: /images/{{ $.ImageName }}/patches - to: /patches - stageDependencies: - beforeSetup: - - "**/*" - setup: - - "**/*" - -mount: - - fromPath: ~/go-pkg-cache - to: /go/pkg -shell: - beforeSetup: - - cd /usr/local/go/stork - - '[ -d "/patches/stork/new-files" ] && cp -frp /patches/stork/new-files/* /usr/local/go/stork/' - - git apply /patches/stork/*.patch - - setup: - - cd /usr/local/go/{{ 
$.ImageName }} - - git apply /patches/*.patch - - go mod edit -replace=github.com/libopenstorage/stork=/usr/local/go/stork - - go mod tidy - - cd cmd/linstor-scheduler-extender - - GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -ldflags="-s -w -X github.com/piraeusdatastore/linstor-scheduler-extender/pkg/consts.Version=v{{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }}" - - mv {{ $.ImageName }} / - - chmod +x /{{ $.ImageName }} ---- -image: {{ $.ImageName }} -from: {{ $.Root.BASE_SCRATCH }} - -import: - - image: {{ $.ImageName }}-golang-artifact - add: /{{ $.ImageName }} - to: /{{ $.ImageName }} - before: setup - -docker: - ENTRYPOINT: ["/{{ $.ImageName }}"] - LABEL: - distro: all - version: all - linstor-scheduler-extender: {{ $.Versions.LINSTOR_SCHEDULER_EXTENDER }} diff --git a/images/sds-replicated-volume-controller/config/config.go b/images/sds-replicated-volume-controller/config/config.go index a8ee9a701..8da7aaebe 100644 --- a/images/sds-replicated-volume-controller/config/config.go +++ b/images/sds-replicated-volume-controller/config/config.go @@ -60,12 +60,14 @@ func NewConfig() (*Options, error) { opts.LinstorLeaseName = LinstorLeaseName opts.ConfigSecretName = ConfigSecretName - loglevel := os.Getenv(LogLevel) - if loglevel == "" { - opts.Loglevel = logger.DebugLevel - } else { - opts.Loglevel = logger.Verbosity(loglevel) - } + // loglevel := os.Getenv(LogLevel) + // if loglevel == "" { + // opts.Loglevel = logger.TraceLevel + // } else { + // opts.Loglevel = logger.Verbosity(loglevel) + // } + // TODO remove + opts.Loglevel = logger.TraceLevel opts.MetricsPort = os.Getenv(MetricsPortEnv) if opts.MetricsPort == "" { diff --git a/images/sds-replicated-volume-controller/go.mod b/images/sds-replicated-volume-controller/go.mod index 15535be28..d6cd4b30a 100644 --- a/images/sds-replicated-volume-controller/go.mod +++ b/images/sds-replicated-volume-controller/go.mod @@ -2,6 +2,8 @@ module github.com/deckhouse/sds-replicated-volume/images/sds-replicated-volume-c go 
1.24.2 +toolchain go1.24.3 + require ( github.com/LINBIT/golinstor v0.49.0 github.com/deckhouse/sds-node-configurator/api v0.0.0-20250424082358-e271071c2a57 @@ -12,7 +14,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.31.0 k8s.io/apiextensions-apiserver v0.31.0 - k8s.io/apimachinery v0.32.3 + k8s.io/apimachinery v0.33.1 k8s.io/client-go v0.31.0 sigs.k8s.io/controller-runtime v0.19.0 ) @@ -22,11 +24,12 @@ replace github.com/deckhouse/sds-replicated-volume/api => ../../api require ( github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/x448/float16 v0.8.4 // indirect golang.org/x/exp v0.0.0-20230515195305-f3d0a9c9a5cc // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect ) require ( @@ -44,9 +47,8 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect github.com/google/uuid v1.6.0 github.com/imdario/mergo v0.3.6 // indirect @@ -62,22 +64,22 @@ require ( github.com/prometheus/common v0.55.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/spf13/pflag v1.0.5 // indirect - github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify v1.10.0 golang.org/x/net v0.40.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.25.0 // indirect - golang.org/x/time v0.7.0 // indirect + 
golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.26.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/protobuf v1.35.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/klog/v2 v2.130.1 - k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 moul.io/http2curl/v2 v2.3.0 // indirect sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/images/sds-replicated-volume-controller/go.sum b/images/sds-replicated-volume-controller/go.sum index 91eba24de..b29cc7809 100644 --- a/images/sds-replicated-volume-controller/go.sum +++ b/images/sds-replicated-volume-controller/go.sum @@ -43,12 +43,12 @@ github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= 
-github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -100,8 +100,8 @@ github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= @@ -115,8 +115,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= 
-github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/tailscale/depaware v0.0.0-20210622194025-720c4b409502/go.mod h1:p9lPsd+cx33L3H9nNoecRRxPssFKUwwI50I3pZ0yT+8= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= @@ -158,8 +158,8 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4= golang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA= -golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= -golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -173,8 +173,8 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= -google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= 
-google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= @@ -192,14 +192,14 @@ k8s.io/api v0.31.0 h1:b9LiSjR2ym/SzTOlfMHm1tr7/21aD7fSkqgD/CVJBCo= k8s.io/api v0.31.0/go.mod h1:0YiFF+JfFxMM6+1hQei8FY8M7s1Mth+z/q7eF1aJkTE= k8s.io/apiextensions-apiserver v0.31.0 h1:fZgCVhGwsclj3qCw1buVXCV6khjRzKC5eCFt24kyLSk= k8s.io/apiextensions-apiserver v0.31.0/go.mod h1:b9aMDEYaEe5sdK+1T0KU78ApR/5ZVp4i56VacZYEHxk= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/client-go v0.31.0 h1:QqEJzNjbN2Yv1H79SsS+SWnXkBgVu4Pj3CJQgbx0gI8= k8s.io/client-go v0.31.0/go.mod h1:Y9wvC76g4fLjmU0BA+rV+h2cncoadjvjjkkIGoTLcGU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= -k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils 
v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= moul.io/http2curl/v2 v2.3.0 h1:9r3JfDzWPcbIklMOs2TnIFzDYvfAZvjeavG6EzP7jYs= @@ -208,7 +208,10 @@ sigs.k8s.io/controller-runtime v0.19.0 h1:nWVM7aq+Il2ABxwiCizrVDSlmDcshi9llbaFbC sigs.k8s.io/controller-runtime v0.19.0/go.mod h1:iRmWllt8IlaLjvTTDLhRBXIEtkCK6hwVBJJsYS9Ajf4= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= -sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class.go b/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class.go index e151ab157..a42e5d68e 100644 --- a/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class.go +++ b/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class.go @@ -18,6 +18,7 @@ package controller import ( "context" + "encoding/json" "errors" "fmt" "maps" @@ -83,6 +84,8 @@ const ( 
StorageClassParamAllowRemoteVolumeAccessKey = "replicated.csi.storage.deckhouse.io/allowRemoteVolumeAccess" StorageClassParamAllowRemoteVolumeAccessValue = "- fromSame:\n - topology.kubernetes.io/zone" ReplicatedStorageClassParamNameKey = "replicated.csi.storage.deckhouse.io/replicatedStorageClassName" + StorageClassLVMVolumeGroupsParamKey = "csi.storage.deckhouse.io/lvm-volume-groups" + StorageClassLVMType = "csi.storage.deckhouse.io/lvm-type" StorageClassParamFSTypeKey = "csi.storage.k8s.io/fstype" FsTypeExt4 = "ext4" @@ -131,7 +134,7 @@ func NewReplicatedStorageClass( return reconcile.Result{Requeue: true, RequeueAfter: time.Duration(cfg.ScanInterval) * time.Second}, nil } - log.Info(fmt.Sprintf("[ReplicatedStorageClassReconciler] Finish event for ReplicatedStorageClass %s in reconciler. No need to reconcile it again.", request.Name)) + log.Info(fmt.Sprintf("[ReplicatedStorageClassReconciler] Finish event for ReplicatedStorageClass %s in reconciler. No need to reconcile it again. ", request.Name)) return reconcile.Result{}, nil }), }) @@ -195,7 +198,7 @@ func ReconcileReplicatedStorageClassEvent( } if sc != nil && sc.Provisioner != StorageClassProvisioner { - return false, fmt.Errorf("Reconcile StorageClass with provisioner %s is not allowed", sc.Provisioner) + return false, fmt.Errorf("[ReconcileReplicatedStorageClassEvent] Reconcile StorageClass with provisioner %s is not allowed", sc.Provisioner) } // Handle deletion @@ -267,7 +270,13 @@ func ReconcileReplicatedStorageClass( "to Local and virtualization module is %t", virtualizationEnabled)) } - newSC := GetNewStorageClass(replicatedSC, virtualizationEnabled) + rspData, err := GetReplicatedStoragePoolData(ctx, cl, replicatedSC) + if err != nil { + err = fmt.Errorf("[ReconcileReplicatedStorageClass] error getting replicated storage class'es LVGs: %w", err) + return false, err + } + + newSC := GetNewStorageClass(replicatedSC, virtualizationEnabled, rspData) if oldSC == nil { 
log.Info("[ReconcileReplicatedStorageClass] StorageClass with name: " + @@ -464,6 +473,46 @@ func CreateStorageClass(ctx context.Context, cl client.Client, newStorageClass * return nil } +func GetReplicatedStoragePoolData(ctx context.Context, cl client.Client, replicatedSC *srv.ReplicatedStorageClass) (map[string]string, error) { + result := map[string]string{} + type ThinPool struct { + PoolName string `yaml:"poolName"` + } + type LVMVolumeGroup struct { + Name string `yaml:"name"` + Thin ThinPool `yaml:"Thin"` + } + + cwt, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + rsp := &srv.ReplicatedStoragePool{} + err := cl.Get(cwt, client.ObjectKey{Name: replicatedSC.Spec.StoragePool, Namespace: replicatedSC.Namespace}, rsp) + if err != nil { + fmt.Printf("[GenerateStorageClassFromReplicatedStorageClass] failed to get ReplicatedStoragePools %s", replicatedSC.Spec.StoragePool) + return result, err + } + + rscLVGs := make([]LVMVolumeGroup, 0, len(rsp.Spec.LVMVolumeGroups)) + for _, val := range rsp.Spec.LVMVolumeGroups { + rscLVGs = append(rscLVGs, LVMVolumeGroup{ + Name: val.Name, + Thin: ThinPool{PoolName: val.ThinPoolName}, + }) + } + + rscLVGsStr, err := json.Marshal(rscLVGs) + if err != nil { + fmt.Printf("[GenerateStorageClassFromReplicatedStorageClass] failed to marshal LVMVolumeGroups: %s", err.Error()) + return result, err + } + + result["LVGs"] = string(rscLVGsStr) + result["Type"] = rsp.Spec.Type + + return result, nil +} + func GenerateStorageClassFromReplicatedStorageClass(replicatedSC *srv.ReplicatedStorageClass) *storagev1.StorageClass { allowVolumeExpansion := true reclaimPolicy := v1.PersistentVolumeReclaimPolicy(replicatedSC.Spec.ReclaimPolicy) @@ -481,6 +530,8 @@ func GenerateStorageClassFromReplicatedStorageClass(replicatedSC *srv.Replicated ReplicatedStorageClassParamNameKey: replicatedSC.Name, } + fmt.Printf("[GenerateStorageClassFromReplicatedStorageClass] storageClassParameters %s", 
storageClassParameters[ReplicatedStorageClassParamNameKey]) + switch replicatedSC.Spec.Replication { case ReplicationNone: storageClassParameters[StorageClassPlacementCountKey] = "1" @@ -689,7 +740,7 @@ func recreateStorageClassIfNeeded( return true, false, nil } -func GetNewStorageClass(replicatedSC *srv.ReplicatedStorageClass, virtualizationEnabled bool) *storagev1.StorageClass { +func GetNewStorageClass(replicatedSC *srv.ReplicatedStorageClass, virtualizationEnabled bool, replicatedStoragePoolData map[string]string) *storagev1.StorageClass { newSC := GenerateStorageClassFromReplicatedStorageClass(replicatedSC) // Do NOT add the virtualization annotation `virtualdisk.virtualization.deckhouse.io/access-mode: ReadWriteOnce` if the source ReplicatedStorageClass // has replicatedstorageclass.storage.deckhouse.io/ignore-local: "true". @@ -703,6 +754,15 @@ func GetNewStorageClass(replicatedSC *srv.ReplicatedStorageClass, virtualization } newSC.Annotations[StorageClassVirtualizationAnnotationKey] = StorageClassVirtualizationAnnotationValue } + newSC.Parameters[StorageClassLVMVolumeGroupsParamKey] = replicatedStoragePoolData["LVGs"] + LVMtype := replicatedStoragePoolData["Type"] + if LVMtype == "LVM" { + newSC.Parameters[StorageClassLVMType] = "Thick" + } + if LVMtype == "LVMThin" { + newSC.Parameters[StorageClassLVMType] = "Thin" + } + return newSC } @@ -798,4 +858,5 @@ func updateMap(dst, src map[string]string) { dst[k] = v } } + } diff --git a/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class_test.go b/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class_test.go index 8ef6735f6..3621a1321 100644 --- a/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class_test.go +++ b/images/sds-replicated-volume-controller/pkg/controller/replicated_storage_class_test.go @@ -39,6 +39,8 @@ import ( "github.com/deckhouse/sds-replicated-volume/images/sds-replicated-volume-controller/pkg/logger" ) +var 
mockMap map[string]string = map[string]string{} + var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { var ( @@ -131,7 +133,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { replicatedSC.Name = testName virtualizationEnabled := false - actualSC := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + actualSC := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) Expect(actualSC).To(Equal(expectedSC)) }) @@ -197,7 +199,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { replicatedSC := validSpecReplicatedSCTemplate replicatedSC.Name = testName virtualizationEnabled := false - sc := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + sc := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) err := controller.CreateStorageClass(ctx, cl, sc) if err == nil { defer func() { @@ -350,7 +352,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) virtualizationEnabled := false - scTemplate := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + scTemplate := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) err = controller.CreateStorageClass(ctx, cl, scTemplate) if err == nil { defer func() { @@ -412,7 +414,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { } virtualizationEnabled := false - sc := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + sc := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) err = controller.CreateStorageClass(ctx, cl, sc) Expect(err).NotTo(HaveOccurred()) @@ -747,7 +749,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) virtualizationEnabled := false - sc := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + sc := 
controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) err = controller.CreateStorageClass(ctx, cl, sc) Expect(err).NotTo(HaveOccurred()) @@ -805,7 +807,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) virtualizationEnabled := false - anotherSC := controller.GetNewStorageClass(&anotherReplicatedSC, virtualizationEnabled) + anotherSC := controller.GetNewStorageClass(&anotherReplicatedSC, virtualizationEnabled, mockMap) err = controller.CreateStorageClass(ctx, cl, anotherSC) Expect(err).NotTo(HaveOccurred()) @@ -835,6 +837,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { replicatedSC := validSpecReplicatedSCTemplate replicatedSC.Name = testName replicatedSC.Status.Phase = controller.Created + // TODO add mock value storageClass := controller.GenerateStorageClassFromReplicatedStorageClass(&replicatedSC) equal, _ := controller.CompareStorageClasses(storageClass, storageClass) @@ -1516,7 +1519,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) Expect(virtualizationEnabled).To(BeTrue()) - scResource := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + scResource := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) Expect(scResource).NotTo(BeNil()) Expect(scResource.Annotations).NotTo(BeNil()) Expect(scResource.Annotations[controller.StorageClassVirtualizationAnnotationKey]).To(Equal(controller.StorageClassVirtualizationAnnotationValue)) @@ -1575,7 +1578,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(storageClass.Annotations).NotTo(BeNil()) Expect(storageClass.Annotations[controller.StorageClassVirtualizationAnnotationKey]).To(Equal(controller.StorageClassVirtualizationAnnotationValue)) - scResourceAfterUpdate := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + scResourceAfterUpdate := 
controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) controller.DoUpdateStorageClass(scResourceAfterUpdate, storageClass) Expect(scResourceAfterUpdate).NotTo(BeNil()) Expect(scResourceAfterUpdate.Annotations).To(BeNil()) @@ -1630,7 +1633,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { }, } - storageClassResource := controller.GetNewStorageClass(&replicatedSC, false) + storageClassResource := controller.GetNewStorageClass(&replicatedSC, false, mockMap) Expect(storageClassResource).NotTo(BeNil()) Expect(storageClassResource.Annotations).To(BeNil()) Expect(storageClassResource.Name).To(Equal(replicatedSC.Name)) @@ -1668,7 +1671,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) Expect(virtualizationEnabled).To(BeTrue()) - scResource := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + scResource := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) controller.DoUpdateStorageClass(scResource, storageClass) Expect(scResource).NotTo(BeNil()) Expect(scResource.Annotations).NotTo(BeNil()) @@ -1734,7 +1737,7 @@ var _ = Describe(controller.ReplicatedStorageClassControllerName, func() { Expect(err).NotTo(HaveOccurred()) Expect(virtualizationEnabled).To(BeFalse()) - scResourceAfterUpdate := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled) + scResourceAfterUpdate := controller.GetNewStorageClass(&replicatedSC, virtualizationEnabled, mockMap) controller.DoUpdateStorageClass(scResourceAfterUpdate, storageClass) Expect(scResourceAfterUpdate.Annotations).NotTo(BeNil()) Expect(len(scResourceAfterUpdate.Annotations)).To(Equal(1)) diff --git a/images/webhooks/go.mod b/images/webhooks/go.mod index 75eba0881..5c3ea8b67 100644 --- a/images/webhooks/go.mod +++ b/images/webhooks/go.mod @@ -11,7 +11,7 @@ require ( github.com/slok/kubewebhook/v2 v2.6.0 k8s.io/api v0.32.1 k8s.io/apiextensions-apiserver v0.32.1 - 
k8s.io/apimachinery v0.32.3 + k8s.io/apimachinery v0.33.1 k8s.io/client-go v0.32.1 k8s.io/klog/v2 v2.130.1 sigs.k8s.io/controller-runtime v0.20.4 @@ -35,7 +35,6 @@ require ( github.com/google/btree v1.1.3 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect @@ -63,9 +62,10 @@ require ( gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/utils v0.0.0-20241210054802-24370beab758 // indirect sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.5.0 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/images/webhooks/go.sum b/images/webhooks/go.sum index 73e469bdc..703402618 100644 --- a/images/webhooks/go.sum +++ b/images/webhooks/go.sum @@ -90,8 +90,8 @@ github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFS github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= 
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/slok/kubewebhook/v2 v2.6.0 h1:NMDDXx219OcNDc17ZYpqGXW81/jkBNmkdEwFDcZDVcA= @@ -175,21 +175,24 @@ k8s.io/api v0.32.1 h1:f562zw9cy+GvXzXf0CKlVQ7yHJVYzLfL6JAS4kOAaOc= k8s.io/api v0.32.1/go.mod h1:/Yi/BqkuueW1BgpoePYBRdDYfjPF5sgTr5+YqDZra5k= k8s.io/apiextensions-apiserver v0.32.1 h1:hjkALhRUeCariC8DiVmb5jj0VjIc1N0DREP32+6UXZw= k8s.io/apiextensions-apiserver v0.32.1/go.mod h1:sxWIGuGiYov7Io1fAS2X06NjMIk5CbRHc2StSmbaQto= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= k8s.io/client-go v0.32.1 h1:otM0AxdhdBIaQh7l1Q0jQpmo7WOFIk5FFa4bg6YMdUU= k8s.io/client-go v0.32.1/go.mod h1:aTTKZY7MdxUaJ/KiUs8D+GssR9zJZi77ZqtzcGXIiDg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= -k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= sigs.k8s.io/controller-runtime v0.20.4/go.mod 
h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= -sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/monitoring/prometheus-rules/linstor-scheduler-admission.tpl b/monitoring/prometheus-rules/linstor-scheduler-admission.tpl deleted file mode 100644 index 3d8e4b08f..000000000 --- a/monitoring/prometheus-rules/linstor-scheduler-admission.tpl +++ /dev/null @@ -1,38 +0,0 @@ -{{- if and (ne "dev" .Values.global.deckhouseVersion) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} -- name: kubernetes.linstor.scheduler_state - rules: - - alert: D8LinstorSchedulerAdmissionPodIsNotReady - expr: min by (pod) (kube_pod_status_ready{condition="true", namespace="d8-sds-replicated-volume", pod=~"linstor-scheduler-admission-.*"}) != 1 - for: 10m - labels: - severity_level: "6" - tier: cluster - annotations: - plk_protocol_version: "1" - plk_markup_format: "markdown" - plk_labels_as_annotations: "pod" - plk_create_group_if_not_exists__d8_linstor_scheduler_health: 
"D8LinstorSchedulerAdmissionHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - plk_grouped_by__d8_linstor_scheduler_health: "D8LinstorSchedulerHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - summary: The linstor-scheduler-admission Pod is NOT Ready. - description: | - The recommended course of action: - 1. Retrieve details of the Deployment: `kubectl -n d8-sds-replicated-volume describe deploy linstor-scheduler-admission` - 2. View the status of the Pod and try to figure out why it is not running: `kubectl -n d8-sds-replicated-volume describe pod -l app=linstor-scheduler-admission` - - - alert: D8LinstorSchedulerAdmissionPodIsNotRunning - expr: absent(kube_pod_status_phase{namespace="d8-sds-replicated-volume",phase="Running",pod=~"linstor-scheduler-admission-.*"}) - for: 2m - labels: - severity_level: "6" - tier: cluster - annotations: - plk_protocol_version: "1" - plk_markup_format: "markdown" - plk_create_group_if_not_exists__d8_linstor_scheduler_health: "D8LinstorSchedulerAdmissionHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - plk_grouped_by__d8_linstor_scheduler_health: "D8LinstorSchedulerAdmissionHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - summary: The linstor-scheduler-admission Pod is NOT Running. - description: | - The recommended course of action: - 1. Retrieve details of the Deployment: `kubectl -n d8-sds-replicated-volume describe deploy linstor-scheduler-admission` - 2. 
View the status of the Pod and try to figure out why it is not running: `kubectl -n d8-sds-replicated-volume describe pod -l app=linstor-scheduler-admission` -{{- end }} diff --git a/monitoring/prometheus-rules/linstor-scheduler.yaml b/monitoring/prometheus-rules/linstor-scheduler.yaml deleted file mode 100644 index ce63de331..000000000 --- a/monitoring/prometheus-rules/linstor-scheduler.yaml +++ /dev/null @@ -1,36 +0,0 @@ -- name: kubernetes.linstor.scheduler_state - rules: - - alert: D8LinstorSchedulerPodIsNotReady - expr: min by (pod) (kube_pod_status_ready{condition="true", namespace="d8-sds-replicated-volume", pod=~"linstor-scheduler-.*",pod!~"linstor-scheduler-admission-.*"}) != 1 - for: 10m - labels: - severity_level: "6" - tier: cluster - annotations: - plk_protocol_version: "1" - plk_markup_format: "markdown" - plk_labels_as_annotations: "pod" - plk_create_group_if_not_exists__d8_linstor_scheduler_health: "D8LinstorSchedulerHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - plk_grouped_by__d8_linstor_scheduler_health: "D8LinstorSchedulerHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - summary: The linstor-scheduler Pod is NOT Ready. - description: | - The recommended course of action: - 1. Retrieve details of the Deployment: `kubectl -n d8-sds-replicated-volume describe deploy linstor-scheduler` - 2. 
View the status of the Pod and try to figure out why it is not running: `kubectl -n d8-sds-replicated-volume describe pod -l app=linstor-scheduler` - - - alert: D8LinstorSchedulerPodIsNotRunning - expr: absent(kube_pod_status_phase{namespace="d8-sds-replicated-volume",phase="Running",pod=~"linstor-scheduler-.*",pod!~"linstor-scheduler-admission-.*"}) - for: 2m - labels: - severity_level: "6" - tier: cluster - annotations: - plk_protocol_version: "1" - plk_markup_format: "markdown" - plk_create_group_if_not_exists__d8_linstor_scheduler_health: "D8LinstorSchedulerHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - plk_grouped_by__d8_linstor_scheduler_health: "D8LinstorSchedulerHealth,tier=~tier,prometheus=deckhouse,kubernetes=~kubernetes" - summary: The linstor-scheduler Pod is NOT Running. - description: | - The recommended course of action: - 1. Retrieve details of the Deployment: `kubectl -n d8-sds-replicated-volume describe deploy linstor-scheduler` - 2. View the status of the Pod and try to figure out why it is not running: `kubectl -n d8-sds-replicated-volume describe pod -l app=linstor-scheduler` diff --git a/openapi/values_ce.yaml b/openapi/values_ce.yaml index 7967f1e83..cd0635aa3 100644 --- a/openapi/values_ce.yaml +++ b/openapi/values_ce.yaml @@ -118,23 +118,6 @@ properties: ca: type: string x-examples: ["YjY0ZW5jX3N0cmluZwo="] - webhookCert: - type: object - default: {} - x-required-for-helm: - - crt - - key - - ca - properties: - crt: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - key: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - ca: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] customWebhookCert: type: object default: {} @@ -152,20 +135,3 @@ properties: ca: type: string x-examples: ["YjY0ZW5jX3N0cmluZwo="] - customSchedulerExtenderCert: - type: object - default: {} - x-required-for-helm: - - crt - - key - - ca - properties: - crt: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - key: - type: string - 
x-examples: ["YjY0ZW5jX3N0cmluZwo="] - ca: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] diff --git a/openapi/values_ee.yaml b/openapi/values_ee.yaml index 7967f1e83..cd0635aa3 100644 --- a/openapi/values_ee.yaml +++ b/openapi/values_ee.yaml @@ -118,23 +118,6 @@ properties: ca: type: string x-examples: ["YjY0ZW5jX3N0cmluZwo="] - webhookCert: - type: object - default: {} - x-required-for-helm: - - crt - - key - - ca - properties: - crt: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - key: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - ca: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] customWebhookCert: type: object default: {} @@ -152,20 +135,3 @@ properties: ca: type: string x-examples: ["YjY0ZW5jX3N0cmluZwo="] - customSchedulerExtenderCert: - type: object - default: {} - x-required-for-helm: - - crt - - key - - ca - properties: - crt: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - key: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] - ca: - type: string - x-examples: ["YjY0ZW5jX3N0cmluZwo="] diff --git a/templates/csi/rbac-for-us.yaml b/templates/csi/rbac-for-us.yaml index e1b3e1fb6..ba36441fc 100644 --- a/templates/csi/rbac-for-us.yaml +++ b/templates/csi/rbac-for-us.yaml @@ -47,3 +47,51 @@ roleRef: kind: ClusterRole name: d8:{{ .Chart.Name }}:rsc-watcher apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: d8:{{ .Chart.Name }}:drbdclusters-manager + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller")) | nindent 2 }} +rules: + - apiGroups: ["storage.deckhouse.io"] + resources: ["drbdclusters", "drbdresourcereplicas"] + verbs: ["create", "get", "list", "watch", "update", "patch", "delete"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: d8:{{ .Chart.Name }}:drbdclusters-access + {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-csi-controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: csi + namespace: d8-sds-replicated-volume +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:drbdclusters-manager + apiGroup: rbac.authorization.k8s.io +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: d8:{{ .Chart.Name }}:drbd-sync-resource-accessor + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller")) | nindent 2 }} +rules: + - apiGroups: ["internal.linstor.linbit.com"] + resources: ["resourcegroups", "resourcedefinitions", "volumedefinitions", "layerdrbdresourcedefinitions", "layerstoragevolumes", "layerresourceids"] + verbs: ["get", "list", "watch"] +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: d8:{{ .Chart.Name }}:drbd-sync-resource-read-access + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller")) | nindent 2 }} +subjects: + - kind: ServiceAccount + name: csi + namespace: d8-sds-replicated-volume +roleRef: + kind: ClusterRole + name: d8:{{ .Chart.Name }}:drbd-sync-resource-accessor + apiGroup: rbac.authorization.k8s.io diff --git a/templates/linstor-csi/controller.yaml b/templates/linstor-csi/controller.yaml new file mode 100644 index 000000000..c0fe73e85 --- /dev/null +++ b/templates/linstor-csi/controller.yaml @@ -0,0 +1,738 @@ +{{- define "csi_attacher_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "csi_provisioner_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "csi_resizer_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- define "linstor_csi_plugin_resources" }} +cpu: 35m +memory: 50Mi +{{- end }} + +{{- define "csi_snapshotter_resources" }} +cpu: 50m +memory: 25Mi +{{- end }} + +{{- define "csi_livenessprobe_resources" }} +cpu: 50m +memory: 25Mi +{{- end }} + +{{- define "csi_node_driver_registrar_resources" }} +cpu: 10m +memory: 25Mi +{{- end }} + +{{- $kubeVersion := 
semver .Values.global.discovery.kubernetesVersion -}} +{{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: linstor-csi-controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: Deployment + name: linstor-csi-controller + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: csi-attacher + minAllowed: + {{- include "csi_attacher_resources" . | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + - containerName: csi-provisioner + minAllowed: + {{- include "csi_provisioner_resources" . | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + - containerName: csi-resizer + minAllowed: + {{- include "csi_resizer_resources" . | nindent 8 }} + maxAllowed: + cpu: 20m + memory: 50Mi + - containerName: linstor-csi-plugin + minAllowed: + {{- include "linstor_csi_plugin_resources" . | nindent 8 }} + maxAllowed: + cpu: 70m + memory: 100Mi + - containerName: csi-snapshotter + minAllowed: + {{- include "csi_snapshotter_resources" . | nindent 8 }} + maxAllowed: + cpu: 100m + memory: 50Mi + - containerName: csi-livenessprobe + minAllowed: + {{- include "csi_livenessprobe_resources" . | nindent 8 }} + maxAllowed: + cpu: 100m + memory: 50Mi +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: linstor-csi-node + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-node" "workload-resource-policy.deckhouse.io" "every-node")) | nindent 2 }} +spec: + targetRef: + apiVersion: "apps/v1" + kind: DaemonSet + name: linstor-csi-node + updatePolicy: + updateMode: "Auto" + resourcePolicy: + containerPolicies: + - containerName: linstor-csi-plugin + minAllowed: + {{- include "linstor_csi_plugin_resources" . 
| nindent 8 }} + maxAllowed: + cpu: 70m + memory: 100Mi + - containerName: csi-livenessprobe + minAllowed: + {{- include "csi_livenessprobe_resources" . | nindent 8 }} + maxAllowed: + cpu: 50m + memory: 50Mi + - containerName: csi-node-driver-registrar + minAllowed: + {{- include "csi_node_driver_registrar_resources" . | nindent 8 }} + maxAllowed: + cpu: 50m + memory: 50Mi +{{- end }} +--- +apiVersion: policy/v1 +kind: PodDisruptionBudget +metadata: + name: linstor-csi-controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller")) | nindent 2 }} +spec: + minAvailable: {{ include "helm_lib_is_ha_to_value" (list . 1 0) }} + selector: + matchLabels: + app: linstor-csi-controller +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: linstor-csi-node + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-node")) | nindent 2 }} +spec: + revisionHistoryLimit: 2 + selector: + matchLabels: + app: linstor-csi-node + template: + metadata: + labels: + app: linstor-csi-node + name: linstor-csi-node + namespace: d8-{{ .Chart.Name }} + spec: + {{- include "helm_lib_tolerations" (tuple . "any-node" "storage-problems") | nindent 6 }} + {{- include "helm_lib_priority_class" (tuple . "cluster-medium") | nindent 6 }} + {{- include "helm_lib_module_pod_security_context_run_as_user_root" . 
| nindent 6 }} + nodeSelector: + storage.deckhouse.io/sds-replicated-volume-node: "" + affinity: {} + containers: + - args: + - --v=5 + - --csi-address=$(CSI_ENDPOINT) + - --kubelet-registration-path=$(DRIVER_REG_SOCK_PATH) + env: + - name: CSI_ENDPOINT + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/replicated.csi.storage.deckhouse.io/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + image: {{ include "helm_lib_module_common_image" (list . (list "csiNodeDriverRegistrar" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + lifecycle: + preStop: + exec: + command: + - /bin/sh + - -c + - rm -rf /registration/replicated.csi.storage.deckhouse.io /registration/replicated.csi.storage.deckhouse.io-reg.sock + name: csi-node-driver-registrar + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_attacher_resources" . | nindent 12 }} +{{- end }} + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: + - SYS_ADMIN + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi/ + name: plugin-dir + - mountPath: /registration/ + name: registration-dir + - args: + - --csi-address=$(CSI_ENDPOINT) + env: + - name: CSI_ENDPOINT + value: /csi/csi.sock + image: {{ include "helm_lib_module_common_image" (list . 
(list "csiLivenessprobe" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-livenessprobe + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi/ + name: plugin-dir + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_livenessprobe_resources" . | nindent 12 }} +{{- end }} + - args: + - --csi-endpoint=unix://$(CSI_ENDPOINT) + - --node=$(KUBE_NODE_NAME) + - --linstor-endpoint=$(LS_CONTROLLERS) +{{- if eq .Values.sdsReplicatedVolume.logLevel "ERROR" }} + - --log-level=error +{{- else if eq .Values.sdsReplicatedVolume.logLevel "WARN" }} + - --log-level=warn +{{- else if eq .Values.sdsReplicatedVolume.logLevel "INFO" }} + - --log-level=info +{{- else if eq .Values.sdsReplicatedVolume.logLevel "DEBUG" }} + - --log-level=debug +{{- else if eq .Values.sdsReplicatedVolume.logLevel "TRACE" }} + - --log-level=debug +{{- end }} + env: + - name: CSI_ENDPOINT + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/replicated.csi.storage.deckhouse.io/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + image: {{ include "helm_lib_module_image" (list . 
"linstorCsi") }} + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 9808 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: linstor-csi-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "linstor_csi_plugin_resources" . | nindent 12 }} +{{- end }} + securityContext: + capabilities: + add: + - SYS_ADMIN + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /csi/ + name: plugin-dir + - mountPath: /var/lib/kubelet + mountPropagation: Bidirectional + name: publish-dir + - mountPath: /dev + name: device-dir + dnsPolicy: ClusterFirstWithHostNet + imagePullSecrets: + - name: {{ .Chart.Name }}-module-registry + initContainers: + - command: + - /linstor-wait-until + - satellite-online + - $(KUBE_NODE_NAME) + env: + - name: CSI_ENDPOINT + value: /csi/csi.sock + - name: DRIVER_REG_SOCK_PATH + value: /var/lib/kubelet/plugins/replicated.csi.storage.deckhouse.io/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + image: {{ include "helm_lib_module_image" (list . "linstorWaitUntil") }} + imagePullPolicy: IfNotPresent + name: linstor-wait-node-online + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . 
| nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_attacher_resources" . | nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: linstor-csi-node + serviceAccountName: linstor-csi-node + terminationGracePeriodSeconds: 30 + volumes: + - hostPath: + path: /dev + type: "" + name: device-dir + - hostPath: + path: /var/lib/kubelet/plugins/replicated.csi.storage.deckhouse.io + type: DirectoryOrCreate + name: plugin-dir + - hostPath: + path: /var/lib/kubelet + type: Directory + name: publish-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: DirectoryOrCreate + name: registration-dir + updateStrategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: linstor-csi-controller + namespace: d8-{{ .Chart.Name }} + {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-csi-controller" "workload-resource-policy.deckhouse.io" "system")) | nindent 2 }} +spec: + progressDeadlineSeconds: 600 + replicas: {{ include "helm_lib_is_ha_to_value" (list . 2 1) }} + revisionHistoryLimit: 2 + selector: + matchLabels: + app: linstor-csi-controller + strategy: + type: RollingUpdate + {{- if (include "helm_lib_ha_enabled" .) }} + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + {{- end }} + template: + metadata: + labels: + app: linstor-csi-controller + name: linstor-csi-controller + namespace: d8-{{ .Chart.Name }} + spec: + automountServiceAccountToken: true + {{- include "helm_lib_priority_class" (tuple . "cluster-medium") | nindent 6 }} + {{- include "helm_lib_module_pod_security_context_run_as_user_root" . | nindent 6 }} + {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }} + affinity: +{{- with (include "helm_lib_node_selector" (tuple . 
"system")) }} + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: +{{- range $k, $v := index (fromYaml .) "nodeSelector" }} + - key: {{ $k }} + operator: In + values: + - {{ $v | quote }} +{{- end }} +{{- else }} + nodeAffinity: {} +{{- end }} +{{- with (index (fromYaml (include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "linstor-csi-controller")))) "affinity") }} + {{ toYaml . | nindent 8 }} +{{- else }} + podAntiAffinity: {} +{{- end }} + containers: + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --timeout=1m + - --leader-election=true + - --leader-election-namespace=$(NAMESPACE) + - --worker-threads=1 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: {{ include "helm_lib_module_common_image" (list . (list "csiExternalAttacher" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-attacher + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_attacher_resources" . | nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + image: {{ include "helm_lib_module_common_image" (list . (list "csiLivenessprobe" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-livenessprobe + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . 
| nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_livenessprobe_resources" . | nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-address=$(ADDRESS) + - --timeout=1m + - --default-fstype=ext4 + - --feature-gates=Topology=true + - --leader-election=true + - --leader-election-namespace=$(NAMESPACE) + - --enable-capacity + - --extra-create-metadata + - --capacity-ownerref-level=2 + - --worker-threads=1 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + image: {{ include "helm_lib_module_common_image" (list . (list "csiExternalProvisioner" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-provisioner + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_provisioner_resources" . | nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --timeout=1m + - --csi-address=$(ADDRESS) + - --leader-election=true + - --leader-election-namespace=$(NAMESPACE) + - --worker-threads=1 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: {{ include "helm_lib_module_common_image" (list . 
(list "csiExternalSnapshotter" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-snapshotter + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_snapshotter_resources" . | nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --v=5 + - --csi-address=$(ADDRESS) + - --timeout=1m + - --handle-volume-inuse-error=false + - --leader-election=true + - --leader-election-namespace=$(NAMESPACE) + - --workers=1 + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + - name: NAMESPACE + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.namespace + image: {{ include "helm_lib_module_common_image" (list . (list "csiExternalResizer" $kubeVersion.Major $kubeVersion.Minor | join "" )) }} + imagePullPolicy: IfNotPresent + name: csi-resizer + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "csi_resizer_resources" . 
| nindent 12 }} +{{- end }} + securityContext: {} + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + - args: + - --csi-endpoint=unix://$(ADDRESS) + - --node=$(KUBE_NODE_NAME) + - --linstor-endpoint=$(LS_CONTROLLERS) + - --log-level=info + env: + - name: ADDRESS + value: /var/lib/csi/sockets/pluginproxy/./csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + image: {{ include "helm_lib_module_image" (list . "linstorCsi") }} + imagePullPolicy: IfNotPresent + livenessProbe: + failureThreshold: 3 + httpGet: + path: /healthz + port: 9808 + scheme: HTTP + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + name: linstor-csi-plugin + ports: + - containerPort: 9808 + name: healthz + protocol: TCP + resources: + requests: + {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 12 }} +{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} + {{- include "linstor_csi_plugin_resources" . 
| nindent 12 }} +{{- end }} + securityContext: + capabilities: + add: + - SYS_ADMIN + privileged: true + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + volumeMounts: + - mountPath: /var/lib/csi/sockets/pluginproxy/ + name: socket-dir + dnsPolicy: ClusterFirst + imagePullSecrets: + - name: {{ .Chart.Name }}-module-registry + initContainers: + - command: + - /drbd-cluster-sync + env: + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + - name: RETRY_COUNT + value: "2" + - name: RETRY_DELAY_SEC + value: "10" + - name: NUM_WORKERS + value: "3" + image: {{ include "helm_lib_module_image" (list . "drbdClusterSync") }} + imagePullPolicy: IfNotPresent + name: drbd-cluster-sync + resources: + requests: + ephemeral-storage: 50Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + securityContext: {} + - command: + - /linstor-wait-until + - api-online + env: + - name: LS_CONTROLLERS + value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 + - name: LS_ROOT_CA + valueFrom: + secretKeyRef: + key: ca.crt + name: linstor-client-https-cert + - name: LS_USER_CERTIFICATE + valueFrom: + secretKeyRef: + key: tls.crt + name: linstor-client-https-cert + - name: LS_USER_KEY + valueFrom: + secretKeyRef: + key: tls.key + name: linstor-client-https-cert + image: {{ include "helm_lib_module_image" (list . 
"linstorWaitUntil") }} + imagePullPolicy: IfNotPresent + name: linstor-wait-api-online + resources: + requests: + ephemeral-storage: 50Mi + terminationMessagePath: /dev/termination-log + terminationMessagePolicy: File + securityContext: {} + restartPolicy: Always + schedulerName: default-scheduler + serviceAccount: csi + serviceAccountName: csi + terminationGracePeriodSeconds: 30 + volumes: + - emptyDir: {} + name: socket-dir + +# BUMP \ No newline at end of file diff --git a/templates/linstor-scheduler-admission/deployment.yaml b/templates/linstor-scheduler-admission/deployment.yaml deleted file mode 100644 index d541e53b6..000000000 --- a/templates/linstor-scheduler-admission/deployment.yaml +++ /dev/null @@ -1,98 +0,0 @@ -{{- define "linstor_scheduler_admission_resources" }} -cpu: 20m -memory: 50Mi -{{- end }} - -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} - -{{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} ---- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission" "workload-resource-policy.deckhouse.io" "master")) | nindent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment - name: linstor-scheduler-admission - updatePolicy: - updateMode: "Auto" - resourcePolicy: - containerPolicies: - - containerName: "linstor-scheduler-admission" - minAllowed: - {{- include "linstor_scheduler_admission_resources" . | nindent 8 }} - maxAllowed: - cpu: 40m - memory: 50Mi -{{- end }} ---- -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler-admission" )) | nindent 2 }} -spec: - minAvailable: {{ include "helm_lib_is_ha_to_value" (list . 1 0) }} - selector: - matchLabels: - app: linstor-scheduler-admission ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission" )) | nindent 2 }} -spec: - revisionHistoryLimit: 2 - {{- include "helm_lib_deployment_on_master_strategy_and_replicas_for_ha" . | nindent 2 }} - selector: - matchLabels: - app: linstor-scheduler-admission - template: - metadata: - labels: - app: linstor-scheduler-admission - annotations: - checksum/config: {{ include (print $.Template.BasePath "/linstor-scheduler-admission/secret.yaml") . | sha256sum }} - spec: - imagePullSecrets: - - name: {{ .Chart.Name }}-module-registry - {{- include "helm_lib_priority_class" (tuple . "system-cluster-critical") | nindent 6 }} - {{- include "helm_lib_node_selector" (tuple . "master") | nindent 6 }} - {{- include "helm_lib_tolerations" (tuple . "any-node") | nindent 6 }} - {{- include "helm_lib_module_pod_security_context_run_as_user_nobody" . | nindent 6 }} - {{- include "helm_lib_pod_anti_affinity_for_ha" (list . (dict "app" "linstor-scheduler-admission")) | nindent 6 }} - containers: - - name: linstor-scheduler-admission - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . | nindent 8 }} - args: - - -tls-cert-file=/etc/webhook/certs/tls.crt - - -tls-key-file=/etc/webhook/certs/tls.key - - -driver=replicated.csi.storage.deckhouse.io - image: {{ include "helm_lib_module_image" (list . "linstorSchedulerAdmission") }} - imagePullPolicy: IfNotPresent - ports: - - containerPort: 8080 - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" . 
| nindent 12 }} -{{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "linstor_scheduler_admission_resources" . | nindent 12 }} -{{- end }} - volumeMounts: - - name: linstor-scheduler-admission-certs - mountPath: /etc/webhook/certs - readOnly: true - serviceAccountName: linstor-scheduler-admission - volumes: - - name: linstor-scheduler-admission-certs - secret: - secretName: linstor-scheduler-admission-certs -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-admission/rbac-for-us.yaml b/templates/linstor-scheduler-admission/rbac-for-us.yaml deleted file mode 100644 index faa75a45b..000000000 --- a/templates/linstor-scheduler-admission/rbac-for-us.yaml +++ /dev/null @@ -1,37 +0,0 @@ -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission")) | nindent 2 }} ---- -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: d8:{{ .Chart.Name }}:linstor-scheduler-admission - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission")) | nindent 2 }} -rules: - - apiGroups: [""] - resources: ["pods", "persistentvolumeclaims", "persistentvolumes"] - verbs: ["get"] - - apiGroups: ["storage.k8s.io"] - resources: ["storageclasses"] - verbs: ["get"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: d8:{{ .Chart.Name }}:linstor-scheduler-admission - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler-admission")) | nindent 2 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: d8:{{ .Chart.Name }}:linstor-scheduler-admission -subjects: -- kind: ServiceAccount - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-admission/secret.yaml b/templates/linstor-scheduler-admission/secret.yaml deleted file mode 100644 index 84016ee2c..000000000 --- a/templates/linstor-scheduler-admission/secret.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* - TODO: This secret should be removed after almost all clients moved to 1.64+ Deckhouse version (February 2025?). - Also remove it from hooks/generate_webhook_certs.py and set in module.yaml following: - requirements: - deckhouse: >= 1.64 -*/}} ---- -apiVersion: v1 -kind: Secret -metadata: - name: linstor-scheduler-admission-certs - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission")) | nindent 2 }} -type: kubernetes.io/tls -data: - {{- with .Values.sdsReplicatedVolume.internal.webhookCert }} - tls.crt: {{ .crt | b64enc }} - tls.key: {{ .key | b64enc }} - ca.crt: {{ .ca | b64enc }} - {{- end }} diff --git a/templates/linstor-scheduler-admission/service.yaml b/templates/linstor-scheduler-admission/service.yaml deleted file mode 100644 index 6352eae83..000000000 --- a/templates/linstor-scheduler-admission/service.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} ---- -apiVersion: v1 -kind: Service -metadata: - name: linstor-scheduler-admission - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler-admission")) | nindent 2 }} -spec: - selector: - app: linstor-scheduler-admission - ports: - - protocol: TCP - port: 4443 - targetPort: 8080 -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-admission/webhook.yaml b/templates/linstor-scheduler-admission/webhook.yaml deleted file mode 100644 index 06cf4d2db..000000000 --- a/templates/linstor-scheduler-admission/webhook.yaml +++ /dev/null @@ -1,76 +0,0 @@ -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} -# File generated by "tools/linstor_scheduler_webhook.go" DO NOT EDIT. -# To generate run 'make generate' ---- -apiVersion: admissionregistration.k8s.io/v1 -kind: MutatingWebhookConfiguration -metadata: - name: linstor-scheduler-admission - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-admission")) | nindent 2 }} -webhooks: -- name: scheduler-admission.linstor.deckhouse.io - namespaceSelector: - matchExpressions: - - key: kubernetes.io/metadata.name - operator: NotIn - values: - - kube-basic-auth - - d8-cloud-provider-openstack - - d8-cloud-provider-vsphere - - d8-multitenancy-manager - - kube-system - - d8-metallb - - d8-keepalived - - d8-network-gateway - - d8-operator-trivy - - d8-delivery - - d8-flant-integration - - d8-runtime-audit-engine - - d8-system - - d8-admission-policy-engine - - d8-cni-cilium - - d8-cloud-provider-aws - - d8-cloud-provider-azure - - d8-cloud-provider-gcp - - d8-cloud-provider-yandex - - d8-ceph-csi - - d8-local-path-provisioner - - d8-cni-flannel - - d8-cni-simple-bridge - - d8-cloud-instance-manager - - d8-{{ .Chart.Name }} - - d8-snapshot-controller - - d8-cert-manager - - d8-istio - - d8-user-authz - - d8-user-authn - - d8-operator-prometheus - - kube-prometheus-pushgateway - - d8-descheduler - - d8-ingress-nginx - - 
d8-log-shipper - - d8-pod-reloader - - d8-chrony - - d8-virtualization - - d8-cdi - - d8-okmeter - - d8-openvpn - rules: - - apiGroups: [""] - apiVersions: ["v1"] - operations: ["CREATE"] - resources: ["pods"] - scope: "*" - clientConfig: - service: - namespace: d8-{{ .Chart.Name }} - name: "linstor-scheduler-admission" - path: "/mutate" - port: 4443 - caBundle: {{ .Values.sdsReplicatedVolume.internal.webhookCert.ca | b64enc }} - admissionReviewVersions: ["v1", "v1beta1"] - sideEffects: None - failurePolicy: Ignore - timeoutSeconds: 10 -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-extender/configmap.yaml b/templates/linstor-scheduler-extender/configmap.yaml deleted file mode 100644 index 4d697cd34..000000000 --- a/templates/linstor-scheduler-extender/configmap.yaml +++ /dev/null @@ -1,30 +0,0 @@ -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: linstor-scheduler - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler")) | nindent 2 }} -data: - scheduler-config.yaml: |- - {{- if semverCompare ">= 1.26" .Values.global.discovery.kubernetesVersion }} - apiVersion: kubescheduler.config.k8s.io/v1 - {{- else }} - apiVersion: kubescheduler.config.k8s.io/v1beta3 - {{- end }} - kind: KubeSchedulerConfiguration - profiles: - - schedulerName: linstor - extenders: - - urlPrefix: https://localhost:8099 - filterVerb: filter - prioritizeVerb: prioritize - weight: 5 - enableHTTPS: true - httpTimeout: 300s - nodeCacheCapable: true - tlsConfig: - caData: {{ .Values.sdsReplicatedVolume.internal.customSchedulerExtenderCert.ca | b64enc }} -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-extender/deployment.yaml b/templates/linstor-scheduler-extender/deployment.yaml deleted file mode 100644 index 1f7e8f0a0..000000000 --- a/templates/linstor-scheduler-extender/deployment.yaml +++ /dev/null @@ -1,193 +0,0 @@ -# Source https://github.com/kvaps/linstor-scheduler-extender/blob/master/deploy/all.yaml -{{- define "kube_scheduler_resources" }} -cpu: 10m -memory: 30Mi -{{- end }} - -{{- define "linstor_scheduler_extender_resources" }} -cpu: 10m -memory: 25Mi -{{- end }} - -{{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} -{{- $kubeVersion := semver .Values.global.discovery.kubernetesVersion -}} -{{- if (.Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} ---- -apiVersion: autoscaling.k8s.io/v1 -kind: VerticalPodAutoscaler -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler-extender")) | nindent 2 }} -spec: - targetRef: - apiVersion: "apps/v1" - kind: Deployment - name: linstor-scheduler-extender - updatePolicy: - updateMode: "Auto" - resourcePolicy: - containerPolicies: -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} - - containerName: kube-scheduler - minAllowed: - {{- include "kube_scheduler_resources" . | nindent 8 }} - maxAllowed: - memory: 60Mi - cpu: 20m -{{- end }} - - containerName: linstor-scheduler-extender - minAllowed: - {{- include "linstor_scheduler_extender_resources" . | nindent 8 }} - maxAllowed: - memory: 40Mi - cpu: 20m -{{- end }} ---- -apiVersion: policy/v1 -kind: PodDisruptionBudget -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender" )) | nindent 2 }} -spec: - minAvailable: {{ include "helm_lib_is_ha_to_value" (list . 1 0) }} - selector: - matchLabels: - app: linstor-scheduler-extender ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler" )) | nindent 2 }} -spec: - {{- include "helm_lib_deployment_strategy_and_replicas_for_ha" . | nindent 2 }} - revisionHistoryLimit: 2 - selector: - matchLabels: - app: linstor-scheduler-extender - template: - metadata: - labels: - app: linstor-scheduler-extender - spec: - {{- include "helm_lib_priority_class" (tuple . "system-cluster-critical") | nindent 6 }} - {{- include "helm_lib_node_selector" (tuple . "system") | nindent 6 }} - {{- include "helm_lib_tolerations" (tuple . "system") | nindent 6 }} - {{- include "helm_lib_module_pod_security_context_run_as_user_nobody" . | nindent 6 }} - {{- include "helm_lib_pod_anti_affinity_for_ha" (list . 
(dict "app" "linstor-scheduler-extender")) | nindent 6 }} - imagePullSecrets: - - name: {{ .Chart.Name }}-module-registry - containers: -{{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} - - name: kube-scheduler - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . | nindent 10 }} - command: - - kube-scheduler - {{- if semverCompare ">= 1.22" .Values.global.discovery.kubernetesVersion }} - - --config=/etc/kubernetes/scheduler-config.yaml - {{- else }} - - --scheduler-name=linstor - - --policy-configmap=linstor-scheduler - - --policy-configmap-namespace=d8-{{ .Chart.Name }} - {{- end }} - - --leader-elect=true - - --leader-elect-resource-name=linstor-scheduler - - --leader-elect-resource-namespace=$(NAMESPACE) - env: - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - {{- /* Here we use kube-scheduler image from control-plane-manager module */}} - image: {{ include "helm_lib_module_image" (list (dict "Chart" (dict "Name" "control-plane-manager") "Values" .Values) (list "kubeScheduler" $kubeVersion.Major $kubeVersion.Minor | join "")) }} - imagePullPolicy: IfNotPresent - startupProbe: - failureThreshold: 24 - httpGet: - path: /healthz - port: 10259 - scheme: HTTPS - livenessProbe: - failureThreshold: 8 - httpGet: - path: /healthz - port: 10259 - scheme: HTTPS - initialDelaySeconds: 10 - periodSeconds: 10 - timeoutSeconds: 15 - readinessProbe: - httpGet: - path: /healthz - port: 10259 - scheme: HTTPS - {{- if semverCompare ">= 1.22" .Values.global.discovery.kubernetesVersion }} - volumeMounts: - - mountPath: /etc/kubernetes - name: scheduler-config - {{- end }} - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} - {{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "kube_scheduler_resources" . 
| nindent 14 }} - {{- end }} -{{- end }} - - name: linstor-scheduler-extender - {{- include "helm_lib_module_container_security_context_read_only_root_filesystem_capabilities_drop_all" . | nindent 10 }} - image: {{ include "helm_lib_module_image" (list . "linstorSchedulerExtender") }} - imagePullPolicy: IfNotPresent - args: - - --verbose=true - env: - - name: LS_CONTROLLERS - value: https://linstor.d8-{{ .Chart.Name }}.svc:3371 - - name: LS_USER_CERTIFICATE - valueFrom: - secretKeyRef: - name: linstor-client-https-cert - key: tls.crt - - name: LS_USER_KEY - valueFrom: - secretKeyRef: - name: linstor-client-https-cert - key: tls.key - - name: LS_ROOT_CA - valueFrom: - secretKeyRef: - name: linstor-client-https-cert - key: ca.crt - volumeMounts: - - name: scheduler-extender-certs - mountPath: /etc/sds-replicated-volume-scheduler-extender/certs - readOnly: true - resources: - requests: - {{- include "helm_lib_module_ephemeral_storage_only_logs" . | nindent 14 }} - {{- if not ( .Values.global.enabledModules | has "vertical-pod-autoscaler-crd") }} - {{- include "linstor_scheduler_extender_resources" . 
| nindent 14 }} - {{- end }} - {{- if or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) (semverCompare ">=1.64" .Values.global.deckhouseVersion) }} - ports: - - containerPort: 8099 - protocol: TCP - name: scheduler - {{- end }} - -{{- if semverCompare ">= 1.22" .Values.global.discovery.kubernetesVersion }} - volumes: - - name: scheduler-extender-certs - secret: - secretName: linstor-scheduler-extender-https-certs - {{- $dhVersionIsDev := or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) }} - {{- if and (not $dhVersionIsDev) (semverCompare "<1.64" .Values.global.deckhouseVersion) }} - - configMap: - defaultMode: 420 - name: linstor-scheduler - name: scheduler-config - {{- end }} -{{- end }} - serviceAccountName: linstor-scheduler-extender diff --git a/templates/linstor-scheduler-extender/kube-scheduler-webhook-configuration.yaml b/templates/linstor-scheduler-extender/kube-scheduler-webhook-configuration.yaml deleted file mode 100644 index 4d4ad55e1..000000000 --- a/templates/linstor-scheduler-extender/kube-scheduler-webhook-configuration.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) (semverCompare ">=1.64" .Values.global.deckhouseVersion) }} -apiVersion: deckhouse.io/v1alpha1 -kind: KubeSchedulerWebhookConfiguration -metadata: - name: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
) | nindent 2 }} -webhooks: -- weight: 5 - failurePolicy: Ignore - clientConfig: - service: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - port: 8099 - path: / - caBundle: {{ .Values.sdsReplicatedVolume.internal.customSchedulerExtenderCert.ca | b64enc }} - timeoutSeconds: 5 - -{{- end }} \ No newline at end of file diff --git a/templates/linstor-scheduler-extender/rbac-for-us.yaml b/templates/linstor-scheduler-extender/rbac-for-us.yaml deleted file mode 100644 index 59ea31f46..000000000 --- a/templates/linstor-scheduler-extender/rbac-for-us.yaml +++ /dev/null @@ -1,76 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender")) | nindent 2 }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: d8:{{ .Chart.Name }}:linstor-scheduler-extender-kube-scheduler - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender")) | nindent 2 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:kube-scheduler -subjects: - - kind: ServiceAccount - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: d8:{{ .Chart.Name }}:linstor-scheduler-extender-volume-scheduler - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender")) | nindent 2 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:volume-scheduler -subjects: - - kind: ServiceAccount - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "linstor-scheduler-extender")) | nindent 2 }} -rules: - - apiGroups: ["coordination.k8s.io"] - resources: ["leases"] - verbs: ["create", "get", "update"] ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender")) | nindent 2 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: linstor-scheduler-extender -subjects: - - kind: ServiceAccount - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: d8:{{ .Chart.Name }}:linstor-scheduler-extender:extension-apiserver-authentication-reader - namespace: kube-system - {{- include "helm_lib_module_labels" (list . (dict "app" "linstor-scheduler-extender" )) | nindent 2 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: extension-apiserver-authentication-reader -subjects: - - kind: ServiceAccount - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} diff --git a/templates/linstor-scheduler-extender/secret.yaml b/templates/linstor-scheduler-extender/secret.yaml deleted file mode 100644 index fd6ce929b..000000000 --- a/templates/linstor-scheduler-extender/secret.yaml +++ /dev/null @@ -1,12 +0,0 @@ ---- -apiVersion: v1 -kind: Secret -metadata: - name: linstor-scheduler-extender-https-certs - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . 
(dict "app" "sds-replicated-volume-scheduler-extender")) | nindent 2 }} -type: kubernetes.io/tls -data: - ca.crt: {{ .Values.sdsReplicatedVolume.internal.customSchedulerExtenderCert.ca | b64enc }} - tls.crt: {{ .Values.sdsReplicatedVolume.internal.customSchedulerExtenderCert.crt | b64enc }} - tls.key: {{ .Values.sdsReplicatedVolume.internal.customSchedulerExtenderCert.key | b64enc }} \ No newline at end of file diff --git a/templates/linstor-scheduler-extender/service.yaml b/templates/linstor-scheduler-extender/service.yaml deleted file mode 100644 index 40e4d50a2..000000000 --- a/templates/linstor-scheduler-extender/service.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if or (hasPrefix "dev" .Values.global.deckhouseVersion) (hasSuffix "dev" .Values.global.deckhouseVersion) (semverCompare ">=1.64" .Values.global.deckhouseVersion) }} ---- -apiVersion: v1 -kind: Service -metadata: - name: linstor-scheduler-extender - namespace: d8-{{ .Chart.Name }} - {{- include "helm_lib_module_labels" (list . (dict "app" "sds-replicated-volume-scheduler-extender" )) | nindent 2 }} -spec: - type: ClusterIP - ports: - - port: 8099 - targetPort: scheduler - protocol: TCP - name: http - selector: - app: linstor-scheduler-extender -{{- end }} diff --git a/werf.yaml b/werf.yaml index 2c3514910..72c814adb 100644 --- a/werf.yaml +++ b/werf.yaml @@ -18,3 +18,4 @@ build: {{ tpl (.Files.Get ".werf/python-deps.yaml") $ }} {{ tpl (.Files.Get ".werf/bundle.yaml") $ }} {{ tpl (.Files.Get ".werf/release.yaml") $ }} +